Each row of this dataset describes one Python source file: the file `content` itself plus repository metadata (paths, repo names, commit hexshas, licenses, star/issue/fork counts with event dates) and simple per-file statistics. The schema is:

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 1.03M |
| ext | string | 9 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 241 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 | 1 to 208k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 241 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 241 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 1.03M |
| avg_line_length | float64 | 1.5 to 756k |
| max_line_length | int64 | 4 to 869k |
| alphanum_fraction | float64 | 0.01 to 0.98 |
| count_classes | int64 | 0 to 3.38k |
| score_classes | float64 | 0 to 0.01 |
| count_generators | int64 | 0 to 832 |
| score_generators | float64 | 0 |
| count_decorators | int64 | 0 to 2.75k |
| score_decorators | float64 | 0 |
| count_async_functions | int64 | 0 to 623 |
| score_async_functions | float64 | 0 |
| count_documentation | int64 | 3 to 581k |
| score_documentation | float64 | 0.4 to 0.6 |

Each sample below is shown as a metadata line (hexsha, size, ext, lang, repo path/name/head hexsha, licenses, star/issue/fork counts with their event date ranges), the file content, and a statistics line in the column order avg_line_length | max_line_length | alphanum_fraction | count_classes | score_classes | count_generators | score_generators | count_decorators | score_decorators | count_async_functions | score_async_functions | count_documentation | score_documentation.
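For orientation, the sketch below shows one way rows with this schema could be inspected after loading the dump into pandas. The file name `code_files.parquet` and the choice of Parquet are assumptions made purely for illustration; only the column names come from the schema above.

```python
import pandas as pd

# Hypothetical export of this dataset; the real path and storage format are not specified here.
df = pd.read_parquet("code_files.parquet")

row = df.iloc[0]

# Repository metadata columns.
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])

# The Python source itself lives in the `content` column.
print(row["content"][:200])

# Per-file statistics columns.
print(row[["avg_line_length", "max_line_length", "alphanum_fraction", "score_documentation"]])
```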
d9b0c3d32e07c56a0732f0fca454740538a940fe | 451 | py | Python | setup.py | Kaslanarian/PythonSVM @ 715eeef2a245736167addf45a6aee8b40b54d0c7 | licenses: ["MIT"] | stars: 2 (2021-09-25T01:00:37.000Z to 2021-09-27T12:13:24.000Z) | issues: 1 (2021-09-17T12:08:14.000Z to 2021-09-17T12:08:14.000Z) | forks: null

```python
import setuptools #enables develop
setuptools.setup(
name='pysvm',
version='0.1',
description='PySVM : A NumPy implementation of SVM based on SMO algorithm',
author_email="191300064@smail.nju.edu.cn",
packages=['pysvm'],
license='MIT License',
long_description=open('README.md', encoding='utf-8').read(),
install_requires=[ #自动安装依赖
'numpy', 'sklearn'
],
url='https://github.com/Kaslanarian/PySVM',
)
```
| 28.1875 | 79 | 0.660754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.4946 |
d9b0df7f5ef294a68858d836af143c289d120187 | 4,375 | py | Python | Object_detection_image.py | hiperus0988/pyao @ 72c56975a3d45aa033bdf7650b5369d59240395f | licenses: ["Apache-2.0"] | stars: 1 (2021-06-09T22:17:57.000Z to 2021-06-09T22:17:57.000Z) | issues: null | forks: null

```python
######## Image Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 1/15/18
# Description:
# This program uses a TensorFlow-trained classifier to perform object detection.
# It loads the classifier and uses it to perform object detection on an image.
# It draws boxes and scores around the objects of interest in the image.
## Some of the code is copied from Google's example at
## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
## and some is copied from Dat Tran's example at
## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
## but I changed it to make it more understandable to me.
# Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
IMAGE_NAME = 'test1.jpg'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')
# Path to image
PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)
# Number of classes the object detector can identify
NUM_CLASSES = 6
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Load image using OpenCV and
# expand image dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
image = cv2.imread(PATH_TO_IMAGE)
image_expanded = np.expand_dims(image, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_expanded})
# Draw the results of the detection (aka 'visualize the results')
vis_util.visualize_boxes_and_labels_on_image_array(
image,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.60)
# All the results have been drawn on image. Now display the image.
cv2.imshow('Object detector', image)
# Press any key to close the image
cv2.waitKey(0)
# Clean up
cv2.destroyAllWindows()
```
| 36.458333 | 122 | 0.779886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,505 | 0.572571 |
d9b62ab258f0b51ef25d431f8fa66de9acd438a7 | 1,895 | py | Python | setup.py | giggslam/python-messengerbot-sdk @ 4a6fadf96fe3425da9abc4726fbb84db6d84f7b5 | licenses: ["Apache-2.0"] | stars: 23 (2019-03-05T08:33:34.000Z to 2021-12-13T01:52:47.000Z) | issues: null | forks: 6 (2019-03-07T07:58:02.000Z to 2020-12-18T10:08:47.000Z)

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
__version__ = ''
with open('facebookbot/__about__.py', 'r') as fd:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fd:
m = reg.match(line)
if m:
__version__ = m.group(1)
break
def _requirements():
with open('requirements.txt', 'r') as fd:
return [name.strip() for name in fd.readlines()]
with open('README.rst', 'r') as fd:
long_description = fd.read()
setup(
name="fbsdk",
version=__version__,
author="Sam Chang",
author_email="t0915290092@gmail.com",
maintainer="Sam Chang",
maintainer_email="t0915290092@gmail.com",
url="https://github.com/boompieman/fbsdk",
description="Facebook Messaging API SDK for Python",
long_description=long_description,
license='Apache License 2.0',
packages=[
"facebookbot", "facebookbot.models"
],
install_requires=_requirements(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Topic :: Software Development"
]
)
```
| 30.079365 | 76 | 0.663852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,092 | 0.576253 |
d9b8347698a1fe18b6d9ec66f6bfbfa77f2567be | 1,566 | py | Python | using_paramiko.py | allupramodreddy/cisco_py @ 5488b56d9324011860b78998e694dcce6da5e3d1 | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: null

```python
#!/usr/local/bin/python3
import paramiko,time
#using as SSH Client
client = paramiko.SSHClient()
# check dir(client) to find available options.
# auto adjust host key verification with yes or no
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# time for connecting to remote Cisco IOS
"""
Manually taking input
addr = input('Provide IP address to connect to: ')
user = input('Username: ')
pwd = getpass.getpass('Password: ')"""
# Taking input from files
f1 = open("devices.txt","r")
f2 = open("commands.txt","r")
for line in f1:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
data = line.split(" ")
# print(data)
addr = data[0]
user = data[1]
pwd = data[2]
f3 = open(addr+".txt","w+")
# print(addr +" "+ user +" " +pwd)
client.connect(addr,username=user,password=pwd,allow_agent=False,look_for_keys=False)
# we have to ask for Shell
device_access = client.invoke_shell()
for line in f2:
device_access.send(line)
time.sleep(1)
output = device_access.recv(55000).decode('ascii')
f3.write(output)
"""
THIS CODE IS FOR SINGLE COMMAND, FOR MULTIPLE COMMANDS CODE BELOW
# send command to the device
device_access.send("ter len 0\nshow run \n")
time.sleep(2)
# receive output from the device, convert it to byte-like format and print it
print(device_access.recv(550000).decode('ascii'))
# We can print the same to a file too
with open("csr1000v.txt","w") as f:
    f.write(device_access.recv(550000).decode('ascii'))"""
```
| 23.727273 | 89 | 0.691571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 907 | 0.579183 |
d9b86cc42aaff67200ff3f4f5f6d27121835fd8c | 733 | py | Python | old/.history/a_20201125192943.py | pscly/bisai1 @ e619186cec5053a8e02bd59e48fc3ad3af47d19a | licenses: ["MulanPSL-1.0"] | stars: null | issues: null | forks: null

```python
# for n in range(400,500):
# i = n // 100
# j = n // 10 % 10
# k = n % 10
# if n == i ** 3 + j ** 3 + k ** 3:
# print(n)
# Problem 1 (16)
# input("请输入(第一次):")
# s1 = input("请输入(第二次):")
# l1 = s1.split(' ')
# l2 = []
# for i in l1:
# if i.isdigit():
# l2.append(int(i))
# for i in l2:
# if not (i % 6):
# print(i, end=" ")
# Problem 2 (17)
out_l1 = []
def bian_int_list(l1):
    re_l1 = []  # the list to be returned
for i in l1:
re_l1.append(i)
def jisuan(str_num):
he1 = 0
global out_l1
for i in l1():
he1 += int(i)**2
if he1 > int(str_num):
out_l1.append(str_num)
return None
while 1:
in_1 = input("请输入数值:")
nums_l1 = in_1.split(' ')
```
| 13.089286 | 39 | 0.452933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 441 | 0.553325 |
d9c69927875c451378bcb7d50069e903036beefa | 5,490 | py | Python | bathymetry_blink/bathymetry_blink.py | poster515/BlinkyTape_Python @ edc2f7e43fbf07dbfdeba60da7acb7ae7a3707d0 | licenses: ["MIT"] | stars: 26 (2015-02-14T11:37:21.000Z to 2021-05-10T17:24:16.000Z) | issues: 8 (2015-02-14T17:33:24.000Z to 2021-10-05T20:32:19.000Z) | forks: 15 (2015-01-24T23:36:54.000Z to 2021-10-02T23:40:08.000Z)

```python
"""
This script will modulate the blinky lights using the following algorithm:
1) uses user-provided location to obtain row of pixel data from bathy image
2) samples a 'number of LEDs' number of pixels from that row
3) shifts the sampled row data to center it at the location specified by user
4) displays resulting pixels on Blinky Tape
5) shifts next row by a given latitude, also specified by user
6) sleeps for user-specified period of time
Uses the following arguments:
-l/--location: tuple
Location of the user in tuple(lat, lon). This represents the center of the LED strip. Defaults to (0, 0)
-u/--update-interval: int
Update interval of the script, in minutes. Defaults to 10.
-p/--port: str
Serial port of the BlinkyLight (e.g., 'ttyAMA0', 'COM3'). Defaults to 'COM5'.
-d/--delta_latitude: int
Vertical change in latitude every update rate. May be 0, but this will result in a never-changing LEDs.
-i/--image: str
Name of the PNG image that contains the color coded pathymetric data.
The file current named mapserv.png was obtained using the following API:
https://www.gebco.net/data_and_products/gebco_web_services/web_map_service/mapserv?request=getmap&service=wms&BBOX=-90,-180,90,180&format=image/png&height=600&width=1200&crs=EPSG:4326&layers=GEBCO_LATEST_SUB_ICE_TOPO&version=1.3.0
In lieu of providing command line arguments, you may alternatively edit the defaults in bath_config.json.
NOTE: runs via:
runfile('/BlinkyTape_Python/bathymetry_blink/bathymetry_blink.py', wdir='/BlinkyTape_Python/')
(C) 2021 Joseph Post (https://joeycodes.dev)
MIT Licensed
"""
import optparse
import json
from blinkytape import BlinkyTape
from time import sleep
from PIL import Image
import numpy as np
import sys
MAX_ERRORS = 3
num_errors = 0
# Obtain default parameters
with open("./bathymetry_blink/bathy_config.json") as f:
config = json.load(f)
# Default Blinky Tape port on Raspberry Pi is /dev/ttyACM0
parser = optparse.OptionParser()
parser.add_option("-p", "--port", dest="portname",
help="serial port (ex: /dev/ttyACM0)", default=config["port"])
parser.add_option("-l", "--location", dest="location",
help="Location of the center of the LED strip (ex: 70,-110)", default=config["location"])
parser.add_option("-u", "--update-rate", dest="update_rate",
help="How often to update elevation profile (mins) (ex: 5)", default=config["update_rate"])
parser.add_option("-d", "--delta-latitude", dest="delta_latitude",
help="Change in latitude during update (ex: 5)", default=config["delta_latitude"])
parser.add_option("-n", "--num-leds", dest="num_leds",
help="Number of LEDs in strip (ex: 60)", default=config["num_leds"])
parser.add_option("-i", "--image", dest="image_name",
help="Name of the map/bathymetry image (ex: ./mapserv.png)", default=config["image"])
(options, args) = parser.parse_args()
if args:
print("Unknown parameters: " + args)
# grab the values provided by user (or defaults)
port = options.portname
loc = options.location
rate = options.update_rate
delta = options.delta_latitude
n_leds = options.num_leds
i_name = options.image_name
# Some visual indication that it works, for headless setups (green tape)
bt = BlinkyTape(port, n_leds)
bt.displayColor(0, 100, 0)
bt.show()
sleep(2)
while True:
try:
# first, load image
im = Image.open(i_name) # Can be many different formats.
cols, rows = im.size
a = np.asarray(im) # of shape (rows, cols, channels)
# map loc latitude to 0-based index
latitude_index = min(rows - 1, max(0, (int)(((loc[0] - -90) / (90 - -90)) * (rows - 0) + 0)))
longitude_index = min(cols - 1, max(0, (int)(((loc[1] - -180) / (180 - -180)) * (cols - 0) + 0)))
# update the location of the next row of elevation data to take
loc[0] += delta
loc[0] = ((loc[0] + 90) % 180) - 90 # wraps to next pole if overflow
print("Lat index: " + str(latitude_index))
print("Lon index: " + str(longitude_index))
print("Next latitude: " + str(loc[0]))
# grab the applicable pixel indices
indices = [(int)(x*(cols/n_leds)) for x in range(n_leds)]
# sample that row of pixel data
output_pixels = np.take(a[latitude_index], indices, axis=0)
# rotate the row to center around the specified longitude
output_pixels = np.roll(output_pixels, longitude_index, axis=0)
# send all pixel data to bt
for pixel in output_pixels:
print("Sending r: {}, g: {}, b: {}".format(*pixel))
bt.sendPixel(*pixel)
# finally, show the image
bt.show()
# delete variables for memory management
del a
del im
# Tape resets to stored pattern after a few seconds of inactivity
sleep(rate * 60) # Wait specified number of minutes
# sleep(10) # Wait specified number of minutes
except KeyboardInterrupt:
print("Keyboard interrupt, ending program.")
sys.exit()
except RuntimeError as e:
print("Encountered runtime error: " + e.args[0])
# flush any incomplete data
bt.show()
num_errors += 1
if num_errors > MAX_ERRORS:
sys.exit("Error count exceeds that allowed.")
```
| 36.845638 | 230 | 0.654098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,142 | 0.572313 |
d9d317f8ac0c3d87ca7347265d7a9836b41ed098 | 2,481 | py | Python | gci-vci-serverless/src/helpers/vp_saves_helpers.py | ClinGen/gene-and-variant-curation-tools @ 30f21d8f03d8b5c180c1ce3cb8401b5abc660080 | licenses: ["MIT"] | stars: 1 (2021-09-17T20:39:07.000Z to 2021-09-17T20:39:07.000Z) | issues: 133 (2021-08-29T17:24:26.000Z to 2022-03-25T17:24:31.000Z) | forks: null

```python
import datetime
import uuid
import simplejson as json
from src.db.s3_client import Client as S3Client
from decimal import Decimal
def get_from_archive(archive_key):
''' Download a VP Save from S3.
:param str archive_key: The vp_save data's location (S3 bucket and file path). This value is required.
'''
if archive_key is None or '/' not in archive_key:
raise ValueError()
bucket, key = archive_key.split('/', 1)
s3_client = S3Client()
try:
archive_object = json.loads(s3_client.get_object(bucket, key)['Body'].read(),parse_float=Decimal)
except Exception as e:
print('ERROR: Error downloading ' + key + ' from ' + bucket + ' bucket. ERROR\n%s' %e)
raise
return archive_object
def build(vp_save={}):
''' Builds and returns a valid vp_save object.
Builds a new vp_save object by creating default values for
required fields and combines any of the given attributes.
'''
vp_save['PK'] = str(uuid.uuid4())
# Set timestamps (for new data)
now = datetime.datetime.now().isoformat()
vp_save['date_created'] = now
vp_save['last_modified'] = now
vp_save['item_type'] = 'vp_save'
return vp_save
def archive(bucket, vp_save_pk, save_data):
''' Archives a vp save data to S3.
Uploads the save data object as a JSON file to S3. The location of the archive
depends on the bucket and the primary key of the save data. If the upload fails,
an exception is raised. If successful, returns the archive location.
:param str bucket: The name of the S3 bucket for the archive. This value is required.
:param str vp_save_pk: The vp_save PK to use as the name of the JSON file. This value is required.
:param obj save_data: The save data object to archive. This value is required.
'''
if bucket is None or len(bucket) <= 0:
raise ValueError()
if vp_save_pk is None or len(vp_save_pk) <= 0:
raise ValueError()
if not save_data:
raise ValueError()
archive_file = __archive_key(save_data) + '/' + vp_save_pk + '.json'
# Upload curation data to S3 archive bucket.
s3_client = S3Client()
try:
s3_client.put_object(
bytes(json.dumps(save_data).encode('UTF-8')),
bucket,
archive_file
)
except Exception as e:
print('ERROR: Error uploading ' + archive_file + ' to ' + bucket + ' bucket. ERROR\n%s' %e)
raise
archive_key_comps = [bucket, archive_file]
return '/'.join(archive_key_comps)
def __archive_key(save_data):
return save_data['PK']
```
| 27.263736 | 104 | 0.699315 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,134 | 0.457074 |
d9d368d362ab070d71b3363fe0fb20728ec9660d | 5,985 | py | Python | src/entity/002_createRdf.py | toyo-bunko/paper_app @ f988e05cf83711d98c5ed735c0fd74fcf11e0f05 | licenses: ["Apache-2.0"] | stars: 1 (2021-02-28T15:38:37.000Z to 2021-02-28T15:38:37.000Z) | issues: null | forks: null

```python
import shutil
import os
import json
import glob
import yaml
import sys
import urllib
import ssl
import csv
import time
import requests
import json
import csv
from rdflib import URIRef, BNode, Literal, Graph
from rdflib.namespace import RDF, RDFS, FOAF, XSD
from rdflib import Namespace
all = Graph()
with open("data/dict.json") as f:
ln_map = json.load(f)
st_path = "../data/index.json"
with open(st_path) as f:
result = json.load(f)
uris = []
for obj in result:
fields = ["spatial", "agential"]
for field in fields:
values = obj[field]
for value in values:
uri = "chname:"+value
if field == "spatial":
uri = "place:"+value
if uri not in uris:
uris.append(uri)
for uri in uris:
print(uri)
tmp = uri.split(":")
prefix = tmp[0]
suffix = tmp[1]
ln = suffix
ln_org = ""
if ln in ln_map:
ln_org = ln
ln = ln_map[ln]
if len(ln) > 20:
continue
# ln = obj["uri"].split(":")[1]
'''
wiki_path = "data/wikidata/"+ln+".json"
wiki = {}
if os.path.exists(wiki_path):
with open(wiki_path) as f:
wiki = json.load(f)
# sameAs
stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(wiki_url))
all.add(stmt)
obj = wiki["entities"][wiki_url.split("/")[-1]]
# description
if "descriptions" in obj and "ja" in obj["descriptions"]:
stmt = (subject, URIRef("http://schema.org/description"), Literal(obj["descriptions"]["ja"]["value"], lang="ja"))
all.add(stmt)
# label
if "labels" in obj and "ja" in obj["labels"]:
stmt = (subject, RDFS.label, Literal(obj["labels"]["ja"]["value"]))
all.add(stmt)
ln = wiki_url.split("/")[-1]
'''
db_path = "data/dbpedia_ja/"+ln+".json"
wiki_path = "data/wikidata/"+ln+".json"
db = {}
wiki = {}
if os.path.exists(db_path):
with open(db_path) as f:
db = json.load(f)
if os.path.exists(wiki_path):
with open(wiki_path) as f:
wiki = json.load(f)
db_uri = "http://ja.dbpedia.org/resource/"+ln
if db_uri not in db:
print("not" , db_uri)
continue
# ######
subject = URIRef("https://shibusawa-dlab.github.io/lab1/api/"+prefix+"/"+ln)
if prefix == "chname":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Agent"))
all.add(stmt)
elif prefix == "time":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Time"))
all.add(stmt)
elif prefix == "place":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Place"))
all.add(stmt)
elif prefix == "event":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Event"))
all.add(stmt)
elif prefix == "org":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Organization"))
all.add(stmt)
elif prefix == "keyword":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Keyword"))
all.add(stmt)
elif prefix == "type":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Type"))
all.add(stmt)
# ######
obj = db[db_uri]
stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(db_uri))
all.add(stmt)
if "http://dbpedia.org/ontology/thumbnail" in obj:
stmt = (subject, URIRef("http://schema.org/image"), URIRef(obj["http://dbpedia.org/ontology/thumbnail"][0]["value"]))
all.add(stmt)
if "http://www.w3.org/2000/01/rdf-schema#label" in obj:
labels = obj["http://www.w3.org/2000/01/rdf-schema#label"]
for label in labels:
if label["lang"] == "ja":
stmt = (subject, RDFS.label, Literal(label["value"]))
all.add(stmt)
if "http://www.w3.org/2000/01/rdf-schema#comment" in obj:
labels = obj["http://www.w3.org/2000/01/rdf-schema#comment"]
for label in labels:
stmt = (subject, URIRef("http://schema.org/description"), Literal(label["value"], lang=label["lang"]))
all.add(stmt)
if "http://www.w3.org/2002/07/owl#sameAs" in obj:
labels = obj["http://www.w3.org/2002/07/owl#sameAs"]
for label in labels:
value = label["value"]
if "http://dbpedia.org" in value or "http://ja.dbpedia.org" in value or "www.wikidata.org" in value:
stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(value))
all.add(stmt)
    # location information
'''
if "point" in obj and prefix == "place":
value = obj["point"]["value"].split(" ")
# addGeo関数
geoUri = addGeo({
"lat" : float(value[0]),
"long": float(value[1])
})
stmt = (subject, URIRef("http://schema.org/geo"), geoUri)
if suffix not in places:
places[suffix] = {
"lat" : float(value[0]),
"long": float(value[1])
}
all.add(stmt)
'''
    # pre-normalization name
if ln_org != "" and ln != ln_org:
stmt = (subject, URIRef("http://schema.org/name"), Literal(ln_org))
all.add(stmt)
path = "data/all.json"
all.serialize(destination=path, format='json-ld')
all.serialize(destination=path.replace(".json", ".rdf"), format='pretty-xml')
```
| 29.338235 | 129 | 0.513116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,677 | 0.445795 |
d9d80db949c5d5f415b809076411a2404da55e53 | 10,912 | py | Python | sympy/combinatorics/testutil.py | ethankward/sympy @ 44664d9f625a1c68bc492006cfe1012cb0b49ee4 | licenses: ["BSD-3-Clause"] | stars: 2 (2019-05-18T22:36:49.000Z to 2019-05-24T05:56:16.000Z) | issues: 1 (2020-04-22T12:45:26.000Z to 2020-04-22T12:45:26.000Z) | forks: 3 (2021-02-16T16:40:49.000Z to 2022-03-07T18:28:41.000Z)

```python
from sympy.combinatorics import Permutation
from sympy.combinatorics.util import _distribute_gens_by_base
rmul = Permutation.rmul
def _cmp_perm_lists(first, second):
"""
Compare two lists of permutations as sets.
This is used for testing purposes. Since the array form of a
permutation is currently a list, Permutation is not hashable
and cannot be put into a set.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.testutil import _cmp_perm_lists
>>> a = Permutation([0, 2, 3, 4, 1])
>>> b = Permutation([1, 2, 0, 4, 3])
>>> c = Permutation([3, 4, 0, 1, 2])
>>> ls1 = [a, b, c]
>>> ls2 = [b, c, a]
>>> _cmp_perm_lists(ls1, ls2)
True
"""
return {tuple(a) for a in first} == \
{tuple(a) for a in second}
def _naive_list_centralizer(self, other, af=False):
from sympy.combinatorics.perm_groups import PermutationGroup
"""
Return a list of elements for the centralizer of a subgroup/set/element.
This is a brute force implementation that goes over all elements of the
group and checks for membership in the centralizer. It is used to
test ``.centralizer()`` from ``sympy.combinatorics.perm_groups``.
Examples
========
>>> from sympy.combinatorics.testutil import _naive_list_centralizer
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> _naive_list_centralizer(D, D)
[Permutation([0, 1, 2, 3]), Permutation([2, 3, 0, 1])]
See Also
========
sympy.combinatorics.perm_groups.centralizer
"""
from sympy.combinatorics.permutations import _af_commutes_with
if hasattr(other, 'generators'):
elements = list(self.generate_dimino(af=True))
gens = [x._array_form for x in other.generators]
commutes_with_gens = lambda x: all(_af_commutes_with(x, gen) for gen in gens)
centralizer_list = []
if not af:
for element in elements:
if commutes_with_gens(element):
centralizer_list.append(Permutation._af_new(element))
else:
for element in elements:
if commutes_with_gens(element):
centralizer_list.append(element)
return centralizer_list
elif hasattr(other, 'getitem'):
return _naive_list_centralizer(self, PermutationGroup(other), af)
elif hasattr(other, 'array_form'):
return _naive_list_centralizer(self, PermutationGroup([other]), af)
def _verify_bsgs(group, base, gens):
"""
Verify the correctness of a base and strong generating set.
This is a naive implementation using the definition of a base and a strong
generating set relative to it. There are other procedures for
verifying a base and strong generating set, but this one will
serve for more robust testing.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> A = AlternatingGroup(4)
>>> A.schreier_sims()
>>> _verify_bsgs(A, A.base, A.strong_gens)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims
"""
from sympy.combinatorics.perm_groups import PermutationGroup
strong_gens_distr = _distribute_gens_by_base(base, gens)
current_stabilizer = group
for i in range(len(base)):
candidate = PermutationGroup(strong_gens_distr[i])
if current_stabilizer.order() != candidate.order():
return False
current_stabilizer = current_stabilizer.stabilizer(base[i])
if current_stabilizer.order() != 1:
return False
return True
def _verify_centralizer(group, arg, centr=None):
"""
Verify the centralizer of a group/set/element inside another group.
This is used for testing ``.centralizer()`` from
``sympy.combinatorics.perm_groups``
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.testutil import _verify_centralizer
>>> S = SymmetricGroup(5)
>>> A = AlternatingGroup(5)
>>> centr = PermutationGroup([Permutation([0, 1, 2, 3, 4])])
>>> _verify_centralizer(S, A, centr)
True
See Also
========
_naive_list_centralizer,
sympy.combinatorics.perm_groups.PermutationGroup.centralizer,
_cmp_perm_lists
"""
if centr is None:
centr = group.centralizer(arg)
centr_list = list(centr.generate_dimino(af=True))
centr_list_naive = _naive_list_centralizer(group, arg, af=True)
return _cmp_perm_lists(centr_list, centr_list_naive)
def _verify_normal_closure(group, arg, closure=None):
from sympy.combinatorics.perm_groups import PermutationGroup
"""
Verify the normal closure of a subgroup/subset/element in a group.
This is used to test
sympy.combinatorics.perm_groups.PermutationGroup.normal_closure
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.testutil import _verify_normal_closure
>>> S = SymmetricGroup(3)
>>> A = AlternatingGroup(3)
>>> _verify_normal_closure(S, A, closure=A)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.normal_closure
"""
if closure is None:
closure = group.normal_closure(arg)
conjugates = set()
if hasattr(arg, 'generators'):
subgr_gens = arg.generators
elif hasattr(arg, '__getitem__'):
subgr_gens = arg
elif hasattr(arg, 'array_form'):
subgr_gens = [arg]
for el in group.generate_dimino():
for gen in subgr_gens:
conjugates.add(gen ^ el)
naive_closure = PermutationGroup(list(conjugates))
return closure.is_subgroup(naive_closure)
def canonicalize_naive(g, dummies, sym, *v):
"""
Canonicalize tensor formed by tensors of the different types
g permutation representing the tensor
dummies list of dummy indices
msym symmetry of the metric
v is a list of (base_i, gens_i, n_i, sym_i) for tensors of type `i`
base_i, gens_i BSGS for tensors of this type
n_i number ot tensors of type `i`
sym_i symmetry under exchange of two component tensors of type `i`
None no symmetry
0 commuting
1 anticommuting
Return 0 if the tensor is zero, else return the array form of
the permutation representing the canonical form of the tensor.
Examples
========
>>> from sympy.combinatorics.testutil import canonicalize_naive
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> g = Permutation([1, 3, 2, 0, 4, 5])
>>> base2, gens2 = get_symmetric_group_sgs(2)
>>> canonicalize_naive(g, [2, 3], 0, (base2, gens2, 2, 0))
[0, 2, 1, 3, 4, 5]
"""
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.tensor_can import gens_products, dummy_sgs
from sympy.combinatorics.permutations import Permutation, _af_rmul
v1 = []
for i in range(len(v)):
base_i, gens_i, n_i, sym_i = v[i]
v1.append((base_i, gens_i, [[]]*n_i, sym_i))
size, sbase, sgens = gens_products(*v1)
dgens = dummy_sgs(dummies, sym, size-2)
if isinstance(sym, int):
num_types = 1
dummies = [dummies]
sym = [sym]
else:
num_types = len(sym)
dgens = []
for i in range(num_types):
dgens.extend(dummy_sgs(dummies[i], sym[i], size - 2))
S = PermutationGroup(sgens)
D = PermutationGroup([Permutation(x) for x in dgens])
dlist = list(D.generate(af=True))
g = g.array_form
st = set()
for s in S.generate(af=True):
h = _af_rmul(g, s)
for d in dlist:
q = tuple(_af_rmul(d, h))
st.add(q)
a = list(st)
a.sort()
prev = (0,)*size
for h in a:
if h[:-2] == prev[:-2]:
if h[-1] != prev[-1]:
return 0
prev = h
return list(a[0])
def graph_certificate(gr):
"""
Return a certificate for the graph
gr adjacency list
The graph is assumed to be unoriented and without
external lines.
Associate to each vertex of the graph a symmetric tensor with
number of indices equal to the degree of the vertex; indices
are contracted when they correspond to the same line of the graph.
The canonical form of the tensor gives a certificate for the graph.
This is not an efficient algorithm to get the certificate of a graph.
Examples
========
>>> from sympy.combinatorics.testutil import graph_certificate
>>> gr1 = {0:[1, 2, 3, 5], 1:[0, 2, 4], 2:[0, 1, 3, 4], 3:[0, 2, 4], 4:[1, 2, 3, 5], 5:[0, 4]}
>>> gr2 = {0:[1, 5], 1:[0, 2, 3, 4], 2:[1, 3, 5], 3:[1, 2, 4, 5], 4:[1, 3, 5], 5:[0, 2, 3, 4]}
>>> c1 = graph_certificate(gr1)
>>> c2 = graph_certificate(gr2)
>>> c1
[0, 2, 4, 6, 1, 8, 10, 12, 3, 14, 16, 18, 5, 9, 15, 7, 11, 17, 13, 19, 20, 21]
>>> c1 == c2
True
"""
from sympy.combinatorics.permutations import _af_invert
from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize
items = list(gr.items())
items.sort(key=lambda x: len(x[1]), reverse=True)
pvert = [x[0] for x in items]
pvert = _af_invert(pvert)
# the indices of the tensor are twice the number of lines of the graph
num_indices = 0
for v, neigh in items:
num_indices += len(neigh)
# associate to each vertex its indices; for each line
# between two vertices assign the
# even index to the vertex which comes first in items,
# the odd index to the other vertex
vertices = [[] for i in items]
i = 0
for v, neigh in items:
for v2 in neigh:
if pvert[v] < pvert[v2]:
vertices[pvert[v]].append(i)
vertices[pvert[v2]].append(i+1)
i += 2
g = []
for v in vertices:
g.extend(v)
assert len(g) == num_indices
g += [num_indices, num_indices + 1]
size = num_indices + 2
assert sorted(g) == list(range(size))
g = Permutation(g)
vlen = [0]*(len(vertices[0])+1)
for neigh in vertices:
vlen[len(neigh)] += 1
v = []
for i in range(len(vlen)):
n = vlen[i]
if n:
base, gens = get_symmetric_group_sgs(i)
v.append((base, gens, n, 0))
v.reverse()
dummies = list(range(num_indices))
can = canonicalize(g, dummies, 0, *v)
return can
```
| 32.47619 | 98 | 0.641679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,734 | 0.525477 |
d9d95781d1bacab44253ba285649d7b99ee1e33d | 542 | py | Python | src/vatic_checker/config.py | jonkeane/vatic-checker @ fa8aec6946dcfd3f466b62f9c00d81bc43514b22 | licenses: ["MIT"] | stars: null | issues: null | forks: null

```python
localhost = "http://localhost/" # your local host
database = "mysql://root@localhost/vaticChecker" # server://user:pass@localhost/dbname
min_training = 2 # the minimum number of training videos to be considered
recaptcha_secret = "" # recaptcha secret for verification
duplicate_annotations = False # Should the server allow for duplicate annotations?
import os.path
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# TODO: remove on server
import os
os.environ['PYTHON_EGG_CACHE'] = '/tmp/apache'
| 38.714286 | 94 | 0.745387 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 310 | 0.571956 |
d9e551f94d290cc9b470d1fddfc0e91666dab7ba | 444 | py | Python | setup.py | zhanghang1989/notedown @ b0fa1eac88d1cd7fa2261d6c454f82669e6f552b | licenses: ["BSD-2-Clause"] | stars: null | issues: null | forks: null

```python
from setuptools import setup
# create __version__
exec(open('./_version.py').read())
setup(
name="notedown",
version=__version__,
description="Convert markdown to IPython notebook.",
author="Aaron O'Leary",
author_email='dev@aaren.me',
url='http://github.com/aaren/notedown',
install_requires=['ipython', ],
entry_points={
'console_scripts': [
'notedown = notedown:cli',
],
}
)
```
| 22.2 | 56 | 0.628378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 198 | 0.445946 |
d9e5c18f6a37dd4a96dd21f7ddefb31b197848dd | 2,853 | py | Python | multithreaded_webcrawler.py | the-muses-ltd/Multithreaded-Webcrawler-Cassandra- @ eee68faf3c6ecb548edd0e96ce445dcd366fb735 | licenses: ["MIT"] | stars: null | issues: null | forks: null

```python
# This is a reusable webcrawler architecture that can be adapted to scrape any website.
# RESULTS:
# Roughly 24 seconds per thousand courses scraped for ThreadPoolExecutor vs 63s for unthreaded script.
# This is a very basic implementation of multithreading in order to show the proof of concept, but is a good base to build off of.
import requests
from bs4 import BeautifulSoup
import csv
from concurrent.futures import ProcessPoolExecutor, as_completed, ThreadPoolExecutor
import time
import logging
from mitopencourseware_crawler_worker import mit_crawler
def courses_spider(max_pages):
data_to_csv = [] #holds all data to send to csv
print("Webcrawler workers have started, please wait while we finish crawling...")
# remove max pages loop (unecessary)
page = 1
while page <= max_pages:
url = 'https://ocw.mit.edu/courses/'
source_code = requests.get(url)
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
# Multithread only the work:
# Tuning is required to find the most efficient amount of workers in the thread pool.
with ThreadPoolExecutor(max_workers=30) as executor:
start = time.time()
futures = [ executor.submit(work, link) for link in soup.findAll('h4', {'class': 'course_title'}, limit=100) ]
data_to_csv = []
for result in as_completed(futures):
data_to_csv.append(result.result())
end = time.time()
print("Time Taken to complete: {:.6f}s".format(end-start))
print("Courses extracted: ", len(data_to_csv))
page += 1
export_to_csv(data_to_csv)
def work(link):
# replace this fucntion with the specific crawler you want to use:
return mit_crawler(link)
# Exports data to a formatted csv file, this will be replaced with multithreaded API calls to the Cassandra Prisma Database
# or on the cloud in production, it will be sent to the S3 temporary database to be picked up by the AWS Lambda funtion which will push it to the Cassandra Database
def export_to_csv(csv_data):
with open('web_crawl_data.csv',mode='w') as csv_file:
field_names = ['Title','URL extension','External Website Logo','URL(href)','Description','Course logo URL']
csv_writer = csv.DictWriter(csv_file, fieldnames=field_names)#delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writeheader()
for course in csv_data:
course_data = {
'Title':course[0],
'URL extension':course[1],
'External Website Logo':course[2],
'URL(href)':course[3],
'Description':course[4],
'Course logo URL':course[5],
}
csv_writer.writerow(course_data)
```
| 42.58209 | 164 | 0.667368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,311 | 0.459516 |
d9efa4ffda8cacd286187e29ce110d292c7a1e64 | 946 | py | Python | clpy/sparse/util.py | fixstars/clpy @ 693485f85397cc110fa45803c36c30c24c297df0 | licenses: ["BSD-3-Clause"] | stars: 142 (2018-06-07T07:43:10.000Z to 2021-10-30T21:06:32.000Z) | issues: 282 (2018-06-07T08:35:03.000Z to 2021-03-31T03:14:32.000Z) | forks: 19 (2018-06-19T11:07:53.000Z to 2021-05-13T20:57:04.000Z)

```python
import clpy
import clpy.sparse.base
_preamble_atomic_add = '''
#if __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val) {
unsigned long long* address_as_ull =
(unsigned long long*)address;
unsigned long long old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
'''
def isintlike(x):
try:
return bool(int(x) == x)
except (TypeError, ValueError):
return False
def isscalarlike(x):
return clpy.isscalar(x) or (clpy.sparse.base.isdense(x) and x.ndim == 0)
def isshape(x):
if not isinstance(x, tuple) or len(x) != 2:
return False
m, n = x
return isintlike(m) and isintlike(n)
```
| 24.25641 | 76 | 0.60148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 524 | 0.553911 |
d9f9cd4e7a0b73e79eb71d2bdbfa755d69a9cc9d | 597 | py | Python | examples/first_char_last_column.py | stars repo: clarkfitzg/sta141c @ 129704ba0952a4b80f9b093dcfa49f49f37b052d; issues/forks repo: timilchene/sta141c-winter19 @ 129704ba0952a4b80f9b093dcfa49f49f37b052d | licenses: ["MIT"] | stars: 24 (2019-01-08T20:10:11.000Z to 2021-11-26T12:18:58.000Z) | issues: 1 (2017-06-25T05:35:24.000Z to 2017-06-25T05:35:24.000Z) | forks: 22 (2019-01-08T20:02:15.000Z to 2021-12-16T23:27:56.000Z)

```python
#!/usr/bin/env python3
"""
For the last column, print only the first character.
Usage:
$ printf "100,200\n0,\n" | python3 first_char_last_column.py
Should print "100,2\n0,"
"""
import csv
from sys import stdin, stdout
def main():
reader = csv.reader(stdin)
writer = csv.writer(stdout)
for row in reader:
try:
row[-1] = row[-1][0]
except IndexError:
# Python: Better to ask forgiveness than permission
# Alternative: Look before you leap
pass
writer.writerow(row)
if __name__ == "__main__":
main()
```
| 19.258065 | 64 | 0.606365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 277 | 0.463987 |
8a045d9a56c4a8715b77c0b2cd2d5ff977fa98ed | 609 | py | Python | conf/feature_config.py | pupuwudi/nlp_xiaojiang @ 182ac4522b6012a52de6e1d0db7e6a47cb716e5b | licenses: ["MIT"] | stars: null | issues: null | forks: 2 (2021-01-18T10:07:20.000Z to 2022-01-12T10:09:47.000Z)

```python
# -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/5/10 9:13
# @author :Mo
# @function :path of FeatureProject
import pathlib
import sys
import os
# base dir
projectdir = str(pathlib.Path(os.path.abspath(__file__)).parent.parent)
sys.path.append(projectdir)
# path of BERT model
model_dir = projectdir + '/Data/chinese_L-12_H-768_A-12'
config_name = model_dir + '/bert_config.json'
ckpt_name = model_dir + '/bert_model.ckpt'
vocab_file = model_dir + '/vocab.txt'
# GPU memory usage fraction
gpu_memory_fraction = 0.32
# By default, take the output of the second-to-last layer as the sentence vector
layer_indexes = [-2]
# Maximum sequence length
max_seq_len = 32
```
| 22.555556 | 72 | 0.689655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 328 | 0.494721 |
8a1292fe9e365e4f3b12243aeeeb62b3fcd34222 | 1,067 | py | Python | MIT/600.1x - Introduction to Computer Science and Programming Using Python/Unit 4/Problem Set 4/get_word_score.py | henriqueumeda/-Python-study @ 28e93a377afa4732037a29eb74d4bc7c9e24b62f | licenses: ["MIT"] | stars: null | issues: null | forks: null

```python
SCRABBLE_LETTER_VALUES = {
'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1,
'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
def getWordScore(word, n):
"""
Returns the score for a word. Assumes the word is a valid word.
The score for a word is the sum of the points for letters in the
word, multiplied by the length of the word, PLUS 50 points if all n
letters are used on the first turn.
Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is
worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES)
word: string (lowercase letters)
n: integer (HAND_SIZE; i.e., hand size required for additional points)
returns: int >= 0
"""
total_points = 0
for letter in word:
total_points += SCRABBLE_LETTER_VALUES[letter]
total_points *= len(word)
if len(word) == n:
total_points += 50
return total_points
print(getWordScore('waybill', 7))
```
| 35.566667 | 115 | 0.585754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 636 | 0.596064 |
8a15ab57e7398ab067062419a83d15fd9bf34d36 | 434 | py | Python | ex062.py | noahbarros/Python-Exercises @ fafda898473bc984280e201ed11d8ad76cc8624a | licenses: ["MIT"] | stars: 1 (2021-07-13T21:41:00.000Z to 2021-07-13T21:41:00.000Z) | issues: null | forks: null

```python
primeiro = int(input('Digite o priemiro termo da PA: '))
razão = int(input('Digite a razão da PA: '))
termo = primeiro
cont = 1
total = 0
mais = 10
while mais != 0:
total += mais
while cont <= total:
print(f'{termo} ', end='')
termo += razão
cont += 1
print('Pausa')
mais = int(input('Quantos termos você quer usar a mais? '))
print(f'a progressão foi finalizada com {total} termos mostrados')
| 27.125 | 66 | 0.612903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.407745 |
8a19876a956cc7df8eee4ce39d6fc5531c4cfc7c | 3,401 | py | Python | src/api/datamanage/pro/lifecycle/data_trace/data_set_create.py | Chromico/bk-base @ be822d9bbee544a958bed4831348185a75604791 | licenses: ["MIT"] | stars: 84 (2021-06-30T06:20:23.000Z to 2022-03-22T03:05:49.000Z) | issues: 7 (2021-06-30T06:21:16.000Z to 2022-03-29T07:36:13.000Z) | forks: 40 (2021-06-30T06:21:26.000Z to 2022-03-29T12:42:26.000Z)

```python
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from copy import deepcopy
from datamanage.pro import exceptions as dm_pro_errors
from datamanage.utils.api import MetaApi
from datamanage.pro.utils.time import utc_to_local, str_to_datetime
from datamanage.pro.lifecycle.models_dict import (
DATASET_CREATE_MAPPINGS,
DATASET_CREATE_EVENT_INFO_DICT,
DataTraceShowType,
ComplexSearchBackendType,
DataTraceFinishStatus,
)
def get_dataset_create_info(dataset_id, dataset_type):
"""获取数据足迹中和数据创建相关信息
:param dataset_id: 数据id
:param dataset_type: 数据类型
:return: 数据创建相关信息
:rtype: list
"""
# 1)从dgraph中获取数据创建相关信息
data_set_create_info_statement = """
{
get_dataset_create_info(func: eq(%s, "%s")){created_by created_at}
}
""" % (
DATASET_CREATE_MAPPINGS[dataset_type]['data_set_pk'],
dataset_id,
)
query_result = MetaApi.complex_search(
{"backend_type": ComplexSearchBackendType.DGRAPH.value, "statement": data_set_create_info_statement}, raw=True
)
create_info_ret = query_result['data']['data']['get_dataset_create_info']
if not (isinstance(create_info_ret, list) and create_info_ret):
raise dm_pro_errors.GetDataSetCreateInfoError(message_kv={'dataset_id': dataset_id})
    # 2) Build the formatted creation information
create_trace_dict = deepcopy(DATASET_CREATE_EVENT_INFO_DICT)
create_trace_dict.update(
{
"sub_type": dataset_type,
"sub_type_alias": DATASET_CREATE_MAPPINGS[dataset_type]['data_set_create_alias'],
"description": DATASET_CREATE_MAPPINGS[dataset_type]['data_set_create_alias'],
"created_at": utc_to_local(create_info_ret[0]['created_at']),
"created_by": create_info_ret[0]['created_by'],
"show_type": DataTraceShowType.DISPLAY.value,
"datetime": str_to_datetime(utc_to_local(create_info_ret[0]['created_at'])),
"status": DataTraceFinishStatus.STATUS,
"status_alias": DataTraceFinishStatus.STATUS_ALIAS,
}
)
return [create_trace_dict]
```
| 44.168831 | 118 | 0.728021 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,039 | 0.576151 |
8a20fc9b93bd3fc7e19c79190d5875b049bc7526 | 4,136 | py | Python | build/lib/FinMesh/usgov/__init__.py | johnjdailey/FinMesh @ 64048b02bfec1a24de840877b38e82f4fa813d22 | licenses: ["MIT"] | stars: 1 (2020-08-14T16:09:54.000Z to 2020-08-14T16:09:54.000Z) | issues: null | forks: null

```python
import os
import requests
import xmltodict
import csv
import json
# # # # # # # # # #
# FRED DATA BELOW #
# # # # # # # # # #
FRED_BASE_URL = 'https://api.stlouisfed.org/fred/'
GEOFRED_BASE_URL = 'https://api.stlouisfed.org/geofred/'
def append_fred_token(url):
token = os.getenv('FRED_TOKEN')
return f'{url}&api_key={token}'
FRED_SERIES_OBS_URL = FRED_BASE_URL + 'series/observations?'
def fred_series(series, file_type=None, realtime_start=None, realtime_end=None, limit=None, offset=None, sort_order=None, observation_start=None, observation_end=None, units=None, frequency=None, aggregation_method=None, output_type=None, vintage_dates=None):
## Returns time series historical data for the requested FRED data.
url = FRED_SERIES_OBS_URL + f'series_id={series}'
if file_type: url += f'&file_type={file_type}'
if realtime_start: url += f'&realtime_start={realtime_start}'
if realtime_end: url += f'&realtime_end={realtime_end}'
if limit: url += f'&limit={limit}'
if offset: url += f'&offset={offset}'
if sort_order: url += f'&sort_order={sort_order}'
if observation_start: url += f'&observation_start={observation_start}'
if observation_end: url += f'&observation_end={observation_end}'
if units: url += f'&units={units}'
if frequency: url += f'&frequency={frequency}'
if aggregation_method: url += f'&aggregation_method={aggregation_method}'
if output_type: url += f'&output_type={output_type}'
if vintage_dates: url += f'&vintage_dates={vintage_dates}'
url = append_fred_token(url)
result = requests.get(url)
return result.text
GEOFRED_SERIES_META_URL = GEOFRED_BASE_URL + 'series/group?'
def geofred_series_meta(series_id, file_type=None):
## Returns meta data for the requested FRED data.
url = GEOFRED_SERIES_META_URL + f'series_id={series_id}'
if file_type: url += f'&file_type={file_type}'
url = append_fred_token(url)
result = requests.get(url)
return result.text
GEOFRED_REGIONAL_SERIES_URL = GEOFRED_BASE_URL + 'series/data?'
def geofred_regional_series(series_id, file_type=None, date=None, start_date=None):
## Returns the historical, geographically organized time series data for the requested FRED data.
url = GEOFRED_REGIONAL_SERIES_URL + f'series_id={series_id}'
if file_type: url += f'&file_type={file_type}'
if date: url += f'&date={date}'
if start_date: url += f'&start_date={start_date}'
url = append_fred_token(url)
result = requests.get(url)
return result.text
# # # # # # # # # # # # # # # #
# GOVERNMENT YIELD CURVE DATA #
# # # # # # # # # # # # # # # #
GOV_YIELD_URL = 'https://data.treasury.gov/feed.svc/DailyTreasuryYieldCurveRateData?$filter=month(NEW_DATE)%20eq%204%20and%20year(NEW_DATE)%20eq%202019'
def get_yield():
## Returns government treasury bond yields. Organized in Python dictionary format by bond length.
# Formatting of XML to Python Dict
curve = requests.get(GOV_YIELD_URL)
parse_curve = xmltodict.parse(curve.content)
# This is based around retrieving the n last dates or average of n days.
feed = parse_curve['feed']
entry = feed['entry']
last_entry = len(entry)-1
content = entry[last_entry]['content']['m:properties']
# Dict that contains the whole yield curve so there is no need to bring in each rate.
yield_curve_values = {
'date' : entry[last_entry]['content']['m:properties']['d:NEW_DATE']['#text'],
'1month' : float(content['d:BC_1MONTH']['#text']),
'2month' : float(content['d:BC_2MONTH']['#text']),
'3month' : float(content['d:BC_3MONTH']['#text']),
'6month' : float(content['d:BC_6MONTH']['#text']),
'1year' : float(content['d:BC_1YEAR']['#text']),
'2year' : float(content['d:BC_2YEAR']['#text']),
'3year' : float(content['d:BC_3YEAR']['#text']),
'5year' : float(content['d:BC_5YEAR']['#text']),
'10year' : float(content['d:BC_10YEAR']['#text']),
'20year' : float(content['d:BC_20YEAR']['#text']),
'30year' : float(content['d:BC_30YEAR']['#text']),
}
return yield_curve_values
```
| 44 | 259 | 0.676499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,869 | 0.451886 |
8a29eefe067ae42942e4915562e64419af3d1cde | 950 | py | Python | scripts_python3/exchange/deleteExchange.py | bcvsolutions/winrm-ad-connector | 9b45dae78d3ba24fe6b00e090f8763d3162e1570 | [
"Apache-2.0"
] | null | null | null | scripts_python3/exchange/deleteExchange.py | bcvsolutions/winrm-ad-connector | 9b45dae78d3ba24fe6b00e090f8763d3162e1570 | [
"Apache-2.0"
] | 2 | 2020-05-27T07:15:28.000Z | 2020-12-17T05:22:54.000Z | scripts_python3/exchange/deleteExchange.py | bcvsolutions/winrm-ad-connector | 9b45dae78d3ba24fe6b00e090f8763d3162e1570 | [
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# All params from IdM are stored in the environment; you can read them via os.environ["paramName"]
import sys, os
# this is needed for importing file winrm_wrapper from parent dir
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import winrm_wrapper
import codecs
uid = os.environ["__UID__"]
winrm_wrapper.writeLog("Delete start for " + uid)
# Load PS script from file and replace params
winrm_wrapper.writeLog("loading script")
f = codecs.open(os.environ["script"], encoding='utf-8', mode='r')
command = f.read()
command = command.replace("$uid", uid)
# Call wrapper
winrm_wrapper.executeScript(os.environ["endpoint"], os.environ["authentication"], os.environ["user"],
os.environ["password"], os.environ["caTrustPath"], os.environ["ignoreCaValidation"], command, uid)
winrm_wrapper.writeLog("Delete end for " + uid)
print("__UID__=" + uid)
sys.exit()
| 35.185185 | 134 | 0.705263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 437 | 0.46 |
8a2f400a7655554fbc57b5f622cd3afad8069e45 | 427 | py | Python | gcp-python-fn/main.py | FuriKuri/faas-playground | 52618e21064e327d2874d2b73cfe5fb247d3dd6e | [
"MIT"
] | 1 | 2019-05-07T13:15:16.000Z | 2019-05-07T13:15:16.000Z | gcp-python-fn/main.py | FuriKuri/faas-playground | 52618e21064e327d2874d2b73cfe5fb247d3dd6e | [
"MIT"
] | null | null | null | gcp-python-fn/main.py | FuriKuri/faas-playground | 52618e21064e327d2874d2b73cfe5fb247d3dd6e | [
"MIT"
] | null | null | null |
def hello_world(request):
request_json = request.get_json()
name = 'World'
if request_json and 'name' in request_json:
name = request_json['name']
headers = {
'Access-Control-Allow-Origin': 'https://furikuri.net',
'Access-Control-Allow-Methods': 'GET, POST',
'Access-Control-Allow-Headers': 'Content-Type'
}
return ('Hello ' + name + '! From GCP + Python', 200, headers)
| 35.583333 | 66 | 0.620609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.430913 |
8a30c3ee79ce2efcb14fdc2c9e26c3ab71e499c1 | 671 | py | Python | tests/test_i18n.py | vthriller/flask-kajiki | eadaa0aa45d23507066758b9e74091bddbc943c4 | [
"BSD-3-Clause"
] | null | null | null | tests/test_i18n.py | vthriller/flask-kajiki | eadaa0aa45d23507066758b9e74091bddbc943c4 | [
"BSD-3-Clause"
] | null | null | null | tests/test_i18n.py | vthriller/flask-kajiki | eadaa0aa45d23507066758b9e74091bddbc943c4 | [
"BSD-3-Clause"
] | null | null | null |
from kajiki import i18n
from flask import request
from flask_kajiki import render_template
# N. B. setting i18n.gettext would affect tests from all modules,
# so we test for request path that only functions from this module could set
def gettext(s):
if request.path == '/test_i18n':
return s.upper()
return s
i18n.gettext = gettext
def test_does_translations(app):
"""Callback interface is able to inject Translator filter"""
with app.test_request_context(path='/test_i18n'):
rendered = render_template('i18n.html')
# TODO DOCTYPE; see also render_args
expected = '<p>HELLO!</p>'
assert rendered == expected
| 27.958333 | 76 | 0.704918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.42921 |
8a3543c746387ad12029585c2e306e26ec984737 | 4,324 | py | Python | Deep_Q_Network/DQN_for_FrozenLake_Discrete_Domain.py | quangnguyendang/Reinforcement_Learning | 2551ce95068561c553500838ee6b976f001ba667 | [
"MIT"
] | null | null | null | Deep_Q_Network/DQN_for_FrozenLake_Discrete_Domain.py | quangnguyendang/Reinforcement_Learning | 2551ce95068561c553500838ee6b976f001ba667 | [
"MIT"
] | null | null | null | Deep_Q_Network/DQN_for_FrozenLake_Discrete_Domain.py | quangnguyendang/Reinforcement_Learning | 2551ce95068561c553500838ee6b976f001ba667 | [
"MIT"
] | null | null | null |
# Credit to https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0
import gym
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
env = gym.make('FrozenLake-v0')
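# FrozenLake-v0: 16 discrete states (4x4 grid) and 4 discrete actions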
# NEURAL NETWORK IMPLEMENTATION
tf.reset_default_graph()
# Feature vector for current state representation
input1 = tf.placeholder(shape=[1, env.observation_space.n], dtype=tf.float32)
# tf.Variable(<initial-value>, name=<optional-name>)
# tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)
# Weight matrix W initialized uniformly in [0, 0.01) (similar to the common "*0.01" initialization)
W = tf.Variable(tf.random_uniform([env.observation_space.n, env.action_space.n], 0, 0.01))
# Qout with shape [1, env.action_space.n] - Action state value for Q[s, a] with every a available at a state
Qout = tf.matmul(input1, W)
# Greedy action at a state
predict = tf.argmax(Qout, axis=1)
# Feature vector for next state representation
nextQ = tf.placeholder(shape=[1, env.action_space.n], dtype=tf.float32)
# Sum-of-squares (L2) loss between predicted and target Q-values
loss = tf.reduce_sum(tf.square(Qout - nextQ))
trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
updateModel = trainer.minimize(loss)
# TRAIN THE NETWORK
init = tf.global_variables_initializer()
# Set learning parameters
y = 0.99  # discount factor (gamma)
e = 0.1   # epsilon for the epsilon-greedy exploration policy
number_episodes = 2000
# List to store total rewards and steps per episode
jList = []
rList = []
with tf.Session() as sess:
sess.run(init)
for i in range(number_episodes):
print("Episode #{} is running!".format(i))
# First state
s = env.reset()
rAll = 0
d = False
j = 0
# Q network
while j < 200: # or While not d:
j += 1
# Choose action by epsilon (e) greedy
# print("s = ", s," --> Identity s:s+1: ", np.identity(env.observation_space.n)[s:s+1])
# s = 0 --> Identity s: s + 1: [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
# s = 1 --> Identity s: s + 1: [[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
# Identity [s:s+1] is a one-hot vector
# Therefore W is the actual Q value
a, allQ = sess.run([predict, Qout], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1]})
if np.random.rand(1) < e:
a[0] = env.action_space.sample()
s1, r, d, _ = env.step(a[0])
# Obtain next state Q value by feeding the new state throughout the network
Q1 = sess.run(Qout, feed_dict={input1: np.identity(env.observation_space.n)[s1:s1+1]})
maxQ1 = np.max(Q1)
targetQ = allQ
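            # Q-learning target for the chosen action: immediate reward plus discounted max Q of the next state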
targetQ[0, a[0]] = r + y * maxQ1
# Train our network using target and predicted Q values
_, W1 = sess.run([updateModel, W], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1], nextQ: targetQ})
rAll += r
s = s1
if d:
e = 1./((i/50) + 10)
break
jList.append(j)
rList.append(rAll)
env.close()
plt.figure()
plt.plot(rList, label="Return - Q Learning")
plt.show()
plt.figure()
plt.plot(jList, label="Steps - Q Learning")
plt.show()
# -------------------------------------------------------------------------
# TABULAR IMPLEMENTATION
#
# # Set learning parameters
# lr = 0.8
# y = 0.95
# number_episodes = 20000
#
# # Initial table with all zeros
# Q = np.zeros([env.observation_space.n, env.action_space.n])
#
# # List of reward and steps per episode
# rList = []
# for i in range (number_episodes):
# print("Episode #{} is running!".format(i))
# s = env.reset()
# rAll = 0
# d = False
# j = 0
# while j < 99:
# j += 1
# # Choose an action by greedily (with noise) picking from Q table
# # Because of the noise, it is epsilon-greedy with epsilon decreasing over time
# a = np.argmax(Q[s, :] + np.random.rand(1, env.action_space.n)*(1./(i + 1)))
# s1, r, d, _ = env.step(a)
# # env.render()
#
# # Update Q table with new knowledge
# Q[s, a] = Q[s, a] + lr * (r + y * np.max(Q[s1, :]) - Q[s, a])
# rAll += r
# s = s1
# if d:
# break
# rList.append(rAll)
| 30.666667 | 155 | 0.586725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,371 | 0.548335 |
8a3651a34d3b1893e6f70ebe64b9db39d329cd63 | 8,496 | py | Python | testing/cross_language/util/supported_key_types.py | chanced/tink | 9cc3a01ac0165b033ed51dc9d0812a98b4b6e305 | [
"Apache-2.0"
] | null | null | null | testing/cross_language/util/supported_key_types.py | chanced/tink | 9cc3a01ac0165b033ed51dc9d0812a98b4b6e305 | [
"Apache-2.0"
] | null | null | null | testing/cross_language/util/supported_key_types.py | chanced/tink | 9cc3a01ac0165b033ed51dc9d0812a98b4b6e305 | [
"Apache-2.0"
] | 1 | 2022-01-02T20:54:04.000Z | 2022-01-02T20:54:04.000Z |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All KeyTypes and which languages support them."""
# Placeholder for import for type annotations
from tink import aead
from tink import daead
from tink import hybrid
from tink import mac
from tink import prf
from tink import signature
from tink import streaming_aead
from tink.proto import tink_pb2
# All languages supported by cross-language tests.
ALL_LANGUAGES = ['cc', 'java', 'go', 'python']
# All KeyTypes (without the prefix 'type.googleapis.com/google.crypto.tink.')
AEAD_KEY_TYPES = [
'AesEaxKey',
'AesGcmKey',
'AesGcmSivKey',
'AesCtrHmacAeadKey',
'ChaCha20Poly1305Key',
'XChaCha20Poly1305Key',
]
DAEAD_KEY_TYPES = ['AesSivKey']
STREAMING_AEAD_KEY_TYPES = [
'AesCtrHmacStreamingKey',
'AesGcmHkdfStreamingKey',
]
HYBRID_PRIVATE_KEY_TYPES = ['EciesAeadHkdfPrivateKey']
MAC_KEY_TYPES = [
'AesCmacKey',
'HmacKey',
]
SIGNATURE_KEY_TYPES = [
'EcdsaPrivateKey',
'Ed25519PrivateKey',
'RsaSsaPkcs1PrivateKey',
'RsaSsaPssPrivateKey',
]
PRF_KEY_TYPES = [
'AesCmacPrfKey',
'HmacPrfKey',
'HkdfPrfKey',
]
ALL_KEY_TYPES = (
AEAD_KEY_TYPES + DAEAD_KEY_TYPES + STREAMING_AEAD_KEY_TYPES +
HYBRID_PRIVATE_KEY_TYPES + MAC_KEY_TYPES + SIGNATURE_KEY_TYPES +
PRF_KEY_TYPES)
# All languages that are supported by a KeyType
SUPPORTED_LANGUAGES = {
'AesEaxKey': ['cc', 'java', 'python'],
'AesGcmKey': ['cc', 'java', 'go', 'python'],
'AesGcmSivKey': ['cc', 'python'],
'AesCtrHmacAeadKey': ['cc', 'java', 'go', 'python'],
'ChaCha20Poly1305Key': ['java', 'go'],
'XChaCha20Poly1305Key': ['cc', 'java', 'go', 'python'],
'AesSivKey': ['cc', 'java', 'go', 'python'],
'AesCtrHmacStreamingKey': ['cc', 'java', 'go', 'python'],
'AesGcmHkdfStreamingKey': ['cc', 'java', 'go', 'python'],
'EciesAeadHkdfPrivateKey': ['cc', 'java', 'go', 'python'],
'AesCmacKey': ['cc', 'java', 'go', 'python'],
'HmacKey': ['cc', 'java', 'go', 'python'],
'EcdsaPrivateKey': ['cc', 'java', 'go', 'python'],
'Ed25519PrivateKey': ['cc', 'java', 'go', 'python'],
'RsaSsaPkcs1PrivateKey': ['cc', 'java', 'python'],
'RsaSsaPssPrivateKey': ['cc', 'java', 'python'],
'AesCmacPrfKey': ['cc', 'java', 'go', 'python'],
'HmacPrfKey': ['cc', 'java', 'go', 'python'],
'HkdfPrfKey': ['cc', 'java', 'go', 'python'],
}
KEY_TYPE_FROM_URL = {
'type.googleapis.com/google.crypto.tink.' + key_type: key_type
for key_type in ALL_KEY_TYPES}
# For each KeyType, a list of all KeyTemplate Names that must be supported.
KEY_TEMPLATE_NAMES = {
'AesEaxKey': ['AES128_EAX', 'AES256_EAX'],
'AesGcmKey': ['AES128_GCM', 'AES256_GCM'],
'AesGcmSivKey': ['AES128_GCM_SIV', 'AES256_GCM_SIV'],
'AesCtrHmacAeadKey': ['AES128_CTR_HMAC_SHA256', 'AES256_CTR_HMAC_SHA256'],
'ChaCha20Poly1305Key': ['CHACHA20_POLY1305'],
'XChaCha20Poly1305Key': ['XCHACHA20_POLY1305'],
'AesSivKey': ['AES256_SIV'],
'AesCtrHmacStreamingKey': [
'AES128_CTR_HMAC_SHA256_4KB',
'AES256_CTR_HMAC_SHA256_4KB',
],
'AesGcmHkdfStreamingKey': [
'AES128_GCM_HKDF_4KB',
'AES256_GCM_HKDF_4KB',
'AES256_GCM_HKDF_1MB',
],
'EciesAeadHkdfPrivateKey': [
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM',
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256'
],
'AesCmacKey': ['AES_CMAC'],
'HmacKey': [
'HMAC_SHA256_128BITTAG', 'HMAC_SHA256_256BITTAG',
'HMAC_SHA512_256BITTAG', 'HMAC_SHA512_512BITTAG'
],
'EcdsaPrivateKey': [
'ECDSA_P256', 'ECDSA_P384', 'ECDSA_P384_SHA384', 'ECDSA_P521',
'ECDSA_P256_IEEE_P1363', 'ECDSA_P384_IEEE_P1363',
'ECDSA_P384_SHA384_IEEE_P1363', 'ECDSA_P521_IEEE_P1363'
],
'Ed25519PrivateKey': ['ED25519'],
'RsaSsaPkcs1PrivateKey': [
'RSA_SSA_PKCS1_3072_SHA256_F4', 'RSA_SSA_PKCS1_4096_SHA512_F4'
],
'RsaSsaPssPrivateKey': [
'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4',
'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4'
],
'AesCmacPrfKey': ['AES_CMAC_PRF'],
'HmacPrfKey': ['HMAC_PRF_SHA256', 'HMAC_PRF_SHA512'],
'HkdfPrfKey': ['HKDF_PRF_SHA256'],
}
# KeyTemplate (as Protobuf) for each KeyTemplate name.
KEY_TEMPLATE = {
'AES128_EAX':
aead.aead_key_templates.AES128_EAX,
'AES256_EAX':
aead.aead_key_templates.AES256_EAX,
'AES128_GCM':
aead.aead_key_templates.AES128_GCM,
'AES256_GCM':
aead.aead_key_templates.AES256_GCM,
'AES128_GCM_SIV':
aead.aead_key_templates.AES128_GCM_SIV,
'AES256_GCM_SIV':
aead.aead_key_templates.AES256_GCM_SIV,
'AES128_CTR_HMAC_SHA256':
aead.aead_key_templates.AES128_CTR_HMAC_SHA256,
'AES256_CTR_HMAC_SHA256':
aead.aead_key_templates.AES256_CTR_HMAC_SHA256,
'CHACHA20_POLY1305':
tink_pb2.KeyTemplate(
type_url=('type.googleapis.com/google.crypto.tink.' +
'ChaCha20Poly1305Key'),
output_prefix_type=tink_pb2.TINK),
'XCHACHA20_POLY1305':
aead.aead_key_templates.XCHACHA20_POLY1305,
'AES256_SIV':
daead.deterministic_aead_key_templates.AES256_SIV,
'AES128_CTR_HMAC_SHA256_4KB':
streaming_aead.streaming_aead_key_templates.AES128_CTR_HMAC_SHA256_4KB,
'AES256_CTR_HMAC_SHA256_4KB':
streaming_aead.streaming_aead_key_templates.AES256_CTR_HMAC_SHA256_4KB,
'AES128_GCM_HKDF_4KB':
streaming_aead.streaming_aead_key_templates.AES128_GCM_HKDF_4KB,
'AES256_GCM_HKDF_4KB':
streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_4KB,
'AES256_GCM_HKDF_1MB':
streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_1MB,
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM':
hybrid.hybrid_key_templates.ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM,
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256':
hybrid.hybrid_key_templates
.ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256,
'AES_CMAC':
mac.mac_key_templates.AES_CMAC,
'HMAC_SHA256_128BITTAG':
mac.mac_key_templates.HMAC_SHA256_128BITTAG,
'HMAC_SHA256_256BITTAG':
mac.mac_key_templates.HMAC_SHA256_256BITTAG,
'HMAC_SHA512_256BITTAG':
mac.mac_key_templates.HMAC_SHA512_256BITTAG,
'HMAC_SHA512_512BITTAG':
mac.mac_key_templates.HMAC_SHA512_512BITTAG,
'ECDSA_P256':
signature.signature_key_templates.ECDSA_P256,
'ECDSA_P384':
signature.signature_key_templates.ECDSA_P384,
'ECDSA_P384_SHA384':
signature.signature_key_templates.ECDSA_P384_SHA384,
'ECDSA_P521':
signature.signature_key_templates.ECDSA_P521,
'ECDSA_P256_IEEE_P1363':
signature.signature_key_templates.ECDSA_P256_IEEE_P1363,
'ECDSA_P384_IEEE_P1363':
signature.signature_key_templates.ECDSA_P384_IEEE_P1363,
'ECDSA_P384_SHA384_IEEE_P1363':
signature.signature_key_templates.ECDSA_P384_SHA384_IEEE_P1363,
'ECDSA_P521_IEEE_P1363':
signature.signature_key_templates.ECDSA_P521_IEEE_P1363,
'ED25519':
signature.signature_key_templates.ED25519,
'RSA_SSA_PKCS1_3072_SHA256_F4':
signature.signature_key_templates.RSA_SSA_PKCS1_3072_SHA256_F4,
'RSA_SSA_PKCS1_4096_SHA512_F4':
signature.signature_key_templates.RSA_SSA_PKCS1_4096_SHA512_F4,
'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4':
signature.signature_key_templates.RSA_SSA_PSS_3072_SHA256_SHA256_32_F4,
'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4':
signature.signature_key_templates.RSA_SSA_PSS_4096_SHA512_SHA512_64_F4,
'AES_CMAC_PRF':
prf.prf_key_templates.AES_CMAC,
'HMAC_PRF_SHA256':
prf.prf_key_templates.HMAC_SHA256,
'HMAC_PRF_SHA512':
prf.prf_key_templates.HMAC_SHA512,
'HKDF_PRF_SHA256':
prf.prf_key_templates.HKDF_SHA256,
}
SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME = {
name: SUPPORTED_LANGUAGES[KEY_TYPE_FROM_URL[template.type_url]]
for name, template in KEY_TEMPLATE.items()
}
| 37.263158 | 79 | 0.711982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,121 | 0.485052 |
8a43f4805ca2bfbefacf005fd91befea7f1c3e71 | 492 | py | Python | gen-cfg.py | magetron/secure-flow-prototype | c683939620fec889f882ea095d2b27e3e4bb98fe | [
"Apache-2.0"
] | null | null | null | gen-cfg.py | magetron/secure-flow-prototype | c683939620fec889f882ea095d2b27e3e4bb98fe | [
"Apache-2.0"
] | null | null | null | gen-cfg.py | magetron/secure-flow-prototype | c683939620fec889f882ea095d2b27e3e4bb98fe | [
"Apache-2.0"
] | null | null | null |
from staticfg import CFGBuilder
userCfg = CFGBuilder().build_from_file('user.py', './auction/user.py')
bidCfg = CFGBuilder().build_from_file('bid.py', './auction/bid.py')
auctionCfg = CFGBuilder().build_from_file('auction.py','./auction/auction.py')
#auctionEventCfg = CFGBuilder().build_from_file('auction_event.py','./auction/auction_event.py')
bidCfg.build_visual('bidCfg', 'pdf')
auctionCfg.build_visual('auctionCfg', 'pdf')
#auctionEventCfg.build_visual('auctionEventCfg.pdf', 'pdf')
| 41 | 96 | 0.760163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 273 | 0.554878 |
8a4ccded7f4f9f9be895e48e8a31955a7046241e | 4,371 | py | Python | dddppp/settings.py | tysonclugg/dddppp | 22f52d671ca71c2df8d6ac566a1626e5f05b3159 | [
"MIT"
] | null | null | null | dddppp/settings.py | tysonclugg/dddppp | 22f52d671ca71c2df8d6ac566a1626e5f05b3159 | [
"MIT"
] | null | null | null | dddppp/settings.py | tysonclugg/dddppp | 22f52d671ca71c2df8d6ac566a1626e5f05b3159 | [
"MIT"
] | null | null | null |
"""
Django settings for dddppp project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import pkg_resources
import pwd
PROJECT_NAME = 'dddppp'
# Enforce a valid POSIX environment
# Get missing environment variables via call to pwd.getpwuid(...)
_PW_CACHE = None
_PW_MAP = {
'LOGNAME': 'pw_name',
'USER': 'pw_name',
'USERNAME': 'pw_name',
'UID': 'pw_uid',
'GID': 'pw_gid',
'HOME': 'pw_dir',
'SHELL': 'pw_shell',
}
for _missing_env in set(_PW_MAP).difference(os.environ):
if _PW_CACHE is None:
_PW_CACHE = pwd.getpwuid(os.getuid())
os.environ[_missing_env] = str(getattr(_PW_CACHE, _PW_MAP[_missing_env]))
del _PW_CACHE, _PW_MAP, pwd
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nfd_lvt=&k#h#$a^_l09j#5%s=mg+0aw=@t84ry$&rps43c33+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'localhost',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dddp',
'dddp.server',
'dddp.accounts',
'dddppp.slides',
]
for (requirement, pth) in [
('django-extensions', 'django_extensions'),
]:
try:
pkg_resources.get_distribution(requirement)
except (
pkg_resources.DistributionNotFound,
pkg_resources.VersionConflict,
):
continue
INSTALLED_APPS.append(pth)
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'django.middleware.security.SecurityMiddleware',
]
ROOT_URLCONF = 'dddppp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dddppp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('PGDATABASE', PROJECT_NAME),
'USER': os.environ.get('PGUSER', os.environ['LOGNAME']),
'PASSWORD': os.environ.get('DJANGO_DATABASE_PASSWORD', ''),
'HOST': os.environ.get('PGHOST', ''),
'PORT': os.environ.get('PGPORT', ''),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-au'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# django-secure
# see: https://github.com/carljm/django-secure/ for more options
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#SECURE_SSL_REDIRECT = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_FRAME_DENY = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
DDDPPP_CONTENT_TYPES = []
PROJ_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
| 26.981481 | 77 | 0.695722 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,561 | 0.585907 |
8a4fee7da31280c4ead726e734baac5bb3fc023e | 1,227 | py | Python | setup.py | dantas/wifi | e9cd6df7d3411f1532843999f6c33f45369c3fe4 | [
"BSD-2-Clause"
] | 1 | 2019-04-29T14:57:45.000Z | 2019-04-29T14:57:45.000Z | setup.py | dantas/wifi | e9cd6df7d3411f1532843999f6c33f45369c3fe4 | [
"BSD-2-Clause"
] | null | null | null | setup.py | dantas/wifi | e9cd6df7d3411f1532843999f6c33f45369c3fe4 | [
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup
import os
__doc__ = """
Command line tool and library wrappers around iwlist and
/etc/network/interfaces.
"""
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
install_requires = [
'setuptools',
'pbkdf2',
]
try:
import argparse
except:
install_requires.append('argparse')
version = '1.0.0'
setup(
name='wifi',
version=version,
author='Rocky Meza, Gavin Wahl',
author_email='rockymeza@gmail.com',
description=__doc__,
long_description=read('README.rst'),
packages=['wifi'],
scripts=['bin/wifi'],
test_suite='tests',
platforms=["Debian"],
license='BSD',
install_requires=install_requires,
classifiers=[
"License :: OSI Approved :: BSD License",
"Topic :: System :: Networking",
"Operating System :: POSIX :: Linux",
"Environment :: Console",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
],
data_files=[
('/etc/bash_completion.d/', ['extras/wifi-completion.bash']),
]
)
| 23.150943 | 70 | 0.625102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 580 | 0.472698 |
8a50f54c898793f1acb00252a2b2f5ed4e326667 | 790 | py | Python | setup.py | skojaku/fastnode2vec | bb65f68469f00f489fa6744d35b8756200b4e285 | [
"MIT"
] | 61 | 2020-04-21T18:58:47.000Z | 2022-03-26T22:41:45.000Z | setup.py | skojaku/fastnode2vec | bb65f68469f00f489fa6744d35b8756200b4e285 | [
"MIT"
] | 17 | 2020-04-21T22:37:17.000Z | 2022-03-31T22:36:03.000Z | setup.py | skojaku/fastnode2vec | bb65f68469f00f489fa6744d35b8756200b4e285 | [
"MIT"
] | 6 | 2020-07-30T01:41:59.000Z | 2022-01-19T10:13:01.000Z |
#!/usr/bin/env python3
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="fastnode2vec",
version="0.0.5",
author="Louis Abraham",
license="MIT",
author_email="louis.abraham@yahoo.fr",
description="Fast implementation of node2vec",
long_description=read("README.md"),
long_description_content_type="text/markdown",
url="https://github.com/louisabraham/fastnode2vec",
packages=["fastnode2vec"],
install_requires=["numpy", "numba", "gensim", "click", "tqdm"],
python_requires=">=3.6",
entry_points={"console_scripts": ["fastnode2vec = fastnode2vec.cli:node2vec"]},
classifiers=["Topic :: Scientific/Engineering :: Artificial Intelligence"],
)
| 29.259259 | 83 | 0.694937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 367 | 0.464557 |
8a54334c8ec0d2c98a16bb220c95973a631adeb1 | 3,810 | py | Python | unit_13/26-Data_Structures/4_Merge_Sort_and_Linked_Lists/3_linked_list_merge_sort.py | duliodenis/python_master_degree | 3ab76838ce2fc1606f28e988a3273dd27122a621 | [
"MIT"
] | 19 | 2019-03-14T01:39:32.000Z | 2022-02-03T00:36:43.000Z | unit_13/26-Data_Structures/4_Merge_Sort_and_Linked_Lists/3_linked_list_merge_sort.py | duliodenis/python_master_degree | 3ab76838ce2fc1606f28e988a3273dd27122a621 | [
"MIT"
] | 1 | 2020-04-10T01:01:16.000Z | 2020-04-10T01:01:16.000Z | unit_13/26-Data_Structures/4_Merge_Sort_and_Linked_Lists/3_linked_list_merge_sort.py | duliodenis/python_master_degree | 3ab76838ce2fc1606f28e988a3273dd27122a621 | [
"MIT"
] | 5 | 2019-01-02T20:46:05.000Z | 2020-07-08T22:47:48.000Z |
#
# Data Structures: Linked List Merge Sort: The Conquer Step
# Python Techdegree
#
# Created by Dulio Denis on 3/24/19.
# Copyright (c) 2019 ddApps. All rights reserved.
# ------------------------------------------------
from linked_list import Node, LinkedList
def merge_sort(linked_list):
'''
Sorts a linked list in ascending order.
    - Recursively divide the linked list into sublists containing a single node
    - Repeatedly merge the sublists to produce sorted sublists until one remains
Returns a sorted linked list.
Runs in O(kn log n) time.
'''
if linked_list.size() == 1:
return linked_list
elif linked_list.is_empty():
return linked_list
left_half, right_half = split(linked_list)
left = merge_sort(left_half)
right = merge_sort(right_half)
return merge(left, right)
def split(linked_list):
'''
Divide the unsorted list at the midpoint into sublists.
Takes O(k log n) quasilinear time.
'''
if linked_list == None or linked_list.head == None:
left_half = linked_list
right_half = None
return left_half, right_half
else: # non-empty linked lists
size = linked_list.size()
midpoint = size // 2
mid_node = linked_list.node_at_index(midpoint-1)
left_half = linked_list
right_half = LinkedList()
        right_half.head = mid_node.next_node
mid_node.next_node = None
return left_half, right_half
def merge(left, right):
'''
Merges two linked lists, sorting by data in nodes.
Returns a new, merged list.
Runs in O(n) linear time.
'''
# Create a new linked list that contains nodes from
# merging left and right
merged = LinkedList()
# Add a fake head that is discarded later to simplify code
merged.add(0)
# Set current to the head of the linked list
current = merged.head
# Obtain head nodes for left and right linked lists
left_head = left.head
right_head = right.head
# Iterate over left and right until we reach the tail node
# of either
while left_head or right_head:
# If the head node of the left is None, we're past the tail
# Add the node from right to merged linkned list
if left_head is None:
current.next_node = right_head
# Call next on right to set loop condition to False
right_head = right_head.next_node
# If the head node of right is None, we're past the tail
# Add the tail node from left to merged linked list
elif right_head is None:
current.next_node = left_head
# Call next on left to set loop condition to False
left_head = left_head.next_node
else:
# Not at either tail node
# Obtain node data to perform comparison operations
left_data = left_head.data
right_data = right_head.data
# If data on left is less than right, set current to left node
if left_data < right_data:
current.next_node = left_head
# Move left head to next node
left_head = left_head.next_node
# If data on left is greater than right, set current to right node
else:
current.next_node = right_head
# Move right head to next node
right_head = right_head.next_node
# Move current to next node
current = current.next_node
# Discard fake head and set first merged node as head
head = merged.head.next_node
merged.head = head
return merged
l = LinkedList()
l.add(10)
l.add(2)
l.add(44)
l.add(15)
l.add(200)
print(l)
sorted_linked_list = merge_sort(l)
print(sorted_linked_list)
| 32.288136 | 81 | 0.630971 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,722 | 0.451969 |
8a5438fd129b5b6996b6b2555c75bb6bb382b7d5 | 5,639 | py | Python | nearpy/examples/example2.py | samyoo78/NearPy | 1b534b864d320d875508e95cd2b76b6d8c07a90b | [
"MIT"
] | 624 | 2015-01-02T21:45:28.000Z | 2022-03-02T11:04:27.000Z | nearpy/examples/example2.py | samyoo78/NearPy | 1b534b864d320d875508e95cd2b76b6d8c07a90b | [
"MIT"
] | 65 | 2015-02-06T09:47:46.000Z | 2021-09-26T01:45:26.000Z | nearpy/examples/example2.py | samyoo78/NearPy | 1b534b864d320d875508e95cd2b76b6d8c07a90b | [
"MIT"
] | 136 | 2015-01-07T04:45:41.000Z | 2021-11-25T17:46:07.000Z |
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Ole Krause-Sparmann
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy
import scipy
import unittest
import time
from nearpy import Engine
from nearpy.distances import CosineDistance
from nearpy.hashes import RandomBinaryProjections, HashPermutations, HashPermutationMapper
def example2():
# Dimension of feature space
DIM = 100
# Number of data points (dont do too much because of exact search)
POINTS = 20000
##########################################################
print('Performing indexing with HashPermutations...')
t0 = time.time()
# Create permutations meta-hash
permutations = HashPermutations('permut')
# Create binary hash as child hash
rbp_perm = RandomBinaryProjections('rbp_perm', 14)
rbp_conf = {'num_permutation':50,'beam_size':10,'num_neighbour':100}
# Add rbp as child hash of permutations hash
permutations.add_child_hash(rbp_perm, rbp_conf)
# Create engine
engine_perm = Engine(DIM, lshashes=[permutations], distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_perm.store_vector(v)
# Then update permuted index
permutations.build_permuted_index()
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 3
print('\nNeighbour distances with HashPermutations:')
print(' -> Candidate count is %d' % engine_perm.candidate_count(query))
results = engine_perm.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix, query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
print('\nPerforming indexing with HashPermutationMapper...')
t0 = time.time()
# Create permutations meta-hash
permutations2 = HashPermutationMapper('permut2')
# Create binary hash as child hash
rbp_perm2 = RandomBinaryProjections('rbp_perm2', 14)
# Add rbp as child hash of permutations hash
permutations2.add_child_hash(rbp_perm2)
# Create engine
engine_perm2 = Engine(DIM, lshashes=[permutations2], distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_perm2.store_vector(v)
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 4
print('\nNeighbour distances with HashPermutationMapper:')
print(' -> Candidate count is %d' % engine_perm2.candidate_count(query))
results = engine_perm2.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix,query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
print('\nPerforming indexing with multiple binary hashes...')
t0 = time.time()
hashes = []
for k in range(20):
hashes.append(RandomBinaryProjections('rbp_%d' % k, 10))
# Create engine
engine_rbps = Engine(DIM, lshashes=hashes, distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_rbps.store_vector(v)
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 4
print('\nNeighbour distances with multiple binary hashes:')
print(' -> Candidate count is %d' % engine_rbps.candidate_count(query))
results = engine_rbps.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix,query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
| 32.039773 | 90 | 0.662529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,668 | 0.473134 |
8a60852354e6415290eaf2e5371028a21ee46376 | 1,004 | py | Python | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_personreid-res18_market1501_176_80_1.1G_1.3/code/core/data_manager.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | 1 | 2020-12-18T14:49:19.000Z | 2020-12-18T14:49:19.000Z | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_personreid-res50_market1501_256_128_5.4G_1.3/code/core/data_manager.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | null | null | null | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_personreid-res50_market1501_256_128_5.4G_1.3/code/core/data_manager.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import
import glob
import re
from os import path as osp
from .market1501 import Market1501
__factory = {
'market1501': Market1501
}
def get_names():
return list(__factory.keys())
def init_dataset(name, *args, **kwargs):
if name not in __factory.keys():
raise KeyError("Unknown datasets: {}".format(name))
return __factory[name](*args, **kwargs)
| 27.888889 | 74 | 0.737052 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.593625 |
8a62e622419e3b5175ed6a324e076188b956be4c | 2,313 | py | Python | azure-devops/azext_devops/vstsCompressed/service_hooks/v4_0/models/__init__.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | null | null | null | azure-devops/azext_devops/vstsCompressed/service_hooks/v4_0/models/__init__.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | 37 | 2020-04-27T07:45:19.000Z | 2021-04-05T07:27:15.000Z | azure-devops/azext_devops/vstsCompressed/service_hooks/v4_0/models/__init__.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .models import Consumer
from .models import ConsumerAction
from .models import Event
from .models import EventTypeDescriptor
from .models import ExternalConfigurationDescriptor
from .models import FormattedEventMessage
from .models import IdentityRef
from .models import InputDescriptor
from .models import InputFilter
from .models import InputFilterCondition
from .models import InputValidation
from .models import InputValue
from .models import InputValues
from .models import InputValuesError
from .models import InputValuesQuery
from .models import Notification
from .models import NotificationDetails
from .models import NotificationResultsSummaryDetail
from .models import NotificationsQuery
from .models import NotificationSummary
from .models import Publisher
from .models import PublisherEvent
from .models import PublishersQuery
from .models import ReferenceLinks
from .models import ResourceContainer
from .models import SessionToken
from .models import Subscription
from .models import SubscriptionsQuery
from .models import VersionedResource
__all__ = [
'Consumer',
'ConsumerAction',
'Event',
'EventTypeDescriptor',
'ExternalConfigurationDescriptor',
'FormattedEventMessage',
'IdentityRef',
'InputDescriptor',
'InputFilter',
'InputFilterCondition',
'InputValidation',
'InputValue',
'InputValues',
'InputValuesError',
'InputValuesQuery',
'Notification',
'NotificationDetails',
'NotificationResultsSummaryDetail',
'NotificationsQuery',
'NotificationSummary',
'Publisher',
'PublisherEvent',
'PublishersQuery',
'ReferenceLinks',
'ResourceContainer',
'SessionToken',
'Subscription',
'SubscriptionsQuery',
'VersionedResource',
]
| 33.042857 | 94 | 0.685257 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,056 | 0.45655 |
8a678b6dfe1f80688ee851169cd059181b03b309 | 5,922 | py | Python | electrum/dnssec.py | Jesusown/electrum | 0df05dd914c823acae1828cad3b20bdeb13150e9 | [
"MIT"
] | 5,905 | 2015-01-02T17:05:36.000Z | 2022-03-29T07:28:29.000Z | electrum/dnssec.py | Jesusown/electrum | 0df05dd914c823acae1828cad3b20bdeb13150e9 | [
"MIT"
] | 6,097 | 2015-01-01T21:20:25.000Z | 2022-03-31T23:55:01.000Z | electrum/dnssec.py | Jesusown/electrum | 0df05dd914c823acae1828cad3b20bdeb13150e9 | [
"MIT"
] | 2,202 | 2015-01-02T18:31:25.000Z | 2022-03-28T15:35:03.000Z |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Check DNSSEC trust chain.
# Todo: verify expiration dates
#
# Based on
# http://backreference.org/2010/11/17/dnssec-verification-with-dig/
# https://github.com/rthalley/dnspython/blob/master/tests/test_dnssec.py
import dns
import dns.name
import dns.query
import dns.dnssec
import dns.message
import dns.resolver
import dns.rdatatype
import dns.rdtypes.ANY.NS
import dns.rdtypes.ANY.CNAME
import dns.rdtypes.ANY.DLV
import dns.rdtypes.ANY.DNSKEY
import dns.rdtypes.ANY.DS
import dns.rdtypes.ANY.NSEC
import dns.rdtypes.ANY.NSEC3
import dns.rdtypes.ANY.NSEC3PARAM
import dns.rdtypes.ANY.RRSIG
import dns.rdtypes.ANY.SOA
import dns.rdtypes.ANY.TXT
import dns.rdtypes.IN.A
import dns.rdtypes.IN.AAAA
from .logging import get_logger
_logger = get_logger(__name__)
# hard-coded trust anchors (root KSKs)
trust_anchors = [
# KSK-2017:
dns.rrset.from_text('.', 1 , 'IN', 'DNSKEY', '257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU='),
# KSK-2010:
dns.rrset.from_text('.', 15202, 'IN', 'DNSKEY', '257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='),
]
def _check_query(ns, sub, _type, keys):
q = dns.message.make_query(sub, _type, want_dnssec=True)
response = dns.query.tcp(q, ns, timeout=5)
assert response.rcode() == 0, 'No answer'
answer = response.answer
assert len(answer) != 0, ('No DNS record found', sub, _type)
assert len(answer) != 1, ('No DNSSEC record found', sub, _type)
if answer[0].rdtype == dns.rdatatype.RRSIG:
rrsig, rrset = answer
elif answer[1].rdtype == dns.rdatatype.RRSIG:
rrset, rrsig = answer
else:
raise Exception('No signature set in record')
if keys is None:
keys = {dns.name.from_text(sub):rrset}
dns.dnssec.validate(rrset, rrsig, keys)
return rrset
def _get_and_validate(ns, url, _type):
# get trusted root key
root_rrset = None
for dnskey_rr in trust_anchors:
try:
# Check if there is a valid signature for the root dnskey
root_rrset = _check_query(ns, '', dns.rdatatype.DNSKEY, {dns.name.root: dnskey_rr})
break
except dns.dnssec.ValidationFailure:
# It's OK as long as one key validates
continue
if not root_rrset:
raise dns.dnssec.ValidationFailure('None of the trust anchors found in DNS')
keys = {dns.name.root: root_rrset}
# top-down verification
parts = url.split('.')
for i in range(len(parts), 0, -1):
sub = '.'.join(parts[i-1:])
name = dns.name.from_text(sub)
# If server is authoritative, don't fetch DNSKEY
query = dns.message.make_query(sub, dns.rdatatype.NS)
response = dns.query.udp(query, ns, 3)
assert response.rcode() == dns.rcode.NOERROR, "query error"
rrset = response.authority[0] if len(response.authority) > 0 else response.answer[0]
rr = rrset[0]
if rr.rdtype == dns.rdatatype.SOA:
continue
# get DNSKEY (self-signed)
rrset = _check_query(ns, sub, dns.rdatatype.DNSKEY, None)
# get DS (signed by parent)
ds_rrset = _check_query(ns, sub, dns.rdatatype.DS, keys)
# verify that a signed DS validates DNSKEY
for ds in ds_rrset:
for dnskey in rrset:
htype = 'SHA256' if ds.digest_type == 2 else 'SHA1'
good_ds = dns.dnssec.make_ds(name, dnskey, htype)
if ds == good_ds:
break
else:
continue
break
else:
raise Exception("DS does not match DNSKEY")
# set key for next iteration
keys = {name: rrset}
# get TXT record (signed by zone)
rrset = _check_query(ns, url, _type, keys)
return rrset
def query(url, rtype):
# 8.8.8.8 is Google's public DNS server
nameservers = ['8.8.8.8']
ns = nameservers[0]
try:
out = _get_and_validate(ns, url, rtype)
validated = True
except Exception as e:
_logger.info(f"DNSSEC error: {repr(e)}")
out = dns.resolver.resolve(url, rtype)
validated = False
return out, validated
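# Example usage (sketch; hostname is a placeholder): rrset, validated = query('example.com', dns.rdatatype.TXT)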
| 39.218543 | 418 | 0.700777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,763 | 0.466565 |