| content (stringlengths, 0–894k) | type (stringclasses, 2 values) |
| --- | --- |
# Generated by Django 3.1.2 on 2020-10-29 00:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('categories', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.CharField(help_text='Question title', max_length=100, unique=True)),
('answer', models.TextField(help_text='Question answer', max_length=500)),
('difficulty', models.CharField(choices=[('e', 'Easy'), ('i', 'Intermediate'), ('h', 'Hard')], db_index=True, help_text='Difficulty level of question', max_length=1)),
('is_public', models.BooleanField(default=True, help_text='Field specifies if user can see question instance')),
('author_email', models.EmailField(blank=True, default=None, help_text='Email address of question author', max_length=254, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('framework', models.ForeignKey(blank=True, default=None, help_text='Question framework category', null=True, on_delete=django.db.models.deletion.SET_NULL, to='categories.framework')),
('language', models.ForeignKey(blank=True, default=None, help_text='Question language category', null=True, on_delete=django.db.models.deletion.SET_NULL, to='categories.language')),
('team', models.ForeignKey(blank=True, default=None, help_text='Question team category', null=True, on_delete=django.db.models.deletion.SET_NULL, to='categories.team')),
],
options={
'ordering': ['-updated_at'],
},
),
migrations.CreateModel(
name='QuestionSuggestion',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('questions.question',),
),
]
| python |
#!/usr/bin/python2
import argparse
import traceback
from os.path import exists
from selenium import webdriver
from selenium.webdriver.common.by import By
import ipdb
from time import sleep
from random import random
from telegram_send import send
import re
from datetime import datetime
import pickle
def update_price_history(urls, xpaths, driver, history):
    for url, xpath in reversed(list(zip(urls, xpaths))):
driver.get(url)
sleep(10 + random()*10)
el = driver.find_element(By.XPATH, xpath)
send(['URL: {}'.format(url.split('.com.br')[0] + '.com.br')])
send(['Price: {}'.format(el.text)])
now = datetime.now()
history[url][now] = float(re.findall(
r'((?:\d+\.)?\d+),(\d+)', el.text)[0][0])
send(['Price (removing cents): {}'.format(
history[url][now])])
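# Illustration (added for reference, not part of the original script): the regex above
# targets Brazilian-style price strings such as "R$ 1.234,56", splitting the part before
# the comma from the cents, e.g.
#     re.findall(r'((?:\d+\.)?\d+),(\d+)', 'R$ 1.234,56')  ->  [('1.234', '56')]
# and only the first group is stored in the history, hence the "removing cents" message.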
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=('Monitor prices'))
parser.add_argument('--save', dest='price_path', type=str, required=True,
help=('path to pickle with price history'))
args = parser.parse_args()
driver = webdriver.Chrome()
urls = ['https://www.submarino.com.br/produto/11110021/piano-casio-digital-cdp-130?WT.srch=1&acc=d47a04c6f99456bc289220d5d0ff208d&epar=bp_pl_00_go_g35175&gclid=Cj0KCQjw19DlBRCSARIsAOnfReg0oOYhB-Z9e5baR_6pmTsOGcqR5vo0dUsQtXVdKvBpF2Cw9ki8aA8aArZvEALw_wcB&i=561e51156ed24cafb531b545&o=57b79bcdeec3dfb1f86e55ea&opn=XMLGOOGLE&sellerId=9055134000184',
'https://www.americanas.com.br/produto/11110021/piano-casio-digital-cdp-130?WT.srch=1&acc=e789ea56094489dffd798f86ff51c7a9&epar=bp_pl_00_go_im_todas_geral_gmv&gclid=Cj0KCQjw19DlBRCSARIsAOnfReiVThiS401KKXZCUl5B4hPwQfmY2gaSul4CQw0FPpkzPKQfi1R6RrAaAhugEALw_wcB&i=55ef647d6ed24cafb524af04&o=57b76ef4eec3dfb1f865ed0a&opn=YSMESP&sellerId=9055134000184',
'https://www.shoptime.com.br/produto/11110021/piano-casio-digital-cdp-130?WT.srch=1&acc=a76c8289649a0bef0524c56c85e71570&epar=bp_pl_00_go_im_todas_geral_gmv&gclid=Cj0KCQjw19DlBRCSARIsAOnfRei-6JZrmz0zi-xklMxUUZbbnvZCFIBop2UjSS2cEfRAsnNyw8a_JjQaAnqFEALw_wcB&i=5616c63a6ed24cafb5401d32&o=57f3dd0aeec3dfb1f81374c5&opn=GOOGLEXML&sellerId=9055134000184',
'https://www.girafa.com.br/teclas/Casio/piano-digital-casio-cdp-130bk-midi-preto-com-88-teclas.htm?gclid=Cj0KCQjw19DlBRCSARIsAOnfReiFVOVNghlymnhlW2NVgyJ0rlOhWCYutP4RGn6KnpZ2pZk5Ime28g4aAjSsEALw_wcB',
'https://www.magazineluiza.com.br/piano-digital-cdp130-bk-casio-preto-88-teclas-sensitivas-/p/fc820a195j/im/ispi/?&utm_source=google&partner_id=17322&seller_id=supersonora&product_group_id=361752213757&ad_group_id=48543697915&aw_viq=pla&gclid=Cj0KCQjw19DlBRCSARIsAOnfRegT8tvF-Z-1gHp_p-ePfLxVU1xwpi0L3zQkIzJYy3u1cwY1PzbzrF4aAmutEALw_wcB']
xpaths = ['//*[@id="content"]/div/div/section/div/div[2]/div[2]/section[2]/div/div[2]/div[1]/div[2]/p[3]/span',
'//*[@id="offer-5b7d7e13172743a0f5bc9163"]/div/div[1]/div[2]/p[3]/span',
'//*[@id="content"]/div/div/section/div/div[2]/div[2]/section[2]/div/div[2]/div[1]/div[2]/p[3]/span',
'/html/body/article/div[3]/div[5]/div/div/div[2]/div[5]/div[2]/span[3]',
'/html/body/div[3]/div[4]/div[1]/div[4]/div[2]/div[4]/div/div/div/span[2]']
    # Load the price history from disk if present, otherwise start fresh.
    try:
        history = pickle.load(open(args.price_path, 'rb'))
    except (IOError, EOFError, pickle.UnpicklingError):
        history = {}
for url in urls:
history[url] = {}
while True:
try:
update_price_history(urls, xpaths, driver, history)
pickle.dump(history, open(args.price_path, 'wb'))
interval = 720 + 720*random()
sleep(interval)
except KeyboardInterrupt:
traceback.print_exc()
print('Saving price history in {}'.format(args.price_path))
pickle.dump(history, open(args.price_path, 'wb'))
print('Done!')
| python |
#!/usr/bin/env python
# Copyright 2013 Brett Slatkin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forms for parsing and validating frontend requests."""
import datetime
# Local libraries
from flask.ext.wtf import (
BooleanField, DataRequired, Email, Form, HiddenField, IntegerField,
Length, NumberRange, Optional, Required, SubmitField, TextField)
# Local modules
from . import app
class BuildForm(Form):
"""Form for creating or editing a build."""
name = TextField(validators=[Length(min=1, max=200)])
class ReleaseForm(Form):
"""Form for viewing or approving a release."""
id = HiddenField(validators=[NumberRange(min=1)])
name = HiddenField(validators=[Length(min=1, max=200)])
number = HiddenField(validators=[NumberRange(min=1)])
good = HiddenField()
bad = HiddenField()
reviewing = HiddenField()
class RunForm(Form):
"""Form for viewing or approving a run."""
id = HiddenField(validators=[NumberRange(min=1)])
name = HiddenField(validators=[Length(min=1, max=200)])
number = HiddenField(validators=[NumberRange(min=1)])
test = HiddenField(validators=[Length(min=1, max=200)])
type = HiddenField(validators=[Length(min=1, max=200)])
approve = HiddenField()
disapprove = HiddenField()
class CreateApiKeyForm(Form):
"""Form for creating an API key."""
build_id = HiddenField(validators=[NumberRange(min=1)])
purpose = TextField('Purpose', validators=[Length(min=1, max=200)])
create = SubmitField('Create')
class RevokeApiKeyForm(Form):
"""Form for revoking an API key."""
id = HiddenField()
build_id = HiddenField(validators=[NumberRange(min=1)])
revoke = SubmitField('Revoke')
class AddAdminForm(Form):
"""Form for adding a build admin."""
email_address = TextField('Email address',
validators=[Length(min=1, max=200)])
build_id = HiddenField(validators=[NumberRange(min=1)])
add = SubmitField('Add')
class RemoveAdminForm(Form):
"""Form for removing a build admin."""
user_id = HiddenField(validators=[Length(min=1, max=200)])
build_id = HiddenField(validators=[NumberRange(min=1)])
revoke = SubmitField('Revoke')
class ModifyWorkQueueTaskForm(Form):
"""Form for modifying a work queue task."""
task_id = HiddenField()
action = HiddenField()
delete = SubmitField('Delete')
retry = SubmitField('Retry')
class SettingsForm(Form):
"""Form for modifying build settings."""
name = TextField(validators=[Length(min=1, max=200)])
send_email = BooleanField('Send notification emails')
email_alias = TextField('Mailing list for notifications',
validators=[Optional(), Email()])
build_id = HiddenField(validators=[NumberRange(min=1)])
save = SubmitField('Save')
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright(c) 2020 De Montfort University. All rights reserved.
#
#
"""
Find all solutions script.
Written for use with the Gunport Problem solving scripts.
"""
import numpy as np
import common as cmn # Common defines and functions
__author__ = 'David Kind'
__date__ = '30-01-2020'
__version__ = '1.6'
__copyright__ = 'Copyright(c) 2019 De Montfort University. All rights reserved.'
def findall(board):
"""
Takes the solution board as an input, this is a numpy ndarray and then
performs rotations and flips to extract all the possible solutions.
Parameters:
board: encoded numpy ndarray of dominoes fitted to the board.
Returns:
A list of all the solutions found; these are the numpy ndarrays'.
"""
# Keep track of all the solutions we have found
all_solns = list()
# Add the initial solution and treat this as the fundamental solution.
all_solns.append(board)
# Rotate the board to find new solutions
all_solns = domino_rotation(all_solns, board)
# Re-run the rotations but with a flipped/mirrored board
fboard = np.fliplr(board)
# Add the new solution if it does not already exist in the solutions list.
if True not in [np.array_equal(fboard, soln) for soln in all_solns]:
all_solns.append(fboard)
# Rotate the board to find new solutions
all_solns = domino_rotation(all_solns, fboard)
# Check for a square, 2x dominoes together, as there could be several and
# then check rotations. Get a list of boards with any squares.
squares = domino_squares(board)
for square in squares:
if True not in [np.array_equal(square, soln) for soln in all_solns]:
all_solns.append(square)
else:
# This solution already exists, try the next one.
continue
# Rotate the board to find new solutions
all_solns = domino_rotation(all_solns, square)
# Re-run the rotations but with a flipped/mirrored board
fboard = np.fliplr(square)
# Add the new solution if it does not already exist in the solutions list.
if True not in [np.array_equal(fboard, soln) for soln in all_solns]:
all_solns.append(fboard)
else:
# This solution already exists, try the next one.
continue
# Rotate the board to find new solutions
all_solns = domino_rotation(all_solns, fboard)
return all_solns
def domino_correction(board):
"""
Simply parses a numpy ndarray and converts 1s' to 2s' and 2s' to 1s'
returning the result back to the calling function.
Parameters:
board: encoded numpy ndarray of dominoes fitted to the board
Returns:
The updated board array.
"""
# Determine the size/shape of the board array parameter
(ysize, xsize) = board.shape
# Parse each board location in turn and convert if necessary
result = np.zeros((ysize, xsize), 'uint8')
for x in range(xsize):
for y in range(ysize):
if board[y, x] == cmn.CELL_HDOMINO:
result[y, x] = cmn.CELL_VDOMINO
elif board[y, x] == cmn.CELL_VDOMINO:
result[y, x] = cmn.CELL_HDOMINO
return result
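# Quick sketch of why domino_correction() is needed (assumes the cell encoding noted in
# __main__ below: 1 = horizontal domino, 2 = vertical domino):
#     np.rot90(np.array([[1, 1]]))  ->  array([[1],
#                                              [1]])
# After a 90-degree rotation the domino lies vertically but its cells are still encoded
# as 1, so odd rotations (90/270 degrees) swap the 1s and 2s to keep the plot colours right.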
def domino_rotation(asolns, brd):
"""
Rotate the new solution and add the result to the list of all solutions
if it unique.
In order to find all the solutions the fundamental solution will be
rotated by 90 degrees 3 times. The fundamental solution will be flipped
and then rotated by 90 degrees 3 times.
Note: adjusted solutions may have to have the domino orientation
updated, for example a rotation by 90 degrees means that vertical
dominoes will have to be changed to horizontal dominoes and horizontal
dominoes will have to be changed to vertical dominoes. This maintains
the resultant output plot colour coding.
:param asolns: list of numpy arrays, all solutions found so far.
:param brd: 2D numpy array of the board to be rotated.
:return: list of numpy arrays, all solutions.
"""
# Add the new solution if it does not already exist in the solutions list.
nsoln = domino_correction(np.rot90(brd, 1))
if True not in [np.array_equal(nsoln, soln) for soln in asolns]:
asolns.append(nsoln)
nsoln = np.rot90(brd, 2)
# Add the new solution if it does not already exist in the solutions list.
if True not in [np.array_equal(nsoln, soln) for soln in asolns]:
asolns.append(nsoln)
nsoln = domino_correction(np.rot90(brd, 3))
# Add the new solution if it does not already exist in the solutions list.
if True not in [np.array_equal(nsoln, soln) for soln in asolns]:
asolns.append(nsoln)
return asolns
def domino_squares(brd):
"""
Checks the board for domino squares and returns a list of all the new
combinations of boards with the squares swapped. These new solutions will
have to be rotated and checked to see if they in turn provide new solutions.
:param brd: 2D numpy array of the board to be rotated.
:return: list boards with modified squares.
"""
# Create a simple copy of the board to make it easier to identify squares.
# Holes are cleared as are the 2nd square of the current domino.
sbrd = np.copy(brd)
(rows, cols) = np.shape(sbrd)
# Now loop through the board clearing all unnecessary locations.
for row in range(rows):
for col in range(cols):
# Retrieve the next shape
shape = sbrd[row][col]
# Skip the cell if it is already empty.
if shape == cmn.CELL_UNASSIGNED:
continue
if shape == cmn.CELL_VDOMINO:
sbrd[row + 1][col] = cmn.CELL_UNASSIGNED
elif shape == cmn.CELL_HDOMINO:
sbrd[row][col + 1] = cmn.CELL_UNASSIGNED
else:
# Clear the hole, it's been processed
sbrd[row][col] = cmn.CELL_UNASSIGNED
# print(sbrd) # debug
# Now loop through and find any squares
squares = []
for row in range(rows):
for col in range(cols):
shape = sbrd[row][col]
if shape == cmn.CELL_HDOMINO and (row + 1) < rows and \
sbrd[row + 1][col] == cmn.CELL_HDOMINO:
# Found 2x horizontal dominoes, convert to 2 vertical dominoes.
nbrd = np.copy(brd)
nbrd[row][col] = cmn.CELL_VDOMINO
nbrd[row][col + 1] = cmn.CELL_VDOMINO
nbrd[row + 1][col] = cmn.CELL_VDOMINO
nbrd[row + 1][col + 1] = cmn.CELL_VDOMINO
squares.append(nbrd)
elif shape == cmn.CELL_VDOMINO and (col + 1) < cols and \
sbrd[row][col + 1] == cmn.CELL_VDOMINO:
# Found 2x vertical dominoes
nbrd = np.copy(brd)
nbrd[row][col] = cmn.CELL_HDOMINO
nbrd[row][col + 1] = cmn.CELL_HDOMINO
nbrd[row + 1][col] = cmn.CELL_HDOMINO
nbrd[row + 1][col + 1] = cmn.CELL_HDOMINO
squares.append(nbrd)
# It is a current limitation that the code is unable to cater for complex
# combinations of groups of dominoes together. ie. 3 vertical dominoes
# together would result in alternating blocks of horizontal dominoes.
# Ideally we would want to create a list of combinations of multiple
# squares, when available.
return squares
def display(solns):
"""
Displays all the solutions in the array.
:param solns: numpy array of solutions
:return: n/a
"""
print(solns)
for idx, board in enumerate(solns):
print("{} ---------------------------".format(idx))
print("{}".format(board))
if __name__ == '__main__':
# Note: 0=space/hole, 1=horizontal domino, 2=vertical domino
# Add a fundamental solution for 3x3 board
TESTGRID = np.zeros((3, 3), 'uint8')
TESTGRID[0, 1] = cmn.CELL_VDOMINO
TESTGRID[1, 0] = cmn.CELL_VDOMINO
TESTGRID[1, 1] = cmn.CELL_VDOMINO
TESTGRID[1, 2] = cmn.CELL_VDOMINO
TESTGRID[2, 0] = cmn.CELL_VDOMINO
TESTGRID[2, 2] = cmn.CELL_VDOMINO
display(findall(TESTGRID))
print("+" * 80)
# Add a fundamental solution for 4x3 board
TESTGRID = np.zeros((4, 3), 'uint8')
TESTGRID[0, 1] = cmn.CELL_VDOMINO
TESTGRID[1, 0] = cmn.CELL_VDOMINO
TESTGRID[1, 1] = cmn.CELL_VDOMINO
TESTGRID[1, 2] = cmn.CELL_VDOMINO
TESTGRID[2, 0] = cmn.CELL_VDOMINO
TESTGRID[2, 2] = cmn.CELL_VDOMINO
TESTGRID[3, 1] = cmn.CELL_HDOMINO
TESTGRID[3, 2] = cmn.CELL_HDOMINO
display(findall(TESTGRID))
print("+" * 80)
# Add a fundamental solution for 5x5 board [2]-[0] 7 holes, 9 dominoes.
# Ensure each square is replaced with either horizontal or vertical
# dominoes. This solution is unusual as it has a square composed of two
# vertical dominoes. Observation and logic tells us that the two
# vertical dominoes can be replaced with two horizontal dominoes.
TESTGRID = np.zeros((5, 5), 'uint8')
# Board row #1
TESTGRID[0, 1] = cmn.CELL_HDOMINO
TESTGRID[0, 2] = cmn.CELL_HDOMINO
TESTGRID[0, 4] = cmn.CELL_VDOMINO
# Board row #2
TESTGRID[1, 0] = cmn.CELL_HDOMINO
TESTGRID[1, 1] = cmn.CELL_HDOMINO
TESTGRID[1, 3] = cmn.CELL_VDOMINO
TESTGRID[1, 4] = cmn.CELL_VDOMINO
# Board row #3
TESTGRID[2, 1] = cmn.CELL_VDOMINO
TESTGRID[2, 2] = cmn.CELL_VDOMINO
TESTGRID[2, 3] = cmn.CELL_VDOMINO
# Board row #4
TESTGRID[3, 0] = cmn.CELL_VDOMINO
TESTGRID[3, 1] = cmn.CELL_VDOMINO
TESTGRID[3, 2] = cmn.CELL_VDOMINO
TESTGRID[3, 4] = cmn.CELL_VDOMINO
# Board row #5
TESTGRID[4, 0] = cmn.CELL_VDOMINO
TESTGRID[4, 2] = cmn.CELL_HDOMINO
TESTGRID[4, 3] = cmn.CELL_HDOMINO
TESTGRID[4, 4] = cmn.CELL_VDOMINO
display(findall(TESTGRID))
print("+" * 80)
# EOF
| python |
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib import messages
from test1.player_performance import player_stats
# Create your views here.
def home(request):
    # Go to the homepage
    return render(request, 'homepage.html')
def search(request):
    # If the user enters a valid player name, go to the stats page;
    # otherwise show the error page or return to the homepage.
    if request.GET.get('playername'):
        playername = request.GET.get('playername')
        result = player_stats(playername)
        if result:
            return render(request, 'statistical_page1.html', result)
        else:
            return render(request, 'error_page.html')
    else:
        return render(request, 'homepage.html')
| python |
#!/usr/bin/env python
import argparse, grp, pwd, os, sys, tarfile
def main(argv):
parser = argparse.ArgumentParser(description='Extract a tar archive using simple I/O.', add_help = False)
parser.add_argument('-?', '-h', '--help', help='Display this message and exit', action='store_true', dest='help')
parser.add_argument('-v', '--verbose', help='Be verbose', action='store_true', dest='verbose')
parser.add_argument('-U', '--unlink-first', help='Remove each file prior to extracting over it', action='store_true', dest='overwrite')
parser.add_argument('-C', '--directory', metavar='destdir', help='Extract files to this base directory', dest='directory')
parser.add_argument('--strip-components', metavar='NUMBER', type=int, help='Strip NUMBER leading components from file names on extraction', dest='strip')
parser.add_argument('tarfile', metavar='tar-file', help='File to extract, if not stdin', nargs='?', action='store')
args = parser.parse_args()
if args.help:
parser.print_help()
sys.exit(0)
directory = os.path.abspath(args.directory or '.')
verbose = args.verbose
overwrite = args.overwrite
tar_file = args.tarfile or '/dev/stdin'
strip = args.strip or 0
print 'Extracting tar archive %s to directory %s' % (tar_file, directory)
tar = tarfile.open(tar_file, 'r|*')
for entry in tar:
name = split_path(entry.name)[strip:]
if len(name) == 0:
continue
else:
name = os.path.join(directory, *name)
if entry.isdir():
if not os.path.exists(name):
if verbose:
print '[Creating directory] %s' % name
os.mkdir(name)
chown(name, entry)
elif not os.path.isdir(name):
raise RuntimeError('%s already exists and is not a directory!' % name)
else:
if verbose:
print '[Directory exists] %s' % name
elif entry.isfile():
src = tar.extractfile(entry)
if os.path.exists(name):
if overwrite:
os.unlink(name)
else:
print '[File exists] %s' % name
continue
if verbose:
print '[Creating file] %s' % name
with open(name, 'wb') as dst:
chown(name, entry)
while True:
buffer = src.read(65536)
if not buffer:
break
dst.write(buffer)
else:
print 'Ignoring unknown object %s' % entry.name
def chown(name, entry):
uid = entry.uid
gid = entry.gid
    try:
        uid = pwd.getpwnam(entry.uname).pw_uid
        gid = grp.getgrnam(entry.gname).gr_gid  # group names resolve via grp, not pwd
    except KeyError:
        pass
try:
os.chown(name, uid, gid)
except OSError as err:
print '[chown() failed] %s' % name
def split_path(p):
a, b = os.path.split(p)
return (split_path(a) if len(a) else []) + [b]
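# Behaviour note (for reference): split_path('a/b/c') returns ['a', 'b', 'c'], which is
# what lets main() implement --strip-components by slicing off the leading NUMBER parts.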
if __name__ == "__main__":
main(sys.argv[1:])
| python |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/iris_dtree.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="pJAXuwceKMxg"
# # Decision tree classifier on Iris data
#
# Based on
# https://github.com/ageron/handson-ml2/blob/master/06_decision_trees.ipynb
# + id="agyukRFGIDqW"
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
# Common imports
import numpy as np
import os
import pandas as pd
from matplotlib.colors import ListedColormap
from sklearn.datasets import load_iris
import seaborn as sns
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
# + id="uZRZ4wPuV-E5"
# Font sizes
SIZE_SMALL = 18 #14
SIZE_MEDIUM = 20 # 18
SIZE_LARGE = 24
# https://stackoverflow.com/a/39566040
plt.rc('font', size=SIZE_SMALL) # controls default text sizes
plt.rc('axes', titlesize=SIZE_SMALL) # fontsize of the axes title
plt.rc('axes', labelsize=SIZE_SMALL) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SIZE_SMALL) # fontsize of the tick labels
plt.rc('ytick', labelsize=SIZE_SMALL) # fontsize of the tick labels
plt.rc('legend', fontsize=SIZE_SMALL) # legend fontsize
plt.rc('figure', titlesize=SIZE_LARGE) # fontsize of the figure title
# + [markdown] id="lRYWVyJaKLy8"
# # Data
# + colab={"base_uri": "https://localhost:8080/", "height": 734} id="fd2kv3DxIOeJ" outputId="cd5e5059-d9ce-4b42-9a31-75bcc8f07608"
iris = load_iris()
X = iris.data
y = iris.target
print(iris.feature_names)
# Convert to pandas dataframe
df = pd.DataFrame(data=X, columns=iris.feature_names)
df['label'] = pd.Series(iris.target_names[y], dtype='category')
# we pick a color map to match that used by decision tree graphviz
#cmap = ListedColormap(['#fafab0','#a0faa0', '#9898ff']) # orange, green, blue/purple
#cmap = ListedColormap(['orange', 'green', 'purple'])
palette = {'setosa': 'orange', 'versicolor': 'green', 'virginica': 'purple'}
g = sns.pairplot(df, vars = df.columns[0:4], hue="label", palette=palette)
#g = sns.pairplot(df, vars = df.columns[0:4], hue="label")
plt.savefig("iris_scatterplot_v2.pdf")
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="pfBk8QDIIRBs" outputId="8ab79085-4a1f-441a-9f26-e8527dba1c1b"
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import load_iris
iris = load_iris()
print(iris.target_names)
print(iris.feature_names)
#ndx = [0, 2] # sepal length, petal length
ndx = [2, 3] # petal length and width
X = iris.data[:, ndx]
y = iris.target
xnames = [iris.feature_names[i] for i in ndx]
ynames = iris.target_names
# + id="26Opc8mnI5g8"
def plot_surface(clf, X, y, xnames, ynames):
n_classes = 3
plot_step = 0.02
markers = [ 'o', 's', '^']
plt.figure(figsize=(10,10))
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.xlabel(xnames[0])
plt.ylabel(xnames[1])
# we pick a color map to match that used by decision tree graphviz
cmap = ListedColormap(['orange', 'green', 'purple'])
#cmap = ListedColormap(['blue', 'orange', 'green'])
#cmap = ListedColormap(sns.color_palette())
plot_colors = [cmap(i) for i in range(4)]
cs = plt.contourf(xx, yy, Z, cmap=cmap, alpha=0.5)
# Plot the training points
for i, color, marker in zip(range(n_classes), plot_colors, markers):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], label=ynames[i],
edgecolor='black', color = color, s=50, cmap=cmap,
marker = marker)
plt.legend()
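# Note on plot_surface (descriptive, added for clarity): it evaluates the fitted classifier
# on a dense meshgrid spanning the two chosen features, reshapes the predictions back onto
# the grid, draws the decision regions with contourf, then overlays the training points
# with one marker/colour per class.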
# + [markdown] id="f9dQZFpEKRnF"
# # Depth 2
# + colab={"base_uri": "https://localhost:8080/"} id="MV4wn6aQKIVb" outputId="381d118f-c9f0-4f97-c324-b73554bcde31"
tree_clf = DecisionTreeClassifier(max_depth=2, random_state=42)
tree_clf.fit(X, y)
# + colab={"base_uri": "https://localhost:8080/", "height": 380} id="YpIKMcF1IV6o" outputId="1575923e-3b33-4a1c-ec3d-71f8c114792c"
from graphviz import Source
from sklearn.tree import export_graphviz
export_graphviz(
tree_clf,
out_file= "iris_tree.dot",
feature_names=xnames,
class_names=ynames,
rounded=True,
impurity = False,
filled=True
)
Source.from_file("iris_tree.dot")
# + id="N80oHMuhZecS" outputId="995424ee-85f7-4383-e12c-db7d5eb1a42f" colab={"base_uri": "https://localhost:8080/", "height": 34}
plt.savefig("dtree_iris_depth2_tree_v2.pdf")
# + colab={"base_uri": "https://localhost:8080/", "height": 622} id="o4iYj9MyJDes" outputId="d8d9949d-c62e-442a-cb11-d3a6808fc370"
plot_surface(tree_clf, X, y, xnames, ynames)
plt.savefig("dtree_iris_depth2_surface_v2.pdf")
# + [markdown] id="szbqxtLy1V0w"
# # Depth 3
# + colab={"base_uri": "https://localhost:8080/"} id="af6Lep1T1X8s" outputId="c911874a-98eb-4645-a1c0-d638d30f3dd0"
tree_clf = DecisionTreeClassifier(max_depth=3, random_state=42)
tree_clf.fit(X, y)
# + colab={"base_uri": "https://localhost:8080/", "height": 520} id="F7jaEWV11azu" outputId="054bc3d9-14c9-4469-ed29-b0eddf9e00f1"
export_graphviz(
tree_clf,
out_file= "iris_tree.dot",
feature_names=xnames,
class_names=ynames,
rounded=True,
impurity = False,
filled=True
)
Source.from_file("iris_tree.dot")
# + colab={"base_uri": "https://localhost:8080/", "height": 608} id="eJHigAzb1dD9" outputId="4d92d070-e67e-46f7-92b2-bd3e21f0f663"
plot_surface(tree_clf, X, y, xnames, ynames)
# + [markdown] id="wLturuH-Kcql"
# # Depth unrestricted
# + colab={"base_uri": "https://localhost:8080/"} id="p5bJENQTJDu4" outputId="05e2c26b-eae2-40fd-cbb8-39512b0b516b"
tree_clf = DecisionTreeClassifier(max_depth=None, random_state=42)
tree_clf.fit(X, y)
# + colab={"base_uri": "https://localhost:8080/", "height": 796} id="qgnp_RHYJIyq" outputId="38ffa159-0e83-4dd4-ea5b-a4439803be71"
from graphviz import Source
from sklearn.tree import export_graphviz
export_graphviz(
tree_clf,
out_file= "iris_tree.dot",
feature_names=xnames,
class_names=ynames,
rounded=True,
filled=False,
impurity=False
)
Source.from_file("iris_tree.dot")
# + colab={"base_uri": "https://localhost:8080/", "height": 608} id="5mlmxuKxJM7u" outputId="048915a4-f92a-4399-e3d8-8a346751383f"
plot_surface(tree_clf, X, y, xnames, ynames)
# + id="z2ibCZ6kJTaW"
| python |
from itertools import chain, repeat
from six.moves import cStringIO as StringIO
from . import builtin
from .file_types import source_file
from .. import safe_str
from .. import shell
from ..backends.make import writer as make
from ..backends.ninja import writer as ninja
from ..build_inputs import Edge
from ..file_types import File, Node, Phony
from ..iterutils import isiterable, iterate, listify
from ..path import Path, Root
from ..shell import posix as pshell
from ..tools import common as tools
class BaseCommand(Edge):
def __init__(self, build, env, name, outputs, cmd=None, cmds=None,
environment=None, extra_deps=None):
if (cmd is None) == (cmds is None):
raise ValueError('exactly one of "cmd" or "cmds" must be ' +
'specified')
elif cmds is None:
cmds = [cmd]
inputs = [i for line in cmds for i in iterate(line)
if isinstance(i, Node) and i.creator]
cmds = [env.run_arguments(line) for line in cmds]
self.name = name
self.cmds = cmds
self.inputs = inputs
self.env = environment or {}
Edge.__init__(self, build, outputs, extra_deps=extra_deps)
class Command(BaseCommand):
def __init__(self, build, env, name, **kwargs):
BaseCommand.__init__(self, build, env, name, Phony(name), **kwargs)
@builtin.function('build_inputs', 'env')
def command(build, env, name, **kwargs):
return Command(build, env, name, **kwargs).public_output
class BuildStep(BaseCommand):
msbuild_output = True
def __init__(self, build, env, name, **kwargs):
name = listify(name)
project_name = name[0]
type = kwargs.pop('type', source_file)
if not isiterable(type):
type = repeat(type, len(name))
type_args = kwargs.pop('args', None)
if type_args is None:
type_args = repeat([], len(name))
type_kwargs = kwargs.pop('kwargs', None)
if type_kwargs is None:
type_kwargs = repeat({}, len(name))
outputs = [self._make_outputs(*i) for i in
zip(name, type, type_args, type_kwargs)]
BaseCommand.__init__(self, build, env, project_name, outputs, **kwargs)
@staticmethod
def _make_outputs(name, type, args, kwargs):
f = getattr(type, 'type', type)
result = f(Path(name, Root.builddir), *args, **kwargs)
if not isinstance(result, File):
raise ValueError('expected a function returning a file')
return result
@builtin.function('build_inputs', 'env')
def build_step(build, env, name, **kwargs):
return BuildStep(build, env, name, **kwargs).public_output
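# Usage sketch (hypothetical build-script calls; the keyword names come from the classes
# above, but the exact invocation style depends on how the builtins are exposed):
#     command('docs', cmd=['doxygen', 'Doxyfile'])
#     build_step('version.c', cmd=['./gen_version.py'], type=source_file)
# Exactly one of "cmd" or "cmds" may be given, as enforced in BaseCommand.__init__.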
@make.rule_handler(Command, BuildStep)
def make_command(rule, build_inputs, buildfile, env):
# Join all the commands onto one line so that users can use 'cd' and such.
buildfile.rule(
target=rule.output,
deps=rule.inputs + rule.extra_deps,
recipe=[pshell.global_env(rule.env, rule.cmds)],
phony=isinstance(rule, Command)
)
@ninja.rule_handler(Command, BuildStep)
def ninja_command(rule, build_inputs, buildfile, env):
ninja.command_build(
buildfile, env,
output=rule.output,
inputs=rule.inputs + rule.extra_deps,
command=shell.global_env(rule.env, rule.cmds),
console=isinstance(rule, Command)
)
try:
from ..backends.msbuild import writer as msbuild
@msbuild.rule_handler(Command, BuildStep)
def msbuild_command(rule, build_inputs, solution, env):
project = msbuild.ExecProject(
env, name=rule.name,
commands=[shell.global_env(rule.env, rule.cmds)],
dependencies=solution.dependencies(rule.extra_deps),
)
solution[rule.output[0]] = project
except ImportError:
pass
| python |
import os
import yaml
filepath = os.path.join(os.path.curdir, "config", "settings.yml")
def __get_setting():
    with open(filepath, encoding="utf-8") as f:
        return yaml.safe_load(f)
def app_id():
return __get_setting()["YOLP"]["appid"]
def coordinates():
return __get_setting()["coordinates"]
def slackurl():
return __get_setting()["slack"]["url"]
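# Expected shape of config/settings.yml, inferred from the accessors above (sketch only;
# the concrete values are placeholders, not taken from the original repository):
#
# YOLP:
#   appid: <your YOLP application id>
# coordinates: <whatever structure the caller expects, e.g. a list of "lat,lon" strings>
# slack:
#   url: https://hooks.slack.com/services/...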
| python |
import datetime
import os
import sys
import quickfix as fix
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import app.pricefeed
import app.pxm44 as pxm44
DATA_DICTIONARY = fix.DataDictionary()
DATA_DICTIONARY.readFromURL('spec/pxm44.xml')
# 20 level book
MSG = fix.Message('8=FIX.4.4|9=1299|35=i|34=1113826|49=XCT|52=20171106-14:57:08.528|56=Q001|296=1|302=1|295=20|299=0|106=1|134=100000|135=100000|188=1.80699|190=1.80709|299=1|106=1|134=250000|135=250000|188=1.80698|190=1.80710|299=2|106=1|134=500000|135=500000|188=1.80697|190=1.80711|299=3|106=1|134=750000|135=750000|188=1.80695|190=1.80712|299=4|106=1|134=1000000|135=1000000|188=1.80694|190=1.80713|299=5|106=1|134=2000000|135=2000000|188=1.80693|190=1.80714|299=6|106=1|134=3000000|135=3000000|188=1.80692|190=1.80715|299=7|106=1|134=5000000|135=5000000|188=1.80691|190=1.80716|299=8|106=1|134=7500000|135=7500000|188=1.80690|190=1.80717|299=9|106=1|134=10000000|135=10000000|188=1.80689|190=1.80718|299=10|106=1|134=15000000|135=15000000|188=1.80688|190=1.80719|299=11|106=1|134=20000000|135=20000000|188=1.80687|190=1.80720|299=12|106=1|134=30000000|135=30000000|188=1.80686|190=1.80721|299=13|106=1|134=40000000|135=40000000|188=1.80685|190=1.80722|299=14|106=1|134=50000000|135=50000000|188=1.80684|190=1.80723|299=15|106=1|134=60000000|135=60000000|188=1.80683|190=1.80724|299=16|106=1|134=70000000|135=70000000|188=1.80682|190=1.80725|299=17|106=1|134=80000000|135=80000000|188=1.80681|190=1.80726|299=18|106=1|134=90000000|135=90000000|188=1.80680|190=1.80727|299=19|106=1|134=10000000|135=10000000|188=1.80679|190=1.80728|10=209|'.replace('|', '\x01'), DATA_DICTIONARY)
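# Reading the message above (hedged against the standard FIX 4.4 tag meanings): 35=i marks
# a MassQuote, 296=1 says it carries one quote set, 295=20 gives twenty quote entries, and
# within each entry tags 134/135 are the bid/offer sizes while 188/190 are the bid/offer rates.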
def bench_process_quote_set(iterations):
quote_set = pxm44.MassQuote.NoQuoteSets()
quote_entry = pxm44.MassQuote.NoQuoteSets.NoQuoteEntries()
MSG.getGroup(1, quote_set)
start_time = datetime.datetime.now()
for _ in range(iterations):
app.pricefeed.process_quote_set(quote_set, quote_entry)
end_time = datetime.datetime.now()
duration = (end_time - start_time).total_seconds()
return ('process_quote_set', iterations, duration)
def print_results(func, iterations, duration):
print(','.join([
func,
str(iterations),
str(duration),
'%f' % (duration / iterations)
]))
def main():
print('function,iterations,total,iteration')
res = bench_process_quote_set(100000)
print_results(*res)
if __name__ == '__main__':
main()
# function,iterations,total,iteration
# process_quote_set,100000,22.834905,0.000228
| python |
import pandas as pd
from ml2_mini_project.dataPrep.apply_moving_average import apply_moving_average
from ml2_mini_project.dataPrep.apply_pct_change import apply_pct_change
from ml2_mini_project.dataPrep.collapse_dataframe_into_new import \
collapse_dataframe_into_new
from ml2_mini_project.dataPrep.normalize_by_columns import normalize_by_columns
# Choose the column that the data should be grouped by (such as countries,
# regions etc.). Assumes that the column contains multiple groups.
# str
groupby = 'country_region'
# Choose the column that should be checked against a condition to collapse the
# data
# str
collapse_on = 'Deaths'
# Choose the threshhold that each group should start on (e.g. start at 50
# cases)
# int
threshhold = 20
# Define the columns that should be normalized (after collapse)
# list
columns_to_normalize = ["Cases", "Deaths"]
# Apply moving average window (will be applied to columns_to_normalize)
# int
window = 7
# Choose the input file
df = pd.read_csv('./2020-05-16_GoogleMobilityDataGlobal_joined.csv')
# First collapse the data Collapse step
df = collapse_dataframe_into_new(df, groupby, collapse_on, threshhold)
df.to_csv('./collapse_on_' + collapse_on + '.csv')
# Normalization step
# Only necessary if change in %_cases is not used
# df = normalize_by_columns(df, groupby, columns_to_normalize)
# df.to_csv('./normalized_df.csv')
# Try Moving Average over period X
df = apply_moving_average(df, groupby, columns_to_normalize, window)
df.to_csv('./moving_average.csv')
# Calculate %_change in target variable
df = apply_pct_change(df, groupby, columns_to_normalize)
df.to_csv('./pct_change.csv')
| python |
# SPDX-License-Identifier: MIT
# Copyright (c) 2022 MBition GmbH
from typing import Any, Dict, List, Optional, Union, Type
from ..odxtypes import DataType
from ..utils import read_description_from_odx
from ..globals import logger
from .compumethodbase import CompuMethod
from .compuscale import CompuScale
from .identicalcompumethod import IdenticalCompuMethod
from .limit import IntervalType, Limit
from .linearcompumethod import LinearCompuMethod
from .scalelinearcompumethod import ScaleLinearCompuMethod
from .tabintpcompumethod import TabIntpCompuMethod
from .texttablecompumethod import TexttableCompuMethod
def _parse_compu_scale_to_linear_compu_method(scale_element,
internal_type: DataType,
physical_type: DataType,
is_scale_linear=False,
additional_kwargs={}):
assert physical_type in [DataType.A_FLOAT32,
DataType.A_FLOAT64,
DataType.A_INT32,
DataType.A_UINT32]
assert internal_type in [DataType.A_FLOAT32,
DataType.A_FLOAT64,
DataType.A_INT32,
DataType.A_UINT32]
computation_python_type: Union[Type[float], Type[int]]
if internal_type.as_python_type() == float or physical_type.as_python_type() == float:
computation_python_type = float
else:
computation_python_type = int
kwargs = additional_kwargs.copy()
kwargs["internal_type"] = internal_type
kwargs["physical_type"] = physical_type
coeffs = scale_element.find("COMPU-RATIONAL-COEFFS")
nums = coeffs.iterfind("COMPU-NUMERATOR/V")
offset = computation_python_type(next(nums).text)
factor = computation_python_type(next(nums).text)
if coeffs.find("COMPU-DENOMINATOR/V") is not None:
kwargs["denominator"] = float(
coeffs.find("COMPU-DENOMINATOR/V").text)
assert kwargs["denominator"] > 0
# Read lower limit
internal_lower_limit = read_limit_from_odx(
scale_element.find("LOWER-LIMIT"),
internal_type=internal_type
)
if internal_lower_limit is None:
internal_lower_limit = Limit(float("-inf"), IntervalType.INFINITE)
kwargs["internal_lower_limit"] = internal_lower_limit
# Read upper limit
internal_upper_limit = read_limit_from_odx(
scale_element.find("UPPER-LIMIT"),
internal_type=internal_type
)
if internal_upper_limit is None:
if not is_scale_linear:
internal_upper_limit = Limit(float("inf"), IntervalType.INFINITE)
else:
assert (internal_lower_limit is not None
and internal_lower_limit.interval_type == IntervalType.CLOSED)
logger.info("Scale linear without UPPER-LIMIT")
internal_upper_limit = internal_lower_limit
kwargs["internal_upper_limit"] = internal_upper_limit
return LinearCompuMethod(offset=offset, factor=factor, **kwargs)
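# Illustrative COMPU-SCALE fragment (an assumption for orientation, not taken from a real
# ODX file) that the helper above would parse into a LinearCompuMethod with offset=0,
# factor=2, a closed lower limit of 0 and an infinite upper limit, i.e. f(x) = 0 + 2*x:
#
# <COMPU-SCALE>
#   <LOWER-LIMIT INTERVAL-TYPE="CLOSED">0</LOWER-LIMIT>
#   <COMPU-RATIONAL-COEFFS>
#     <COMPU-NUMERATOR><V>0</V><V>2</V></COMPU-NUMERATOR>
#   </COMPU-RATIONAL-COEFFS>
# </COMPU-SCALE>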
def read_limit_from_odx(et_element, internal_type: DataType):
limit: Optional[Limit] = None
if et_element is not None:
if et_element.get("INTERVAL-TYPE"):
interval_type = IntervalType(et_element.get("INTERVAL-TYPE"))
else:
interval_type = IntervalType.CLOSED
if interval_type == IntervalType.INFINITE:
if et_element.tag == "LOWER-LIMIT":
limit = Limit(float("-inf"), interval_type)
else:
assert et_element.tag == "UPPER-LIMIT"
limit = Limit(float("inf"), interval_type)
else:
if internal_type == DataType.A_BYTEFIELD:
limit = Limit(int("0x" + et_element.text, 16), interval_type)
elif internal_type.as_python_type() == float:
limit = Limit(float(et_element.text), interval_type)
else:
limit = Limit(int(et_element.text, 10), interval_type)
return limit
def read_compu_method_from_odx(et_element, internal_type: DataType, physical_type: DataType) -> CompuMethod:
compu_category = et_element.find("CATEGORY").text
assert compu_category in ["IDENTICAL", "LINEAR", "SCALE-LINEAR",
"TEXTTABLE", "COMPUCODE", "TAB-INTP",
"RAT-FUNC", "SCALE-RAT-FUNC"]
if et_element.find("COMPU-PHYS-TO-INTERNAL") is not None: # TODO: Is this never used?
raise NotImplementedError(
f"Found COMPU-PHYS-TO-INTERNAL for category {compu_category}")
kwargs: Dict[str, Any] = {"internal_type": internal_type}
if compu_category == "IDENTICAL":
assert (internal_type == physical_type or (
internal_type in [DataType.A_ASCIISTRING, DataType.A_UTF8STRING] and physical_type == DataType.A_UNICODE2STRING)
), (f"Internal type '{internal_type}' and physical type '{physical_type}'"
f" must be the same for compu methods of category '{compu_category}'")
return IdenticalCompuMethod(internal_type=internal_type, physical_type=physical_type)
if compu_category == "TEXTTABLE":
assert physical_type == DataType.A_UNICODE2STRING
compu_internal_to_phys = et_element.find("COMPU-INTERNAL-TO-PHYS")
internal_to_phys: List[CompuScale] = []
for scale in compu_internal_to_phys.iterfind("COMPU-SCALES/COMPU-SCALE"):
lower_limit = read_limit_from_odx(scale.find("LOWER-LIMIT"),
internal_type=internal_type)
upper_limit = read_limit_from_odx(scale.find("UPPER-LIMIT"),
internal_type=internal_type)
if scale.find("COMPU-INVERSE-VALUE/VT") is not None:
compu_inverse_value = scale.find(
"COMPU-INVERSE-VALUE/VT"
).text
elif scale.find("COMPU-INVERSE-VALUE/V") is not None:
compu_inverse_value = float(
scale.find("COMPU-INVERSE-VALUE/V").text
)
else:
compu_inverse_value = None
internal_to_phys.append(CompuScale(
short_label=(scale.find("SHORT-LABEL").text
if scale.find("SHORT-LABEL") is not None else None),
description=read_description_from_odx(scale.find("DESC")),
lower_limit=lower_limit,
upper_limit=upper_limit,
compu_inverse_value=compu_inverse_value,
compu_const=scale.find("COMPU-CONST").find("VT").text
))
kwargs["internal_to_phys"] = internal_to_phys
for scale in internal_to_phys:
assert isinstance(scale.lower_limit.value, int) or isinstance(scale.upper_limit.value, int),\
"Text table compu method doesn't have expected format!"
return TexttableCompuMethod(**kwargs)
elif compu_category == "LINEAR":
# Compu method can be described by the function f(x) = (offset + factor * x) / denominator
scale = et_element.find(
"COMPU-INTERNAL-TO-PHYS/COMPU-SCALES/COMPU-SCALE")
return _parse_compu_scale_to_linear_compu_method(scale, internal_type, physical_type, additional_kwargs=kwargs)
elif compu_category == "SCALE-LINEAR":
scales = et_element.iterfind(
"COMPU-INTERNAL-TO-PHYS/COMPU-SCALES/COMPU-SCALE")
linear_methods = [_parse_compu_scale_to_linear_compu_method(
scale, internal_type, physical_type, additional_kwargs=kwargs) for scale in scales]
return ScaleLinearCompuMethod(linear_methods)
elif compu_category == "TAB-INTP":
return TabIntpCompuMethod(internal_type=internal_type, physical_type=physical_type)
# TODO: Implement other categories (never instantiate CompuMethod)
logger.warning(
f"Warning: Computation category {compu_category} is not implemented!")
return CompuMethod(DataType.A_UINT32, DataType.A_UINT32, f"NOT-IMPLEMENTED:{compu_category}")
| python |
import unittest
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'MCwebDjango.settings')
import django
django.setup()
from django.utils import timezone
from mcwebapp.models import *
from django.contrib.auth.models import User
def populate():
curr_time = timezone.now()
# Create superuser.
# Note: there does not seem to be a "get_or_create" for the superuser, hence the try structure.
try:
user = User.objects.get(username='superuser')
print('Used existing superuser. Are you sure you migrated?')
    except User.DoesNotExist:
print('Creating superuser...')
user = User.objects.create_superuser('superuser', 'super@super.com', 'superpass')
user.save()
# Create template.
t = TemplateFile.objects.get_or_create(name='SampleTemplate')[0]
t.upload_date = curr_time
t.file_name = 'templateFiles/SampleTemplate.json'
t.user = user
t.save()
# Create PDFFile.
p = PDFFile.objects.get_or_create(name='SamplePDF')[0]
p.upload_date = curr_time
p.file_name = 'pdfFiles/SamplePDF.pdf'
p.template = t
p.save()
# Create JSONFile.
j = JSONFile.objects.get_or_create(name='jsonFile')[0]
j.upload_date = curr_time
j.file_name = 'jsonFiles/SamplePDF.json'
j.pdf = p
j.save()
# Create MatchPattern.
m = MatchPattern.objects.get_or_create(regex='Sample')[0]
m.template = t
m.save()
if __name__ == '__main__':
print('Populating the database...')
populate()
print('Database population successful.')
| python |
import anki_vector
import time
def main():
args = anki_vector.util.parse_command_args()
with anki_vector.Robot() as robot:
for _ in range(10):
if robot.proximity.last_sensor_reading:
distance = robot.proximity.last_sensor_reading.distance
print("=====================================================================")
print(distance.distance_inches)
print("=====================================================================")
time.sleep(1.0)
else:
print("Can't be bothered to work right now")
robot.say_text("Hold your horses")
time.sleep(3.0)
if __name__ == "__main__":
    main()
| python |
import datetime
from enum import Enum
class Escape(Enum):
BEGIN = '\033\033[92m'
END = '\033[0m'
_dayName = {1:'Mo',2:'Tu',3:'We',4:'Th',5:'Fr',6:'Sa',7:'Su'}
def _title(year,month):
date = datetime.date(year,month,1)
return '{0:^21}'.format(date.strftime('%B'))
def _dayHead(nday=37):
out = ''
for i in range(nday):
out = out+' '+_dayName[i%7+1]
return out
def _month2str(year,month,dates=set()):
date = datetime.date(year,month,1)
inc = datetime.timedelta(days=1)
offset = (date.isoweekday()-1)*3
out = offset*' '
addedChars = offset
while date.month == month:
if date in dates:
out = out + Escape.BEGIN.value+'{0: >3}'.format(date.day)+Escape.END.value
else:
out = out + '{0: >3}'.format(date.day)
addedChars = addedChars + 3
if addedChars == 21:
out = out + '\n'
addedChars=0
date = date + inc
out = out + (21-addedChars)*' '
return out
def _chopMonthString(s):
out = s.split('\n')
while len(out) < 6:
out = out + [21*' ']
return out
def composeMonth(year,month,dates=set()):
"""Format the dates in a month as a small block of text with a line
for each week. Returns a list where each item is one of the lines.
"""
output = [_title(year,month),_dayHead(7)]
output.extend(_chopMonthString(_month2str(year,month,dates)))
return output
def printYear(year,dates=set()):
"""Print the calendar for a year with four months on each row."""
months = [composeMonth(year,month,dates) for month in range(1,13)]
for group in range(3):
index = 4*group
for line in range(8):
print(months[index][line],end=' ')
print(months[index+1][line],end=' ')
print(months[index+2][line],end=' ')
print(months[index+3][line],end='\n')
print('\n')
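# Usage sketch (hypothetical dates, added for reference):
#     printYear(2020, {datetime.date(2020, 1, 1), datetime.date(2020, 12, 25)})
# prints the full year four months per row, highlighting the given dates with the ANSI
# escape sequences defined in Escape.BEGIN / Escape.END.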
| python |
import torch
import torchvision
from torch.utils.data import DataLoader, Subset
import pytorch_lightning as pl
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
import os, sys
from glob import glob
import cv2
from PIL import Image
sys.path.append('../')
from celeba.dataset import CelebaDataset
import albumentations as Augment
from albumentations.pytorch.transforms import ToTensor
def basic_transforms(img_height, img_width, image_pad=0):
return Augment.Compose([#Augment.ToGray(p=1.0),
Augment.Resize(img_height+image_pad, img_width+image_pad, interpolation=cv2.INTER_NEAREST, always_apply=True),
Augment.RandomCrop(img_height, img_width, always_apply=True),
Augment.HorizontalFlip(p=0.5),
Augment.RandomBrightnessContrast(p=1.0),
])#ToTensor()
def extra_transforms():
return Augment.Compose([Augment.GaussNoise(p=0.75),
Augment.CoarseDropout(p=0.5),])
class CelebaDataModule(pl.LightningDataModule):
def __init__(self, data_dir, batch_size, image_size):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
self.image_size = image_size
'''
self.transform = transforms.Compose(
[
#transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
#transforms.RandomCrop(image_size),
#transforms.Grayscale(),
transforms.RandomHorizontalFlip(),
#transforms.RandomVerticalFlip(),
transforms.ToTensor(),
]
)
'''
#def prepare_data():
#download, unzip here. anything that should not be done distributed
def setup(self, stage=None):
if stage == 'fit' or stage is None:
self.data_train = CelebaDataset(os.path.join(self.data_dir,'train'),
transform=basic_transforms(img_height=self.image_size,
img_width=self.image_size,
image_pad=0),
)#noise_transform=extra_transforms())
self.data_val = CelebaDataset(os.path.join(self.data_dir,'val'),
transform=basic_transforms(self.image_size,self.image_size))
#self.data_train = CelebaDataset(os.path.join(self.data_dir,'train'), transform=self.transform)
#self.data_val = CelebaDataset(os.path.join(self.data_dir,'val'), transform=self.transform)
def train_dataloader(self):
return DataLoader(self.data_train, batch_size=self.batch_size, shuffle=True)
def val_dataloader(self):
return DataLoader(self.data_val, batch_size=self.batch_size, shuffle=False)
if __name__ == '__main__':
dm = CelebaDataModule(data_dir='/home/markpp/datasets/celeba/',
batch_size=16,
image_size=64)
dm.setup()
# cleanup output dir
import os, shutil
output_root = "output/"
if os.path.exists(output_root):
shutil.rmtree(output_root)
os.makedirs(output_root)
sample_idx = 0
for batch_id, batch in enumerate(dm.val_dataloader()):
imgs = batch
for img in imgs:
print(img.shape)
img = img.mul(255).permute(1, 2, 0).byte().numpy()
output_dir = os.path.join(output_root,str(batch_id).zfill(6))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
filename = "id-{}.png".format(str(sample_idx).zfill(6))
cv2.imwrite(os.path.join(output_dir,filename),img)
sample_idx = sample_idx + 1
if batch_id > 1:
break
| python |
from enum import Enum
from typing import Optional, List
from happy_config.typechecking.types import Type, StructuralType, PrimitiveType
from happy_config.typechecking.typecheck_error import TypeCheckError, TypeMismatch, InvalidField, InvalidEnumValue
def check_type(x, tp: Type) -> Optional[TypeCheckError]:
def construct_dict(path: List[str], v) -> dict:
if len(path) == 1:
return {path[0]: v}
return construct_dict(path[:-1], {path[-1]: v})
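    # Example of what construct_dict builds (added for clarity):
    #     construct_dict(['a', 'b', 'c'], 1)  ->  {'a': {'b': {'c': 1}}}
    # It is used below to expand path-like keys such as 'a:b:c' into nested dicts
    # before re-checking them against the structural type.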
def recur(x, tp: Type, path: List[str]) -> Optional[TypeCheckError]:
def check_struct(tp: StructuralType) -> Optional[TypeCheckError]:
if not isinstance(x, dict):
return TypeMismatch(path=path, expect=tp, actual=type(x))
# x is a instance of dict
dict_x: dict = x
for k, v in dict_x.items():
if len(k.split(':')) > 1:
# handle path-like key
ks = k.split(':')
d = construct_dict(ks, v)
err = recur(d, tp, path=path)
else:
# normal key
if k not in tp.fields.keys():
return InvalidField(path=path, field_name=k, struct=tp)
err = recur(v, tp.fields[k], path=path + [k])
if err is not None:
return err
return None
def check_primitive(tp: PrimitiveType) -> Optional[TypeCheckError]:
if isinstance(x, tp.tp):
return None
elif issubclass(tp.tp, Enum):
try:
x1 = tp.tp(x)
return None
except ValueError as e:
return InvalidEnumValue(path=path, msg=f'{e}')
else:
return TypeMismatch(path=path, expect=tp, actual=type(x))
return tp.pmatch(check_struct, check_primitive)
return recur(x, tp, path=[])
| python |
"""
Example:
Solving nonsmooth problem
f(x) = |x1| + K*|x2| + K^2*|x3| -> min,  K = 10^3
x0 = [1000, 0.011, 0.01]
x_opt = all-zeros
f_opt = 0
"""
from numpy import *
from openopt import NSP
K = 10**3
f = lambda x: abs(x[0]) + abs(x[1])*K + abs(x[2]) * K**2
x0 = [1000, 0.011, 0.01]
#OPTIONAL: user-supplied gradient/subgradient
df = lambda x: [sign(x[0]), sign(x[1])*K, sign(x[2]) * K**2]
#p.df = lambda x: 2*x
#p.plot = 0
#p.xlim = (inf, 5)
#p.ylim = (0, 5000000)
#p.checkdf()
solvers = ['r2', 'ipopt', 'algencan','ralg']
solvers = ['r2', 'algencan','ralg']
#solvers = ['ralg', 'r2']
solvers = ['r2', 'lincher']
solvers = ['ralg']
solvers = ['r2']
#solvers = ['scipy_slsqp']
#solvers = ['algencan']
#solvers = ['ipopt']
colors = ['r', 'b', 'k', 'g']
maxIter = 1000
for i, solver in enumerate(solvers):
p = NSP(f, x0, df=df, xtol = 1e-11, ftol=1e-10, maxIter = maxIter, maxTime=150)
#p.checkdf()
r = p.solve(solver, maxVectorNum=4, iprint=1, showLS=0, plot=0, color=colors[i], show=solver==solvers[-1]) # ralg is name of a solver
#for i, solver in enumerate(solvers):
# p2 = NSP(f, r.xf, df=df, xtol = 1e-6, maxIter = 1200, maxTime=150, ftol=1e-6)
# #p.checkdf()
# r2 = p2.solve(solver, maxVectorNum=15, iprint=1, showLS=1, plot=0, color=colors[i], show=solver==solvers[-1]) # ralg is name of a solver
#print 'x_opt:\n', r.xf
print 'f_opt:', r.ff # should print small positive number like 0.00056
| python |
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 522367919
"""
"""
random actions, total chaos
"""
board = gamma_new(5, 4, 3, 2)
assert board is not None
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_move(board, 1, 1, 3) == 1
assert gamma_free_fields(board, 1) == 18
assert gamma_move(board, 2, 3, 3) == 1
board861069519 = gamma_board(board)
assert board861069519 is not None
assert board861069519 == (".112.\n"
".....\n"
".....\n"
".....\n")
del board861069519
board861069519 = None
assert gamma_move(board, 3, 3, 0) == 1
assert gamma_free_fields(board, 3) == 16
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 2, 4, 0) == 1
assert gamma_move(board, 3, 2, 1) == 1
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_busy_fields(board, 3) == 2
assert gamma_free_fields(board, 3) == 4
board621179980 = gamma_board(board)
assert board621179980 is not None
assert board621179980 == (".112.\n"
".1...\n"
"..3..\n"
"...32\n")
del board621179980
board621179980 = None
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_free_fields(board, 2) == 3
assert gamma_golden_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 2, 4) == 0
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_free_fields(board, 1) == 5
assert gamma_golden_move(board, 1, 3, 3) == 1
assert gamma_move(board, 2, 2, 2) == 1
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_move(board, 3, 2, 4) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_golden_move(board, 3, 3, 1) == 0
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 4, 2) == 0
assert gamma_move(board, 3, 1, 3) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_free_fields(board, 1) == 5
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_move(board, 3, 1, 3) == 0
assert gamma_move(board, 1, 1, 0) == 1
assert gamma_golden_move(board, 1, 2, 2) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 1, 0, 3) == 1
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_busy_fields(board, 3) == 2
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 3, 4, 1) == 0
assert gamma_move(board, 1, 3, 4) == 0
assert gamma_busy_fields(board, 1) == 9
assert gamma_golden_possible(board, 2) == 1
assert gamma_golden_move(board, 2, 1, 2) == 1
assert gamma_move(board, 3, 2, 4) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 1, 4, 0) == 0
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_busy_fields(board, 3) == 2
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 1, 3, 2) == 1
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_golden_move(board, 2, 0, 4) == 0
assert gamma_move(board, 3, 1, 3) == 0
assert gamma_move(board, 3, 3, 2) == 0
assert gamma_busy_fields(board, 3) == 2
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_free_fields(board, 1) == 5
gamma_delete(board)
| python |
import name_lib_main
my_name = "Fred"
my_length = name_lib_main.name_length(my_name)
my_lower_case = name_lib_main.lower_case_name(my_name)
print(f"In my code, my length is {my_length} and my lower case name is: {my_lower_case}") | python |
# -*- coding: utf-8 -*-
"""
@Time : 2018/1/25 14:04
@Author : Elvis
zsl_resnet.py
for m in self.fc1:
if hasattr(m, 'weight'):
orthogonal(m.weight)
"""
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.models import resnet18, resnet50, resnet101
# from torch.nn.init import kaiming_normal, orthogonal
# class ConvPoolNet(nn.Module):
# def __init__(self, cnn, w_attr, num_attr=312, num_classes=150):
# super(ConvPoolNet, self).__init__()
# self.cnn = nn.Sequential(*list(cnn.children())[:-2])
# self.feat_size = cnn.fc.in_features
#
# self.convPool = nn.Conv2d(self.feat_size, self.feat_size, kernel_size=7, dilation=0)
# self.fc0 = nn.Sequential(
# nn.Linear(self.feat_size, num_attr),
# )
# self.fc1 = nn.Sequential(
# nn.Linear(self.feat_size, num_attr),
# nn.Dropout(0.5),
# nn.Sigmoid(),
# # nn.Tanh(),
# # nn.Linear(self.feat_size, 32),
# # nn.Linear(32, num_attr),
# )
#
# self.fc2 = nn.Linear(num_attr, num_classes, bias=False)
# self.fc2.weight = nn.Parameter(w_attr, requires_grad=False)
#
# def forward(self, x):
# feat = self.cnn(x)
#
# feat = feat.view(feat.shape[0], -1)
# attr = self.fc0(feat)
# # xt = self.fc1(attr)
# wt = self.fc1(feat)
# xt = wt.mul(attr)
# attr_y = self.fc2(xt) # xt (batch, square sum root
# return attr_y, attr
class AttriCNN(nn.Module):
def __init__(self, cnn, w_attr, num_attr=312, num_classes=200):
super(AttriCNN, self).__init__()
self.cnn = nn.Sequential(*list(cnn.children())[:-1])
self.feat_size = cnn.fc.in_features
self.fc1 = nn.Sequential(
nn.Linear(self.feat_size, num_attr, bias=False),
# nn.Dropout(0.5),
# nn.Sigmoid(),
)
self.fc2 = nn.Linear(num_attr, num_classes, bias=False)
self.fc2.weight = nn.Parameter(w_attr, requires_grad=False)
def forward(self, x):
feat = self.cnn(x)
feat = feat.view(feat.shape[0], -1)
xt = self.fc1(feat)
attr_y = self.fc2(xt)
return attr_y, (feat, self.fc1[0].weight)
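# Reading of AttriCNN (descriptive note, added for clarity): fc1 projects the CNN features
# into the attribute space, and fc2 holds the frozen class-attribute matrix w_attr
# (requires_grad=False), so the class scores are dot products between the predicted
# attributes and each class's attribute signature.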
class AttriWeightedCNN(nn.Module):
def __init__(self, cnn, w_attr, num_attr=312, num_classes=150):
super(AttriWeightedCNN, self).__init__()
self.cnn = nn.Sequential(*list(cnn.children())[:-1])
self.feat_size = cnn.fc.in_features
self.fc0 = nn.Sequential(
nn.Linear(self.feat_size, num_attr),
# nn.Dropout(0.5),
# nn.Tanh(),
)
self.fc1 = nn.Sequential(
nn.Linear(self.feat_size, num_attr),
nn.Dropout(0.5),
nn.Sigmoid(),
# nn.Tanh(),
# nn.Linear(self.feat_size, 32),
# nn.Linear(32, num_attr),
)
self.fc2 = nn.Linear(num_attr, num_classes, bias=False)
self.fc2.weight = nn.Parameter(w_attr, requires_grad=False)
def forward(self, x):
feat = self.cnn(x)
feat = feat.view(feat.shape[0], -1)
attr = self.fc0(feat)
# xt = self.fc1(attr)
wt = self.fc1(feat)
xt = wt.mul(attr)
attr_y = self.fc2(xt) # xt (batch, square sum root
return attr_y, wt
# class BiCompatCNN(nn.Module):
# def __init__(self, cnn, w_attr, num_attr=312, num_classes=200):
# super(BiCompatCNN, self).__init__()
# self.cnn = nn.Sequential(*list(cnn.children())[:-1])
# self.feat_size = cnn.fc.in_features
#
# self.fc1 = nn.Sequential(
# nn.Linear(self.feat_size, num_attr, bias=False),
# # nn.Dropout(0.5),
# # nn.Sigmoid(),
# )
#
# self.fc2 = nn.Linear(num_attr, num_classes, bias=False)
# self.fc2.weight = nn.Parameter(w_attr, requires_grad=False)
#
# def forward(self, x):
# feat = self.cnn(x)
# feat = feat.view(feat.shape[0], -1)
# xt = self.fc1(feat)
# attr_y = self.fc2(xt)
# return attr_y, (feat, self.fc1[0].weight)
def attrWeightedCNN(num_attr=312, num_classes=150):
cnn = resnet50(pretrained=True)
w_attr = np.load("data/order_cub_attr.npy")
w_attr = w_attr[:num_classes, :] / 100.
w_attr = torch.FloatTensor(w_attr) # 312 * 150
return AttriWeightedCNN(cnn=cnn, w_attr=w_attr, num_attr=num_attr, num_classes=num_classes)
def attrWCNNg(num_attr=312, num_classes=200):
cnn = resnet50(pretrained=True)
w_attr = np.load("data/order_cub_attr.npy")
w_attr = w_attr / 100.
    w_attr = torch.FloatTensor(w_attr)  # shape (num_classes, num_attr) = (200, 312)
return AttriCNN(cnn=cnn, w_attr=w_attr, num_attr=num_attr, num_classes=num_classes)
def attrWCNNg_sun(num_attr=102, num_classes=717):
cnn = resnet50(pretrained=True)
w_attr = np.load("data/order_sun_attr.npy")
# w_attr = w_attr / 100.
    w_attr = torch.FloatTensor(w_attr)  # shape (num_classes, num_attr) = (717, 102)
return AttriCNN(cnn=cnn, w_attr=w_attr, num_attr=num_attr, num_classes=num_classes)
def attrCNN_cubfull(num_attr=312, num_classes=200):
cnn = resnet50(pretrained=True)
w_attr = np.load("data/cub_attr.npy")
w_attr = torch.FloatTensor(w_attr / 100.) # 312 * 200
return AttriWeightedCNN(cnn=cnn, w_attr=w_attr, num_attr=num_attr, num_classes=num_classes)
def attrCNN_awa2(num_attr=85, num_classes=50):
cnn = resnet18(pretrained=True)
w_attr = np.load("data/order_awa2_attr.npy")
# w_attr = w_attr[:num_classes, :]
w_attr = torch.FloatTensor(w_attr / 100.)
return AttriWeightedCNN(cnn=cnn, w_attr=w_attr, num_attr=num_attr, num_classes=num_classes)
def attrCNNg_awa2(num_attr=85, num_classes=50):
cnn = resnet18(pretrained=True)
w_attr = np.load("data/order_awa2_attr.npy")
# w_attr = w_attr[:num_classes, :]
w_attr = torch.FloatTensor(w_attr / 100.)
return AttriCNN(cnn=cnn, w_attr=w_attr, num_attr=num_attr, num_classes=num_classes)
def CNNw(num_classes=150):
cnn = resnet101(pretrained=True)
feat_size = cnn.fc.in_features
cnn.fc = nn.Linear(feat_size, num_classes, bias=False)
return cnn
class DeepRIS(nn.Module):
def __init__(self, cnn, w_attr, num_attr=312, num_classes=150):
super(DeepRIS, self).__init__()
self.cnn = cnn
feat_size = self.cnn.fc.in_features
self.cnn.fc = nn.Sequential(
nn.Linear(feat_size, num_attr),
nn.Sigmoid(),
nn.Dropout(0.4)
)
self.fc2 = nn.Linear(num_attr, num_classes, bias=False)
self.fc2.weight = nn.Parameter(w_attr, requires_grad=False)
# for m in self.cnn.fc:
# if hasattr(m, 'weight'):
# orthogonal(m.weight)
def forward(self, x):
attr = self.cnn(x)
attr_y = self.fc2(attr) # (batch, square sum root
return attr_y, attr
def soft_celoss(logit, prob):
""" Cross-entropy function"""
soft_logit = F.log_softmax(logit, dim=1)
loss = torch.sum(prob * soft_logit, 1)
return loss
def soft_loss(out, targets):
"""Compute the total loss"""
ws = np.load("data/cub_ws_14.npy")
ws = torch.FloatTensor(ws).cuda()
targets_data = targets.data
targets_data = targets_data.type(torch.cuda.LongTensor)
soft_target = ws[targets_data]
soft_target = Variable(soft_target, requires_grad=False).cuda()
soft_ce = - torch.mean(soft_celoss(out, soft_target))
ce = F.cross_entropy(out, targets)
alpha = 0.2
loss = alpha * ce + (1. - alpha) * soft_ce
return loss
def soft_loss_awa2(out, targets):
"""Compute the total loss"""
ws = np.load("data/awa2_ws_14.npy")
ws = torch.FloatTensor(ws).cuda()
targets_data = targets.data
targets_data = targets_data.type(torch.cuda.LongTensor)
soft_target = ws[targets_data]
soft_target = Variable(soft_target, requires_grad=False).cuda()
soft_ce = - torch.mean(soft_celoss(out, soft_target))
ce = F.cross_entropy(out, targets)
alpha = 0.
loss = alpha * ce + (1. - alpha) * soft_ce
return loss
def soft_loss_sun(out, targets):
"""Compute the total loss"""
ws = np.load("data/sun_ws_14.npy")
ws = torch.FloatTensor(ws).cuda()
targets_data = targets.data
targets_data = targets_data.type(torch.cuda.LongTensor)
soft_target = ws[targets_data]
soft_target = Variable(soft_target, requires_grad=False).cuda()
soft_ce = - torch.mean(soft_celoss(out, soft_target))
ce = F.cross_entropy(out, targets)
alpha = 0.5
loss = alpha * ce + (1. - alpha) * soft_ce
return loss
class RegLoss(nn.Module):
def __init__(self, lamda1=0.1, lamda2=0.1, superclass="cub"):
super(RegLoss, self).__init__()
self.lamda1 = lamda1
self.lamda2 = lamda2
wa = np.load("data/order_%s_attr.npy" % superclass)
if superclass != "sun":
wa = wa / 100.
if superclass == "cub":
num_seen = 150
elif superclass == "sun":
num_seen = 645
else:
num_seen = 40
self.wa_seen = Variable(torch.FloatTensor(wa[:num_seen, :]), requires_grad=False).cuda()
self.wa_unseen = Variable(torch.FloatTensor(wa[num_seen:, :]), requires_grad=False).cuda()
# self.wa = torch.FloatTensor(wa).cuda()
def forward(self, out, targets, w):
# targets_data = targets.data
# targets_data = targets_data.type(torch.cuda.LongTensor)
# sy = self.wa[targets_data]
# sy_var = Variable(sy, requires_grad=False).cuda()
ce = F.cross_entropy(out, targets)
xt, wt = w
ws_seen = torch.matmul(self.wa_seen, wt)
ws_unseen = torch.matmul(self.wa_unseen, wt)
loss = ce + self.lamda1 * torch.mean(torch.mean(ws_seen ** 2, 1)) - \
self.lamda2 * torch.mean(torch.mean(wt ** 2, 1))
# self.lamda2 * torch.mean(torch.mean(ws_unseen ** 2, 1)) + \
# self.lamda2 * torch.mean((torch.matmul(sy_var, wt) - xt) ** 2)
# torch.mean(torch.norm((torch.matmul(sy_var, wt) - xt), 2, 1))
# self.lamda2 * torch.mean(torch.norm(torch.matmul(sy_var, w), 2, 1))
# torch.mean(torch.matmul(sy_var, w) ** 2)
# self.lamda2 * torch.mean(torch.mean(ws ** 2, 1)) torch.mean(torch.norm(ws, 2, 1))
# + self.lamda1 * torch.mean(torch.norm(xt, 2, 1))
return loss
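

# Illustrative, hedged usage sketch added for documentation; it is not part of the
# original training code and assumes a recent PyTorch. It only exercises soft_celoss(),
# which needs no data files or GPU: with a one-hot "soft" target it reduces to the
# negative of the standard cross-entropy, which is checked below. Wiring the full
# pipeline would look roughly like
#   model = attrWCNNg(); criterion = RegLoss(superclass="cub")
#   out, w = model(images); loss = criterion(out, targets, w)
# but that assumes the data/*.npy attribute files and a CUDA device are available.
if __name__ == "__main__":
    logits = torch.randn(4, 10)
    labels = torch.tensor([1, 3, 5, 7])
    onehot = torch.eye(10)[labels]                       # one-hot target distribution
    per_sample = soft_celoss(logits, onehot)             # log-likelihood per sample
    ce = F.cross_entropy(logits, labels, reduction="none")
    print(torch.allclose(-per_sample, ce))               # expected: True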
| python |
#!/usr/bin/env python
"""
Usage: explain_lm FST STR
Explain the cost assigned to a string STR by the fst FST.
"""
def main(fst_path, string):
fst = FST(fst_path)
s = string.replace('<noise>', '%')
subst = {'^': '<bol>', '$': '<eol>', ' ': '<spc>', '%': '<noise>'}
fst.explain([subst.get(c, c) for c in s])
if __name__ == "__main__":
import sys
    if len(sys.argv) != 3:
        print(__doc__)
        sys.exit(1)
    from lvsr.ops import FST
    main(*sys.argv[1:])
| python |
from __future__ import division
import random
import os
import numpy as np
import pickle
import datetime
import json
class Decision(object):
def __init__(self, pair, result, reviewer, time):
self.pair = pair
self.result = result
self.reviewer = reviewer
self.time = time
def dict(self):
return {'Pair':[str(self.pair[0]),str(self.pair[1])], 'Result':str(self.result), 'reviewer':str(self.reviewer), 'time':str(self.time)}
def ACJ(data, maxRounds, noOfChoices = 1, logPath = None, optionNames = ["Choice"]):
if noOfChoices < 2:
return UniACJ(data, maxRounds, logPath, optionNames)
else:
return MultiACJ(data, maxRounds, noOfChoices, logPath, optionNames)
class MultiACJ(object):
'''Holds multiple ACJ objects for running comparisons with multiple choices.
The first element of the list of acj objects keeps track of the used pairs.'''
def __init__(self, data, maxRounds, noOfChoices, logPath = None, optionNames = None):
self.data = list(data)
self.n = len(data)
self.round = 0
self.step = 0
self.noOfChoices = noOfChoices
self.acjs = [ACJ(data, maxRounds) for _ in range(noOfChoices)]
self.logPath = logPath
if optionNames == None:
self.optionNames = [str(i) for i in range(noOfChoices)]
else:
self.optionNames = optionNames
self.nextRound()
def getScript(self, ID):
'''Gets script with ID'''
return self.acjs[0].getScript(ID)
def getID(self, script):
'''Gets ID of script'''
return self.acjs[0].getID(script)
def infoPairs(self):
        '''Returns pairs based on summed selection arrays from Progressive Adaptive Comparative Judgement
        Pollitt (2012) + Barrada, Olea, Ponsoda, and Abad (2010)'''
pairs = []
#Create
sA = np.zeros((self.n, self.n))
for acj in self.acjs:
sA = sA+acj.selectionArray()
while(np.max(sA)>0):
iA, iB = np.unravel_index(sA.argmax(), sA.shape)
pairs.append([self.data[iA], self.data[iB]])
sA[iA,:] = 0
sA[iB,:] = 0
sA[:,iA] = 0
sA[:,iB] = 0
return pairs
def nextRound(self):
'''Returns next round of pairs'''
roundList = self.infoPairs()
for acj in self.acjs:
acj.nextRound(roundList)
acj.step = 0
self.round = self.acjs[0].round
self.step = self.acjs[0].step
return self.acjs[0].roundList
def nextPair(self):
'''gets next pair from main acj'''
p = self.acjs[0].nextPair(startNext=False)
if p == -1:
if self.nextRound() != None:
p = self.acjs[0].nextPair(startNext=False)
else:
return None
self.step = self.acjs[0].step
return p
def nextIDPair(self):
'''Gets ID of next pair'''
pair = self.nextPair()
if pair == None:
return None
idPair = []
for p in pair:
idPair.append(self.getID(p))
return idPair
def WMS(self):
ret = []
for acj in self.acjs:
ret.append(acj.WMS())
return ret
def comp(self, pair, result = None, update = None, reviewer = 'Unknown', time = 0):
'''Adds in a result between a and b where true is a wins and False is b wins'''
if result == None:
result = [True for _ in range(self.noOfChoices)]
if self.noOfChoices != len(result):
            raise ValueError('Results list needs to be noOfChoices in length')
for i in range(self.noOfChoices):
self.acjs[i].comp(pair, result[i], update, reviewer, time)
if self.logPath != None:
self.log(self.logPath, pair, result, reviewer, time)
def IDComp(self, idPair, result = None, update = None, reviewer = 'Unknown', time = 0):
'''Adds in a result between a and b where true is a wins and False is b wins. Uses IDs'''
pair = []
for p in idPair:
pair.append(self.getScript(p))
self.comp(pair, result, update, reviewer, time)
def rankings(self, value=True):
'''Returns current rankings
Default is by value but score can be used'''
rank = []
for acj in self.acjs:
rank.append(acj.rankings(value))
return rank
def reliability(self):
'''Calculates reliability'''
rel = []
for acj in self.acjs:
rel.append(acj.reliability()[0])
return rel
def log(self, path, pair, result, reviewer = 'Unknown', time = 0):
'''Writes out a log of a comparison'''
timestamp = datetime.datetime.now().strftime('_%Y_%m_%d_%H_%M_%S_%f')
with open(path+os.sep+str(reviewer)+timestamp+".log", 'w+') as file:
file.write("Reviewer:%s\n" % str(reviewer))
file.write("A:%s\n" % str(pair[0]))
file.write("B:%s\n" % str(pair[1]))
for i in range(len(result)):
file.write("Winner of %s:%s\n" %(self.optionNames[i], "A" if result[i] else "B"))
file.write("Time:%s\n" % str(time))
def JSONLog(self):
'''Write acjs states to JSON files'''
for acj in self.acjs:
acj.JSONLog()
def percentReturned(self):
return self.acjs[0].percentReturned()
def results(self):
        '''Returns a list of scripts and their values scaled between 0 and 100'''
rank = []
for r in self.rankings():
rank.append(list(zip(r[0], (r[1]-r[1].min())*100/(r[1].max()-r[1].min()))))
return rank
def decisionCount(self, reviewer):
return self.acjs[0].decisionCount(reviewer)
class UniACJ(object):
'''Base object to hold comparison data and run algorithm
script is used to refer to anything that is being ranked with ACJ
Dat is an array to hold the scripts with rows being [id, script, score, quality, trials]
Track is an array with each value representing number of times a winner (dim 0) has beaten the loser (dim 1)
    Decisions keeps track of all the decisions made, stored in Decision objects
'''
def __init__(self, data, maxRounds, logPath = None, optionNames = None):
self.reviewers = []
self.optionNames = optionNames
self.noOfChoices = 1
self.round = 0
self.maxRounds = maxRounds
self.update = False
self.data = list(data)
self.dat = np.zeros((5, len(data)))
self.dat[0] = np.asarray(range(len(data)))
#self.dat[1] = np.asarray(data)
#self.dat[2] = np.zeros(len(data), dtype=float)
#self.dat[3] = np.zeros(len(data), dtype=float)
#self.dat[4] = np.zeros(len(data), dtype=float)
self.track = np.zeros((len(data), len(data)))
self.n = len(data)
self.swis = 5
self.roundList = []
self.step = -1
self.decay = 1
self.returned = []
self.logPath = logPath
self.decisions = []
def nextRound(self, extRoundList = None):
'''Returns next round of pairs'''
print("Hello")
self.round = self.round+1
self.step = 0
if self.round > self.maxRounds:
self.maxRounds = self.round
#print(self.round)
if self.round > 1:
self.updateAll()
if extRoundList == None:
self.roundList = self.infoPairs()
else:
self.roundList = extRoundList
self.returned = [False for i in range(len(self.roundList))]
return self.roundList
def polittNextRound(self):
self.round = self.round+1
if self.round > self.maxRounds:
self.roundList = None
elif self.round<2:
self.roundList = self.randomPairs()
elif self.round<2+self.swis:
self.updateAll()
self.roundList = self.scorePairs()
else:
#if self.round == 1+swis:
#self.dat[3] = (1/self.dat[1].size)*self.dat[2][:]
self.updateAll()
self.roundList = self.valuePairs()
return self.roundList
#return self.scorePairs()
def getID(self, script):
'''Gets ID of script'''
return self.data.index(script)
def getScript(self, ID):
'''Gets script with ID'''
return self.data[ID]
def nextPair(self, startNext = True):
'''Returns next pair. Will start new rounds automatically if startNext is true'''
self.step = self.step + 1
if self.step >= len(self.roundList):
if all(self.returned):
if (startNext):
self.nextRound()
#self.polittNextRound()
if self.roundList == None or self.roundList == []:
return None
else:
return -1
else:
o = [p for p in self.roundList if not self.returned[self.roundList.index(p)]]
return random.choice(o)
return self.roundList[self.step]
def nextIDPair(self, startNext = True):
'''Returns ID of next pair'''
pair = self.nextPair()
if pair == None:
return None
idPair = []
for p in pair:
idPair.append(self.getID(p))
return idPair
def singleProb(self, iA, iB):
prob = np.exp(self.dat[3][iA]-self.dat[3][iB])/(1+np.exp(self.dat[3][iA]-self.dat[3][iB]))
return prob
def prob(self, iA):
'''Returns a numpy array of the probability of A beating other values
Based on the Bradley-Terry-Luce model (Bradley and Terry 1952; Luce 1959)'''
probs = np.exp(self.dat[3][iA]-self.dat[3])/(1+np.exp(self.dat[3][iA]-self.dat[3]))
return probs
def fullProb(self):
'''Returns a 2D array of all probabilities of x beating y'''
pr = np.zeros((self.n, self.n))
for i in range(self.n):
pr[i] = self.dat[3][i]
return np.exp(pr-self.dat[3])/(1+np.exp(pr-self.dat[3]))
def fisher(self):
'''returns fisher info array'''
prob = self.fullProb()
return ((prob**2)*(1-prob)**2)+((prob.T**2)*(1-prob.T)**2)
def selectionArray(self):
        '''Returns a selection array based on Progressive Adaptive Comparative Judgement
        Pollitt (2012) + Barrada, Olea, Ponsoda, and Abad (2010)'''
F = self.fisher()*np.logical_not(np.identity(self.n))
ran = np.random.rand(self.n, self.n)*np.max(F)
a = 0
b = 0
#Create array from fisher mixed with noise
for i in range(1, self.round+1):
a = a + (i-1)**self.decay
for i in range(1, self.maxRounds+1):
b = b + (i-1)**self.decay
W = a/b
S = ((1-W)*ran)+(W*F)
#Remove i=j and already compared scripts
return S*np.logical_not(np.identity(self.n))*np.logical_not(self.track+self.track.T)
def updateValue(self, iA):
'''Updates the value of script A using Newton's Method'''
scoreA = self.dat[2][iA]
valA = self.dat[3][iA]
probA = self.prob(iA)
x = np.sum(probA)-0.5#Subtract where i = a
y = np.sum(probA*(1-probA))-0.25#Subtract where i = a
if x == 0:
exit()
#print(self.dat[3])
return self.dat[3][iA]+((self.dat[2][iA]-x)/y)
#print(self.dat[3][iA])
#print("--------")
def updateAll(self):
'''Updates the value of all scripts using Newton's Method'''
newDat = np.zeros(self.dat[3].size)
        for i in self.dat[0].astype(int):  # self.dat is a float array, so cast the id row to integer indices
newDat[i] = self.updateValue(i)
self.dat[3] = newDat[:]
def randomPairs(self, dat = None):
'''Returns a list of random pairs from dat'''
if dat == None:
dat = self.data
shufDat = np.array(dat, copy=True)
ranPairs = []
while len(shufDat)>1:
a = shufDat[0]
b = shufDat[1]
shufDat = shufDat[2:]
ranPairs.append([a,b])
return ranPairs
def scorePairs(self, dat = None, scores = None):
'''Returns random pairs with matching scores or close if no match'''
if dat == None:
dat = self.dat
shuf = np.array(dat[:3], copy=True)
np.random.shuffle(shuf.T)
shuf.T
shuf = shuf[:, np.argsort(shuf[2])]
pairs = []
i = 0
#Pairs matching scores
while i<(shuf[0].size-1):
aID = shuf[0][i]
bID = shuf[0][i+1]
if (self.track[aID][bID]+self.track[bID][aID])==0 and shuf[2][i]==shuf[2][i+1]:
pairs.append([self.data[shuf[0][i]], self.data[shuf[0][i+1]]])
shuf = np.delete(shuf, [i, i+1], 1)
else:
i = i+1
#Add on closest score couplings of unmatched scores
i = 0
while i<shuf[0].size-1:
aID = shuf[0][i]
j = i+1
while j<shuf[0].size:
bID = shuf[0][j]
if (self.track[aID][bID]+self.track[bID][aID])==0:
pairs.append([self.data[shuf[0][i]], self.data[shuf[0][j]]])
shuf = np.delete(shuf, [i, j], 1)
break
else:
j = j+1
if j == shuf[0].size:
i = i+1
return pairs
def valuePairs(self):
        '''Returns pairs matched by close values, Pollitt (2012)'''
shuf = np.array(self.dat, copy=True)#Transpose to shuffle columns rather than rows
np.random.shuffle(shuf.T)
shuf.T
pairs = []
i = 0
while i<shuf[0].size-1:
aID = shuf[0][i]
newShuf = shuf[:, np.argsort(np.abs(shuf[3] - shuf[3][i]))]
j = 0
while j<newShuf[0].size:
bID = newShuf[0][j]
if (self.track[aID][bID]+self.track[bID][aID])==0 and self.data[aID]!=self.data[bID]:
pairs.append([self.data[shuf[0][i]], self.data[newShuf[0][j]]])
iJ = np.where(shuf[0]==newShuf[0][j])[0][0]
shuf = np.delete(shuf, [i, iJ], 1)
break
else:
j = j+1
if j == shuf[0].size:
i = i+1
return pairs
def infoPairs(self):
        '''Returns pairs based on selection array from Progressive Adaptive Comparative Judgement
        Pollitt (2012) + Barrada, Olea, Ponsoda, and Abad (2010)'''
pairs = []
#Create
sA = self.selectionArray()
while(np.max(sA)>0):
iA, iB = np.unravel_index(sA.argmax(), sA.shape)
pairs.append([self.data[iA], self.data[iB]])
sA[iA,:] = 0
sA[iB,:] = 0
sA[:,iA] = 0
sA[:,iB] = 0
return pairs
def rmse(self):
'''Calculate rmse'''
prob = self.fullProb()
y = 1/np.sqrt(np.sum(prob*(1-prob), axis=1)-0.25)
return np.sqrt(np.mean(np.square(y)))
def trueSD(self):
'''Calculate true standard deviation'''
sd = np.std(self.dat[3])
return ((sd**2)/(self.rmse()**2))**(0.5)
def reliability(self):
'''Calculates reliability'''
G = self.trueSD()/self.rmse()
return [(G**2)/(1+(G**2))]
def SR(self, pair, result):
'''Calculates the Squared Residual and weight of a decision'''
p = [self.getID(a) for a in pair]
if result:
prob = self.singleProb(p[0], p[1])
else:
prob = self.singleProb(p[1], p[0])
res = 1-prob
weight = prob*(1-prob)
SR = (res**2)
return SR, weight
def addDecision(self, pair, result, reviewer, time = 0):
'''Adds an SSR to the SSR array'''
self.decisions.append(Decision(pair, result,reviewer, time))
def revID(self, reviewer):
return self.reviewers.index(reviewer)
def WMS(self, decisions = None):
'''Builds data lists:
[reviewer] [sum of SR, sum of weights]
and uses it to make dict reviewer: WMS
WMS = Sum SR/Sum weights
also returns mean and std div'''
if decisions == None:
decisions = self.decisions
self.reviewers = []
SRs = []
weights = []
for dec in decisions:
if dec.reviewer not in self.reviewers:
self.reviewers.append(dec.reviewer)
SRs.append(0)
weights.append(0)
SR, weight = self.SR(dec.pair, dec.result)
revID = self.reviewers.index(dec.reviewer)
SRs[revID] = SRs[revID] + SR
weights[revID] = weights[revID] + weight
WMSs = []
WMSDict = {}
for i in range(len(self.reviewers)):
WMS = SRs[i]/weights[i]
WMSs.append(WMS)
WMSDict[self.reviewers[i]]=WMS
return WMSDict, np.mean(WMSs), np.std(WMSs)
def comp(self, pair, result = True, update = None, reviewer = 'Unknown', time = 0):
'''Adds in a result between a and b where true is a wins and False is b wins'''
self.addDecision(pair, result, reviewer, time)
if pair[::-1] in self.roundList:
pair = pair[::-1]
result = not result
if pair in self.roundList:
self.returned[self.roundList.index(pair)] = True
a = pair[0]
b = pair[1]
if update == None:
update = self.update
iA = self.data.index(a)
iB = self.data.index(b)
if result:
self.track[iA,iB] = 1
self.track[iB,iA] = 0
else:
self.track[iA,iB] = 0
self.track[iB,iA] = 1
self.dat[2,iA] = np.sum(self.track[iA,:])
self.dat[2,iB] = np.sum(self.track[iB,:])
self.dat[4,iA] = self.dat[4][iA]+1
self.dat[4,iB] = self.dat[4][iB]+1
if self.logPath != None:
self.log(self.logPath, pair, result, reviewer, time)
def IDComp(self, idPair, result = True, update = None, reviewer = 'Unknown', time=0):
'''Adds in a result between a and b where true is a wins and False is b wins, Uses IDs'''
pair = []
for p in idPair:
pair.append(self.getScript(p))
self.comp(pair, result, update, reviewer, time)
def percentReturned(self):
if len(self.returned) == 0:
return 0
return (sum(self.returned)/len(self.returned))*100
def log(self, path, pair, result, reviewer = 'Unknown', time = 0):
'''Writes out a log of a comparison'''
timestamp = datetime.datetime.now().strftime('_%Y_%m_%d_%H_%M_%S_%f')
with open(path+os.sep+str(reviewer)+timestamp+".log", 'w+') as file:
file.write("Reviewer:%s\n" % str(reviewer))
file.write("A:%s\n" % str(pair[0]))
file.write("B:%s\n" % str(pair[1]))
file.write("Winner:%s\n" %("A" if result else "B"))
file.write("Time:%s\n" % str(time))
def JSONLog(self, path = None):
'''Writes out a JSON containing data from ACJ'''
if path == None:
path = self.logPath
choice = self.optionNames[0].replace(" ", "_")
ACJDict = {"Criteria":choice, "Scripts":self.scriptDict(), "Reviewers":self.reviewerDict(), "Decisions":self.decisionList()}
with open(path+os.sep+"ACJ_"+choice+".json", 'w+') as file:
json.dump(ACJDict, file, indent=4)
def decisionCount(self, reviewer):
c = 0
for dec in self.decisions:
if (dec.reviewer == reviewer):
c = c + 1
return c
def reviewerDict(self):
revs = {}
WMSs, _, _ = self.WMS()
for rev in self.reviewers:
revDict = {'decisions':self.decisionCount(rev), 'WMS':WMSs[rev]}
revs[str(rev)]= revDict
print(len(revs))
return revs
def scriptDict(self):
scr = {}
r = self.results()[0]
for i in range(len(r)):
scrDict = {"Score":r[i][1]}
scr[str(r[i][0])] = scrDict
return scr
def decisionList(self):
dec = []
for d in self.decisions:
dec.append(d.dict())
return dec
def rankings(self, value=True):
'''Returns current rankings
Default is by value but score can be used'''
if value:
return [np.asarray(self.data)[np.argsort(self.dat[3])], self.dat[3][np.argsort(self.dat[3])]]
else:
return self.data[np.argsort(self.dat[2])]
def results(self):
        '''Returns a list of scripts and their values scaled between 0 and 100'''
r = self.rankings()
rank = list(zip(r[0], (r[1]-r[1].min())*100/(r[1].max()-r[1].min())))
return [rank]
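

# Illustrative, hedged usage sketch (not part of the original module): rank a handful
# of items with simulated judgements. It assumes the default pairing flow
# (nextRound/nextPair driven by infoPairs); the rankings printed here are meaningless
# because the "judge" below just flips a coin for every pair.
if __name__ == "__main__":
    scripts = ['A', 'B', 'C', 'D', 'E', 'F']
    acj = ACJ(scripts, maxRounds=3)          # noOfChoices defaults to 1 -> UniACJ
    acj.nextRound()
    pair = acj.nextPair()
    while pair not in (None, -1):
        winner_is_a = random.choice([True, False])
        acj.comp(pair, result=winner_is_a, reviewer='demo')
        pair = acj.nextPair()
    print(acj.results())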
| python |
from setuptools import setup, find_packages
setup(
name = 'aes',
version = '1.0.0',
description = 'AES(Advanced Encryption Standard) in Python',
author = 'Donggeun Kwon',
author_email = 'donggeun.kwon@gmail.com',
url = 'https://github.com/DonggeunKwon/aes',
download_url = 'https://github.com/DonggeunKwon/aes/archive/1.0.tar.gz',
install_requires = [ ],
# packages = find_packages(exclude = ['docs']),
keywords = ['AES', 'Cipher', 'Advanced Encryption Standard'],
python_requires = '>=3',
classifiers = [
# 'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'
]
)
### Build package
# python setup.py bdist_wheel
# twine upload dist/aes-1.0.0-py3-none-any.whl | python |
from typing import NamedTuple, Optional, Tuple
import numpy as np
from cgtasknet.tasks.reduce.reduce_task import (
_generate_random_intervals,
ReduceTaskCognitive,
ReduceTaskParameters,
)
class RomoTaskParameters(NamedTuple):
dt: float = ReduceTaskParameters().dt
trial_time: float = 0.25
answer_time: float = ReduceTaskParameters().answer_time
value: Tuple[float, float] = (None, None)
delay: float = 0.15
negative_shift_trial_time: float = ReduceTaskParameters().negative_shift_trial_time
positive_shift_trial_time: float = ReduceTaskParameters().positive_shift_trial_time
negative_shift_delay_time: float = ReduceTaskParameters().negative_shift_delay_time
positive_shift_delay_time: float = ReduceTaskParameters().positive_shift_delay_time
class RomoTaskRandomModParameters(NamedTuple):
romo: RomoTaskParameters = RomoTaskParameters()
n_mods: int = 2
class RomoTask(ReduceTaskCognitive):
"""
The challenge is for the subjects or the network to
remember the first stimulus. Then, after the delay time,
the second stimulus comes. The network must compare this
incentive and respond correctly.
Ref: https://www.nature.com/articles/20939
Args:
ReduceTaskCognitive ([type]): [description]
"""
def __init__(
self,
params: Optional[RomoTaskParameters] = RomoTaskParameters(),
batch_size: int = 1,
mode: str = "random",
enable_fixation_delay: bool = False,
uniq_batch: bool = False,
) -> None:
"""
Initialize the model .
Args:
params (dict): [description]
batch_size (int): [description]
mode (str, optional): [description]. Defaults to "random".
"""
if mode == "value" and (params.value[0] is None or params.value is None):
raise ValueError("params[values][0]([1]) is None")
super().__init__(
params=params,
batch_size=batch_size,
mode=mode,
enable_fixation_delay=enable_fixation_delay,
uniq_batch=uniq_batch,
)
self._ob_size = 2
self._act_size = 3
def _unique_every_batch(self):
max_length = 0
l_intputs = []
l_outputs = []
for _ in range(self._batch_size):
inputs, outputs = self._identical_batches(batch_size=1)
l_intputs.append(inputs)
l_outputs.append(outputs)
max_length = max(max_length, inputs.shape[0])
inputs, target_outputs = self._concatenate_batches(
l_intputs, l_outputs, max_length
)
return inputs, target_outputs
def _identical_batches(self, batch_size: int = 1):
dt = self._params.dt
trial_time = _generate_random_intervals(
dt,
self._params.trial_time,
self._params.negative_shift_trial_time,
self._params.positive_shift_trial_time,
)
delay = _generate_random_intervals(
dt,
self._params.delay,
self._params.negative_shift_delay_time,
self._params.positive_shift_delay_time,
)
answer_time = int(self._params.answer_time / dt)
if self._mode == "random":
values_first = np.random.uniform(0, 1, size=batch_size)
values_second = np.random.uniform(0, 1, size=batch_size)
elif self._mode == "value":
values_first = np.ones(batch_size) * self._params.value[0]
values_second = np.ones(batch_size) * self._params.value[1]
else:
values_first = np.zeros(batch_size)
values_second = np.zeros(batch_size)
inputs = np.zeros(
((2 * trial_time + delay + answer_time), batch_size, self._ob_size)
)
inputs[: 2 * trial_time + delay, :, 0] = 1
inputs[:trial_time, :, 1] = values_first
inputs[trial_time + delay : -answer_time, :, 1] = values_second
target_output = np.zeros(
((2 * trial_time + delay + answer_time), batch_size, self._act_size)
)
target_output[:, :, 0] = inputs[:, :, 0]
target_output[2 * trial_time + delay :, :, 1] = values_first < values_second
target_output[2 * trial_time + delay :, :, 2] = values_second < values_first
return inputs, target_output
def _one_dataset(self) -> Tuple[np.ndarray, np.ndarray]:
"""
Returns a single dataset with the given size and target .
Returns:
Tuple[np.ndarray, np.ndarray]: [description]
"""
if self._uniq_batch:
return self._unique_every_batch()
else:
return self._identical_batches(self._batch_size)
def one_dataset(self):
"""
        Return a single (inputs, target_outputs) dataset.
Returns:
[type]: [description]
"""
return self._one_dataset()
@property
def name(self):
return "RomoTask"
class RomoTaskRandomMod(RomoTask):
"""
Trial task that is used for a random mod .
Args:
RomoTask ([type]): [description]
"""
def __init__(
self,
params: Optional[RomoTaskRandomModParameters] = RomoTaskRandomModParameters(),
batch_size: int = 1,
mode: str = "random",
enable_fixation_delay: bool = False,
uniq_batch: bool = False,
) -> None:
"""
Initialize the model .
Args:
params (dict): [description]
batch_size (int): [description]
mode (str, optional): [description]. Defaults to "random".
n_mods (int, optional): [description]. Defaults to 1.
"""
super().__init__(
params=params.romo,
batch_size=batch_size,
mode=mode,
enable_fixation_delay=enable_fixation_delay,
uniq_batch=uniq_batch,
)
self._n_mods = params.n_mods
self._ob_size += self._n_mods - 1
def _one_dataset_mod(self, mode: int):
"""
Generate a single model .
Returns:
[type]: [description]
"""
temp, outputs = self._one_dataset()
T = temp.shape[0]
inputs = np.zeros((T, self._batch_size, self._ob_size))
inputs[:, :, 0] = temp[:, :, 0]
inputs[:, :, 1 + mode] = temp[:, :, 1]
return inputs, outputs
def one_dataset(self, mode: Optional[int] = None):
if mode is None:
mode = np.random.randint(0, self._n_mods)
return self._one_dataset_mod(mode)
@property
def name(self):
return "RomoTaskRandomMod"
@property
def params(self):
return RomoTaskRandomModParameters(self._params, n_mods=self._n_mods)
@params.setter
def params(self, new_params: RomoTaskRandomModParameters):
self._params = new_params.romo
self._n_mods = new_params.n_mods
class RomoTask1(RomoTaskRandomMod):
def one_dataset(self, mode=0):
return self._one_dataset_mod(mode)
@property
def name(self):
return "RomoTask1"
class RomoTask2(RomoTaskRandomMod):
def one_dataset(self, mode=1):
return self._one_dataset_mod(mode)
@property
def name(self):
return "RomoTask2"
| python |
# -*- coding: utf-8 -*-
"""
Created on Fri May 10 09:24:08 2019
@author: zjrobbin
"""
w_dir='E:/Maca_Climate_Files_Sapps/'
## Librarys
from datetime import datetime, timedelta
from netCDF4 import num2date, date2num
import matplotlib.pyplot as plt
import geopandas
import rasterio as rt
import numpy as np
from netCDF4 import Dataset
from rasterio.mask import mask
from rasterio.crs import CRS
import pandas as pd
from rasterio.plot import show
import os
import time
##Function
def getFeatures(gdf):
"""Function to parse features from GeoDataFrame in such a manner that rasterio wants them"""
import json
return [json.loads(gdf.to_json())['features'][0]['geometry']]
files=os.listdir(w_dir)
#listofruns=('RCP45Tempmin','RCP85Tempmin','RCP45Tempmax','RCPT85Tempmax','RCP45PPT','RCP85PPT')
listofruns=('RCP45Tempmin')
#files=('macav2livneh_tasmin_GFDL-ESM2M_r1i1p1_rcp45_2006_2099_CONUS_daily_aggregated',
# 'macav2livneh_tasmin_GFDL-ESM2M_r1i1p1_rcp85_2006_2099_CONUS_daily_aggregated',
# 'macav2livneh_tasmax_GFDL-ESM2M_r1i1p1_rcp45_2006_2099_CONUS_daily_aggregated',
# 'macav2livneh_tasmax_GFDL-ESM2M_r1i1p1_rcp85_2006_2099_CONUS_daily_aggregated',
# 'macav2livneh_pr_GFDL-ESM2M_r1i1p1_rcp45_2006_2099_CONUS_daily_aggregated',
# 'macav2livneh_pr_GFDL-ESM2M_r1i1p1_rcp85_2006_2099_CONUS_daily_aggregated')
files=('macav2livneh_tasmin_GFDL-ESM2M_r1i1p1_rcp45_2006_2099_CONUS_daily_aggregated')
key=('air_temperature','air_temperature','air_temperature','air_temperature','precipitation','precipitation')
key=('air_temperature')
#files=('macav2livneh_pr_GFDL-ESM2M_r1i1p1_rcp45_2006_2099_CONUS_daily_aggregated',
# 'macav2livneh_pr_GFDL-ESM2M_r1i1p1_rcp85_2006_2099_CONUS_daily_aggregated')
#listofruns=('RCP45PPT','RCP85PPT')
#key=('precipitation','precipitation')
files=os.listdir(w_dir+"netCDFs/")
###Load in the Shapefile for the area in CRS: 4269 as climate outputs are.
Shapeys=("High_Elevation_Dissolve","Low_elevation_Dissolved","Mid_El_Montane_Dissolve","North_Montane_Dissolved",)
for SH in Shapeys:
AOI= geopandas.read_file((w_dir+'Climate_regions/'+SH+'.shp'))
start=time.time()
print(AOI)
coords=getFeatures(AOI)
###Loop through climate files.
for r in list(range(0,(len(files)))):
print(files[r])
file=files[r]
####Get the keys based on the file names
if "_pr_" in file:
key='precipitation'
model=file[16:]
model=model.replace('_2006_2099_CONUS_daily_aggregated.nc',"")
if "_tasmin_" in file:
key='air_temperature'
model=file[20:]
model=model.replace('_2006_2099_CONUS_daily_aggregated.nc',"")
if "_tasmax_" in file:
key='air_temperature'
model=file[20:]
model=model.replace('i1p1_rcp85_2006_2099_CONUS_daily_aggregated.nc',"")
if "_rcp85_" in file:
scenario="RCP85"
if "_rcp45_" in file:
scenario="RCP45"
#print((w_dir+'/netCDFs/'+files[r]+'.nc'))
### Load in the Net CDF file
Precip = Dataset((w_dir+'netCDFs/'+file), "r")
#print(Precip.variables)
#Precip['time']
#for i in Precip.variables:
#print(i)
#print(Precip.variables['time'])
#Get the array from the NETCDF
Array= np.array(Precip.variables[key])
### Get Variables
Time=np.array(Precip.variables['time'])
var=[key]
#print(var)
lat=np.array(Precip.variables['lat'])
lon=np.array(Precip.variables['lon'])
lon2=-(360-lon)
##Adjust dates
#days since 1900-01-01
### Set standard dates
dates = [datetime(1900,1,1)+n*timedelta(hours=24) for n in Time]
### Get meta data
out_meta={'crs':CRS.from_epsg(4269),
'driver': 'GTiff',
'count':34333,
'dtype': 'float32',
'height': len(lon2),
'nodata': None,
'transform':((max(lon2)-min(lon2))/len(lon2),0.0,min(lon2),0.0,-(max(lat)-min(lat))/len(lat),max(lat)),
#'transform': (min(lat), max(lat),(max(lat)-min(lat))/len(lat),min(lon),max(lon),(max(lon2)-min(lon2))/len(lon),max(lon)),
'width': len(lat)}
###Write array as raster stack
new_output=rt.open(w_dir+'All.tif', 'w', **out_meta)
new_output.write(Array)
new_output.close()
### Get the Rasterstack
Template=rt.open(w_dir+'All.tif')
print(Template)
### Create nulls
something=pd.DataFrame([[dates]],columns=["Timestep"])
Meansmoosh=pd.DataFrame([[dates]],columns=["Timestep"])
Varsmoosh=pd.DataFrame([[dates]],columns=["Timestep"])
###Mask
out_img,out_transform=mask(Template,shapes=coords,crop=True,nodata=-9999)
Template.bounds
coords
#More nulls
MeanStack=pd.DataFrame(columns=["Timestep"])
VarStack=pd.DataFrame(columns=["Timestep"])
StdStack=pd.DataFrame(columns=["Timestep"])
###Loop through dates to average
for i in list(range(1,len(dates))):
            Timestep = dates[i]
#print(Timestep)
band1=out_img[i,:,:]
#print(band1)
### Fix temp K to C
meancalc=band1[band1!=-9999]
if key == 'air_temperature':
meancalc= meancalc-273.15
#print(np.mean(meancalc))
# print(meancalc)
### Get the Mean
mean=(np.mean(meancalc))
print(np.mean(mean))
### Variance
variance=(np.var(meancalc))
### Standard Deviation
STD=(np.std(meancalc))
###Create Outputs
Mean=pd.DataFrame([[Timestep,mean]],columns=["Timestep",key])
StTime=pd.DataFrame([[Timestep,STD]],columns=['Timestep',key+"STD"])
VarTime=pd.DataFrame([[Timestep,variance]],columns=['Timestep',(key+"VAR")])
###Append to list
MeanStack=MeanStack.append(Mean)
StdStack=StdStack.append(StTime)
VarStack=VarStack.append(VarTime)
#### Make into one dataframe
stepone=None
stepone=pd.merge(MeanStack,VarStack,how='inner', on='Timestep')
one_eco=pd.merge(stepone,StdStack, how='inner',on='Timestep')
one_eco.to_csv(w_dir+'Outputs/12_8/'+SH+'_'+model+scenario+key+'.csv')
Template.close()
end=time.time()
print("Minutes elapsed "+str((end-start)/60))
data=None
###endecoregion loop
# NOTE: leftover scratch from an earlier version of this script; it references names
# (daytomonth, oneday, monthtoyear, work_dir, year, Model) that are never defined in
# this file, so it is kept here only as comments.
# daytomonth = daytomonth.append(oneday)
# #os.remove(w_dir+'temp'+str(i)+'.tif')
# Template.close()
# monthtoyear = monthtoyear.append(daytomonth)
# monthtoyear.head
# monthtoyear.to_csv(work_dir+"Outputs/"+str(year)+Model+".csv")
# #
# Template.profile
# #show(Template,1)
# ###Template['Affine']
# #Template.bounds
# Template.close()
# 6697870.5-6656859.0
# 41011.5/1439
| python |
from tortoise import fields
from tortoise.models import Model
from app.db.base import ModelTimeMixin
__all__ = ['Store']
class Store(Model, ModelTimeMixin):
"""店铺"""
id = fields.IntField(pk=True)
name = fields.CharField(unique=True, max_length=64, description='店铺名称')
desc = fields.CharField(null=True, max_length=255, description='店铺简介')
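

# Hedged usage sketch (not part of the original model definition): creating a row
# assumes Tortoise has already been initialised elsewhere with this model registered
# in the app config; the name/desc values below are placeholders.
async def _example_create_store() -> Store:
    return await Store.create(name='demo store', desc='created from the usage sketch')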
| python |
import numpy as np
from pymoo.algorithms.soo.nonconvex.es import ES
from pymoo.docs import parse_doc_string
from pymoo.core.survival import Survival
from pymoo.util.function_loader import load_function
class StochasticRankingSurvival(Survival):
def __init__(self, PR):
super().__init__(filter_infeasible=False)
self.PR = PR
def _do(self, problem, pop, *args, n_survive=None, **kwargs):
assert problem.n_obj == 1, "This stochastic ranking implementation only works for single-objective problems."
F, G = pop.get("F", "G")
f = F[:, 0]
if problem.n_constr == 0:
I = f.argsort()
else:
phi = (np.maximum(0, G) ** 2).sum(axis=1)
J = np.arange(len(phi))
I = load_function("stochastic_ranking")(f, phi, self.PR, J)
return pop[I][:n_survive]
class SRES(ES):
def __init__(self, PF=0.45, **kwargs):
"""
Stochastic Ranking Evolutionary Strategy (SRES)
Parameters
----------
PF: float
The stochastic ranking weight for choosing a random decision while doing the modified bubble sort.
"""
super().__init__(survival=StochasticRankingSurvival(PF), **kwargs)
self.PF = PF
parse_doc_string(SRES.__init__)
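

# Illustrative, hedged usage sketch (not part of the original module): run SRES on a
# constrained single-objective benchmark. The get_problem import path and the "g1"
# problem name are assumptions about the installed pymoo version.
if __name__ == "__main__":
    from pymoo.optimize import minimize
    from pymoo.problems import get_problem

    problem = get_problem("g1")
    algorithm = SRES(PF=0.45)
    res = minimize(problem, algorithm, ("n_gen", 100), seed=1, verbose=False)
    print("best F:", res.F, "constraint violation:", res.CV)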
| python |
from ismo.ensemble import run_all_configurations
import json
import git
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="""
Runs the ensemble for M different runs (to get some statistics).
""")
parser.add_argument('--script_name', type=str, required=True,
help='Name of python script to run')
parser.add_argument('--source_folder', type=str, required=True,
help='Name of source folder')
parser.add_argument('--number_of_reruns', type=int, default=10,
help='Total number of reruns to get the ensemble')
parser.add_argument('--basename', type=str, default='ensemble_run',
help='Basename for the ensemble')
parser.add_argument('--compute_budget', type=int, default=512,
help='Maximum compute budget (in terms of number of samples that can be computed from simulator)')
parser.add_argument('--starting_sizes', type=int, nargs='+', default=[16, 32, 64],
help='Starting sizes to use')
parser.add_argument('--batch_size_factors', type=float, nargs='+', default=[0.25, 0.5, 1],
help='Batch sizes to use as a ratio of starting_size')
repo = git.Repo(search_parent_directories=True)
parser.add_argument('--repository_path', type=str, default=repo.working_dir,
help='Absolute path of the repository')
parser.add_argument('--dry_run', action='store_true',
help='Only do a dry run, no jobs are submitted or run')
parser.add_argument('--submitter', type=str, default='lsf',
help='Name of submitter to use, can be lsf or bash')
parser.add_argument('--only_missing', action='store_true',
help='Only run missing configurations')
parser.add_argument('--container_type', type=str, default=None,
help="Container type (none, docker, singularity)")
parser.add_argument('--container', type=str, default='docker://kjetilly/machine_learning_base:0.1.2',
help='Container name')
parser.add_argument('--generator', type=str, default="monte-carlo",
help="Generator to use (either 'monte-carlo' or 'sobol'")
parser.add_argument('--optimizer', type=str, default='L-BFGS-B',
help='Name of optimizer')
parser.add_argument('--do_not_draw_new_samples', action='store_true',
help='Reuse old optimization values for next iteration')
args = parser.parse_args()
# Save configuration for easy read afterwards
with open("ensemble_setup.json", 'w') as f:
json.dump(vars(args), f, indent=4)
run_all_configurations(**vars(args))
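
    # Example invocation (hedged sketch; every file and folder name below is a placeholder):
    #   python run_ensemble.py --script_name my_experiment.py --source_folder ./my_project \
    #       --number_of_reruns 5 --starting_sizes 16 32 --batch_size_factors 0.5 1 --submitter bash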
| python |
"""This module contains helper functions to use the Paho MQTT library with the
MQTT broker defined in a :class:`.MQTTConfig` object.
"""
import json
from paho.mqtt.publish import single
def auth_params(mqtt_config):
"""Return the authentication parameters from a :class:`.MQTTConfig`
object.
Args:
mqtt_config (:class:`.MQTTConfig`): The MQTT connection settings.
Returns:
dict: A dict {'username': username, 'password': password} with the
authentication parameters, or None if no authentication is used.
.. versionadded:: 0.6.0
"""
# Set up a dict containing authentication parameters for the MQTT client.
if mqtt_config.auth.username:
# The password can be None.
return {'username': mqtt_config.auth.username,
'password': mqtt_config.auth.password}
# Or use no authentication.
else:
return None
def host_port(mqtt_config):
"""Return the host and port from a :class:`.MQTTConfig` object.
Args:
mqtt_config (:class:`.MQTTConfig`): The MQTT connection settings.
Returns:
(str, int): A tuple with the host and port defined in the MQTT
connection settings.
.. versionadded:: 0.6.0
"""
host_port = mqtt_config.broker_address.split(':')
if mqtt_config.tls.hostname:
host = mqtt_config.tls.hostname
else:
host = host_port[0]
port = int(host_port[1])
return (host, port)
def tls_params(mqtt_config):
"""Return the TLS configuration parameters from a :class:`.MQTTConfig`
object.
Args:
mqtt_config (:class:`.MQTTConfig`): The MQTT connection settings.
Returns:
dict: A dict {'ca_certs': ca_certs, 'certfile': certfile,
'keyfile': keyfile} with the TLS configuration parameters, or None if
no TLS connection is used.
.. versionadded:: 0.6.0
"""
# Set up a dict containing TLS configuration parameters for the MQTT
# client.
if mqtt_config.tls.hostname:
return {'ca_certs': mqtt_config.tls.ca_file,
'certfile': mqtt_config.tls.client_cert,
'keyfile': mqtt_config.tls.client_key}
# Or don't use TLS.
else:
return None
def connect(client, mqtt_config, keepalive=60, bind_address=''):
"""Connect to an MQTT broker with the MQTT connection settings defined in
an :class:`.MQTTConfig` object.
Args:
client (`paho.mqtt.client.Client`_): The MQTT client object.
mqtt_config (:class:`.MQTTConfig`): The MQTT connection settings.
keepalive (int, optional): The maximum period in seconds allowed
between communications with the broker. Defaults to 60.
bind_address (str, optional): The IP address of a local network
interface to bind this client to, assuming multiple interfaces
exist. Defaults to ''.
.. _`paho.mqtt.client.Client`: https://www.eclipse.org/paho/clients/python/docs/#client
.. versionadded:: 0.6.0
"""
host, port = host_port(mqtt_config)
# Set up MQTT authentication.
auth = auth_params(mqtt_config)
if auth:
client.username_pw_set(auth['username'], auth['password'])
# Set up an MQTT TLS connection.
tls = tls_params(mqtt_config)
if tls:
client.tls_set(ca_certs=tls['ca_certs'],
certfile=tls['certfile'],
keyfile=tls['keyfile'])
client.connect(host, port, keepalive, bind_address)
def publish_single(mqtt_config, topic, payload=None, json_encode=True):
"""Publish a single message to the MQTT broker with the connection settings
defined in an :class:`.MQTTConfig` object, and then disconnect cleanly.
.. note:: The Paho MQTT library supports many more arguments when
publishing a single message. Other arguments than `topic` and `payload`
are not supported by this helper function: it’s aimed at just the
simplest use cases.
Args:
mqtt_config (:class:`.MQTTConfig`): The MQTT connection settings.
topic (str): The topic string to which the payload will be published.
payload (str, optional): The payload to be published. If '' or None, a
zero length payload will be published.
json_encode (bool, optional): Whether or not the payload is a dict
that will be encoded as a JSON string. The default value is
True. Set this to False if you want to publish a binary payload
as-is.
.. versionadded:: 0.6.0
"""
host, port = host_port(mqtt_config)
auth = auth_params(mqtt_config)
tls = tls_params(mqtt_config)
if json_encode:
payload = json.dumps(payload)
single(topic, payload, hostname=host, port=port, auth=auth, tls=tls)
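

def _example_publish(mqtt_config):
    """Hedged usage sketch (not part of the original module).

    Connects a Paho client and publishes one JSON message, given an
    :class:`.MQTTConfig` object constructed elsewhere by the caller; the topic and
    payload below are placeholders.
    """
    import paho.mqtt.client as mqtt

    client = mqtt.Client()
    connect(client, mqtt_config)          # helper defined above
    client.loop_start()
    publish_single(mqtt_config, 'demo/topic', {'status': 'ok'})
    client.loop_stop()
    client.disconnect()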
| python |
import logging as log
import imp
from imagebot import pysix
class MonitorException(Exception):
pass
def start_tk_monitor(outpipe):
from imagebot.monitor_tk import Monitor #Tkinter will have to be imported in its own process for Tk to work
mon = Monitor(outpipe)
mon.start()
def start_gtk_monitor(outpipe):
from imagebot.monitor_gtk import Monitor
mon = Monitor(outpipe)
mon.start()
def get_monitor():
try:
imp.find_module('gi')
return start_gtk_monitor
except ImportError as e:
log.error(pysix.err_msg(e))
try:
imp.find_module(pysix.tkinter)
return start_tk_monitor
except ImportError as e:
log.error(pysix.err_msg(e))
raise MonitorException()
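

# Hedged usage sketch (not part of the original module): launch whichever monitor is
# available in a child process. Passing the receiving end of the pipe to the monitor
# is an assumption, since the Monitor implementations are not shown here.
if __name__ == '__main__':
    from multiprocessing import Process, Pipe

    recv_end, send_end = Pipe(duplex=False)
    try:
        start_monitor = get_monitor()
    except MonitorException:
        log.error('no supported GUI toolkit (gi or tkinter) found')
    else:
        Process(target=start_monitor, args=(recv_end,)).start()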
| python |
# -*- coding: utf-8 -*-
# @Time : 2020/3/7 10:39 PM
# @Author : zyk
# @Email : zhangyongke1105@163.com
# @File : my_test.py
# @Software : PyCharm
# Moving items between lists
# First, create a list of users waiting to be verified and an empty list to hold verified users
unconfirmed_users = ['alice', 'brian', 'candace']
confirmed_users = []
# Verify each user until there are no unconfirmed users left, appending each verified user to the confirmed list
while unconfirmed_users:
current_user = unconfirmed_users.pop()
print("Verifying user: " + current_user.title())
confirmed_users.append(current_user)
# Display all verified users
print("\nThe following users have been confirmed:")
for confirmed_user in confirmed_users:
print(confirmed_user.title())
# # Reversing a list: reversed() returns an iterator; use list() to turn the result back into a list
# x = [1, 2, 3]
# print(list(reversed(x)))
# Tuples and lists can be converted into each other: tuple() builds a tuple, list() builds a list
#
# # Function keyword arguments
# def describe_pet(animal_type, pet_name):
#     """Display information about a pet"""
#     print("\nI have a " + animal_type + ".")
#     print("My " + animal_type + "'s name is " + pet_name.title() + ".")
#
#
# describe_pet(animal_type='hamster', pet_name='hungry')
#
#
# # Default parameter values
# def describe_pet(pet_name, animal_type='dog'):
#     """Display information about a pet"""
#     print("\nI have a " + animal_type + ".")
#     print("My " + animal_type + "'s name is " + pet_name.title() + ".")
#
#
# describe_pet(pet_name='willie')
# describe_pet('willie')
#
#
# # 函数返回值
# # def get_format_name(first_name, last_name):
# # """返回完整的姓名"""
# # full_name = first_name + ' ' + last_name
# # return full_name.title()
# #
# #
# # musician = get_format_name('jimi', 'hendrix')
# # print(musician)
#
#
# # Making an argument optional
# def get_format_name(first_name, last_name, middle_name=''):
#     """Return the full name"""
# if middle_name:
# full_name = first_name + ' ' + middle_name + ' ' + last_name
# else:
# full_name = first_name + ' ' + last_name
# return full_name.title()
#
#
# musician = get_format_name('jimi', 'hendrix')
# print(musician)
# musician = get_format_name('john', 'hooker', 'lee')
# print(musician)
#
#
# # Returning a dictionary
# def build_person(first_name, last_name, age=''):
#     """Return a dictionary containing information about a person"""
# person = {'first': first_name, 'last': last_name}
# if age:
# person['age'] = age
# return person
#
#
# musician = build_person('jimi', 'hendrix', age='17')
# print(musician)
#
#
# # Combining a function with a while loop
# def get_format_name(first_name, last_name, middle_name=''):
#     """Return the full name"""
# if middle_name:
# full_name = first_name + ' ' + middle_name + ' ' + last_name
# else:
# full_name = first_name + ' ' + last_name
# return full_name.title()
#
#
# # This is a loop
# # Passing a list to a function
# def greet_users(names):
#     """Print a simple greeting to each user in the list"""
# for name in names:
# msg = "Hello," + name.title() + "!"
# print(msg)
#
#
# usernames = ['hannah', 'try', 'margot']
# greet_users(usernames)
#
#
# # Passing an arbitrary number of arguments
# def make_pizza(*toppings):  # the asterisk in *toppings tells Python to create an empty tuple named toppings and pack every received value into it
#     """Summarize the pizza we are about to make"""
# print("\nMake a pizza with the following toppings:")
# for topping in toppings:
# print("- " + topping)
#
#
# make_pizza('pepperoni')
# make_pizza('mushrooms', 'green peppers', 'extra cheese')
#
#
# # Using an arbitrary number of keyword arguments
# def build_profile(first, last, **user_info):  # the double asterisk in **user_info tells Python to create an empty dict named user_info and pack every received name-value pair into it
#     """Build a dictionary containing everything we know about a user"""
# profile = {'first_name': first, 'last_name': last}
# for key, value in user_info.items():
# profile[key] = value
# return profile
#
#
# user_profile = build_profile('albert', 'einstein', location='princeton', field='physics')
# print(user_profile)
# pow(x, y) raises x to the power y, as shown below
# a = pow(2, 3)
# b = 10 + pow(2, 3*5)/3.0
# print(a, b)
#
# # abs() returns the absolute value, round() rounds a float to the nearest integer, and 2//3 is floor (integer) division, as shown below
# c = abs(-10)
# d = round(2/3)
# e = 2//3
# print(c, d, e)
| python |
from sklearn.preprocessing import StandardScaler as StdScaler
from niaaml.preprocessing.feature_transform.feature_transform_algorithm import (
FeatureTransformAlgorithm,
)
__all__ = ["StandardScaler"]
class StandardScaler(FeatureTransformAlgorithm):
r"""Implementation of feature standard scaling algorithm.
Date:
2020
Author:
Luka Pečnik
License:
MIT
Documentation:
https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html
See Also:
* :class:`niaaml.preprocessing.feature_transform.FeatureTransformAlgorithm`
"""
Name = "Standard Scaler"
def __init__(self, **kwargs):
r"""Initialize StandardScaler."""
super(StandardScaler, self).__init__()
self.__std_scaler = StdScaler()
def fit(self, x, **kwargs):
r"""Fit implemented transformation algorithm.
Arguments:
x (pandas.core.frame.DataFrame): n samples to fit transformation algorithm.
"""
self.__std_scaler.fit(x)
def transform(self, x, **kwargs):
r"""Transforms the given x data.
Arguments:
x (pandas.core.frame.DataFrame): Data to transform.
Returns:
pandas.core.frame.DataFrame: Transformed data.
"""
return self.__std_scaler.transform(x)
def to_string(self):
r"""User friendly representation of the object.
Returns:
str: User friendly representation of the object.
"""
return FeatureTransformAlgorithm.to_string(self).format(
name=self.Name,
args=self._parameters_to_string(self.__std_scaler.get_params()),
)
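

# Hedged usage sketch (not part of the original module): fit the wrapper on a small
# DataFrame and print the transformed values and the parameter summary.
if __name__ == "__main__":
    import pandas as pd

    df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 20.0, 30.0]})
    scaler = StandardScaler()
    scaler.fit(df)
    print(scaler.transform(df))
    print(scaler.to_string())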
| python |
import cairosvg
import cv2
import numpy as np
import sys
from PIL import Image
# board = 'stm32tiny'
board = 'HermitL'
board = 'HermitR'
board = 'ZoeaR'
layer = 'F_Paste'
layer = 'B_Paste'
root = '/Users/akihiro/repos/Hermit/{}/'.format( board )
path_png = root + 'layer/{}-{}.png'.format( board, layer )
path_bmp = root + 'layer/{}-{}.bmp'.format( board, layer )
if __name__ == '__main__':
png = Image.open( path_png )
w, h = png.size
print( f'png size = {w} x {h}' )
w2 = int( (w + 7) / 8 ) * 8
h2 = int( (h + 7) / 8 ) * 8
print( f'png size2 = {w2} x {h2}' )
r, g, b, a = png.split()
print( type( a ) )
# img = Image.merge("RGB", (r, g, b))
v = np.array( a )
v = 255 - v
a = Image.fromarray( v )
img = Image.merge("RGB", (a, a, a))
bmp = Image.new("RGB", (w2, h2), (255, 255, 255))
bmp.paste( img, ((w2 - w) >> 1, (h2 - h) >> 1) )
bmp.save( path_bmp )
| python |
import sys
import optparse
from .generate_pyt_meta import meta_toolbox
def parse_options(args=None, values=None):
"""
Define and parse `optparse` options for command-line usage.
"""
usage = """%prog [options] [TOOLBOX_PATH]"""
desc = "Generate ArcGIS Metadata from markdown'd toolbox code. "
parser = optparse.OptionParser(usage=usage, description=desc)
parser.add_option("-y", "--yes", dest="yes", default=None, action='store_true',
help="Implicit confirmation to run")
(options, args) = parser.parse_args(args, values)
if len(args) == 0:
raise Exception("Input toolbox needed")
else:
input_file = args[0]
opts = {
'input': input_file,
'implicit_run': options.yes,
}
return opts
def run():
try:
import arcpy
except ImportError:
raise Exception("ArcPy is required to run this tool")
options = parse_options()
if options['implicit_run'] is None:
print("\n".join([
"",
"Your toolbox is imported using the imp module.",
"To avoid running unknown code, you should verify the toolbox contents prior to running this tool",
"",
]))
print("To confirm, re-run using the -y option.")
print(f"python -m PYT_Metadata {options['input']} -y")
sys.exit(1)
# Run
meta_toolbox(options['input'])
if __name__ == '__main__':
run() | python |
class readInfo:
    # Placeholder class: no attributes or methods implemented yet.
    pass

# -*- coding: utf-8 -*-
"""
Created on Fri Jul 23 17:47:14 2021
@author: keikei
"""
"""
Given a string s containing just the characters '(', ')', '{', '}', '[' and ']',
determine if the input string is valid.
An input string is valid if:
Open brackets must be closed by the same type of brackets.
Open brackets must be closed in the correct order.
"""
class Solution(object):
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
starts = set('[{(')
pairs = (('[', ']'), ('{', '}'), ('(', ')'))
check = []
for i in s:
if i in starts:
check.append(i)
elif (len(check) == 0) or ((check[-1], i) not in pairs):
return False
else:
check.pop()
return not check
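

# Quick illustrative check of the solution above (added for documentation):
if __name__ == "__main__":
    sol = Solution()
    print(sol.isValid("()[]{}"))   # True
    print(sol.isValid("(]"))       # False
    print(sol.isValid("([)]"))     # False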
| python |
class Supplier:
def __init__(self, location, frequency):
self.location = location
self.frequency = frequency | python |
import pstat
import copy
import support
from typed_math import pow, sqrt, exp, abs, fabs, log, round, pi
####################################
####### FREQUENCY STATS ##########
####################################
def itemfreq(inlist:List(float))->List(List(float)):
"""
Returns a list of pairs. Each pair consists of one of the scores in inlist
    and its frequency count. Assumes a 1D list is passed.
Usage: litemfreq(inlist)
Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
"""
scores = pstat.unique(inlist)
scores.sort()
freq = []
for item in scores:
freq.append(inlist.count(item))
return pstat.abut(scores, freq)
def scoreatpercentile (inlist:List(float), percent:float)->float:
"""
Returns the score at a given percentile relative to the distribution
given by inlist.
Usage: lscoreatpercentile(inlist,percent)
"""
if percent > 1:
#print("\nDividing percent>1 by 100 in lscoreatpercentile().\n")
percent = percent / 100.0
targetcf = percent*len(inlist)
h, lrl, binsize, extras = histogram(inlist,10,[0,max(inlist)])
cumhist = support.cumsum(copy.deepcopy(h))
for i in range(len(cumhist)):
if cumhist[i] >= targetcf:
break
score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
return score
def percentileofscore (inlist:List(float), score:int)->float:
"""
Returns the percentile value of a score relative to the distribution
given by inlist. Formula depends on the values used to histogram the data(!).
Usage: lpercentileofscore(inlist,score,histbins=10,defaultlimits=None)
"""
histbins=10 #bg: was default argument
defaultlimits=[0,max(inlist)] #None #bg: was a default argument
h, lrl, binsize, extras = histogram(inlist,histbins,defaultlimits)
cumhist = support.cumsum(copy.deepcopy(h))
i = int((score - lrl)/float(binsize))
pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inlist)) * 100
return pct
def histogram (inlist:List(float),numbins:int,defaultreallimits:(float,float))->(List(int),float,float,int):
"""
Returns (i) a list of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. If no sequence object
is given for defaultreallimits, the routine picks (usually non-pretty) bins
spanning all the numbers in the inlist.
Usage: lhistogram (inlist, numbins=10, defaultreallimits=None,suppressoutput=0)
Returns: list of bin values, lowerreallimit, binsize, extrapoints
"""
printextras=0 #bg: was default argument
if (defaultreallimits != None):
if type(defaultreallimits) not in [list,tuple] or len(defaultreallimits)==1: # only one limit given, assumed to be lower one & upper is calc'd
lowerreallimit = defaultreallimits
upperreallimit = 1.000001 * max(inlist)
else: # assume both limits given
lowerreallimit = defaultreallimits[0]
upperreallimit = defaultreallimits[1]
binsize = (upperreallimit-lowerreallimit)/float(numbins)
else: # no limits given for histogram, both must be calc'd
estbinwidth=(max(inlist)-min(inlist))/float(numbins) +1e-6 #1=>cover all
binsize = ((max(inlist)-min(inlist)+estbinwidth))/float(numbins)
lowerreallimit = min(inlist) - binsize/2 #lower real limit,1st bin
bins = [0]*(numbins)
extrapoints = 0
for num in inlist:
try:
if (num-lowerreallimit) < 0:
extrapoints = extrapoints + 1
else:
bintoincrement = int((num-lowerreallimit)/float(binsize))
bins[bintoincrement] = bins[bintoincrement] + 1
except:
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
print('\nPoints outside given histogram range =',extrapoints)
return (bins, lowerreallimit, binsize, extrapoints)
def cumfreq(inlist:List(float))->(List(int),float,float,int):
"""
Returns a cumulative frequency histogram, using the histogram function.
Usage: lcumfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
numbins=10 #bg: was optional argument
defaultreallimits=[0,max(inlist)] #None #bg# was optional argument
h,l,b,e = histogram(inlist,numbins,defaultreallimits)
cumhist = support.cumsum(copy.deepcopy(h))
return cumhist,l,b,e
def relfreq(inlist:List(float))->(List(float),float,float,int):
"""
Returns a relative frequency histogram, using the histogram function.
Usage: lrelfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
numbins=10 #bg: was optional argument
defaultreallimits=[0,max(inlist)] #None #bg: was optional argument
h,l,b,e = histogram(inlist,numbins,defaultreallimits)
#bg#h=dyn(h)
h = h
for i in range(len(h)):
h[i] = h[i]/float(len(inlist))
return h,l,b,e
| python |
from app_base import *
from app_data import *
import etk_helper
@api.route('/projects/<project_name>/actions/project_config')
class ActionProjectConfig(Resource):
@requires_auth
def post(self, project_name): # frontend needs to fresh to get all configs again
if project_name not in data:
return rest.not_found('project {} not found'.format(project_name))
try:
parse = reqparse.RequestParser()
parse.add_argument('file_data', type=werkzeug.FileStorage, location='files')
args = parse.parse_args()
# save to tmp path and test
tmp_project_config_path = os.path.join(get_project_dir_path(project_name),
'working_dir/uploaded_project_config.tar.gz')
tmp_project_config_extracted_path = os.path.join(get_project_dir_path(project_name),
'working_dir/uploaded_project_config')
args['file_data'].save(tmp_project_config_path)
with tarfile.open(tmp_project_config_path, 'r:gz') as tar:
tar.extractall(tmp_project_config_extracted_path)
# master_config
with open(os.path.join(tmp_project_config_extracted_path, 'master_config.json'), 'r') as f:
new_master_config = json.loads(f.read())
# TODO: validation and sanitizing
# overwrite indices
new_master_config['index'] = {
'sample': project_name,
'full': project_name + '_deployed',
'version': 0
}
# overwrite configuration
if 'configuration' not in new_master_config:
new_master_config['configuration'] = dict()
new_master_config['configuration']['sandpaper_sample_url'] \
= data[project_name]['master_config']['configuration']['sandpaper_sample_url']
new_master_config['configuration']['sandpaper_full_url'] \
= data[project_name]['master_config']['configuration']['sandpaper_full_url']
# overwrite previous master config
data[project_name]['master_config'] = new_master_config
update_master_config_file(project_name)
# replace dependencies
distutils.dir_util.copy_tree(
os.path.join(tmp_project_config_extracted_path, 'glossaries'),
os.path.join(get_project_dir_path(project_name), 'glossaries')
)
distutils.dir_util.copy_tree(
os.path.join(tmp_project_config_extracted_path, 'spacy_rules'),
os.path.join(get_project_dir_path(project_name), 'spacy_rules')
)
distutils.dir_util.copy_tree(
os.path.join(tmp_project_config_extracted_path, 'landmark_rules'),
os.path.join(get_project_dir_path(project_name), 'landmark_rules')
)
distutils.dir_util.copy_tree(
os.path.join(tmp_project_config_extracted_path, 'working_dir/generated_em'),
os.path.join(get_project_dir_path(project_name), 'working_dir/generated_em')
)
distutils.dir_util.copy_tree(
os.path.join(tmp_project_config_extracted_path, 'working_dir/additional_ems'),
os.path.join(get_project_dir_path(project_name), 'working_dir/additional_ems')
)
# etl config
tmp_etl_config = os.path.join(tmp_project_config_extracted_path,
'working_dir/etl_config.json')
if os.path.exists(tmp_etl_config):
shutil.copyfile(tmp_etl_config, os.path.join(get_project_dir_path(project_name),
'working_dir/etl_config.json'))
# landmark
tmp_landmark_config_path = os.path.join(tmp_project_config_extracted_path,
'working_dir/_landmark_config.json')
if os.path.exists(tmp_landmark_config_path):
with open(tmp_landmark_config_path, 'r') as f:
ActionProjectConfig.landmark_import(project_name, f.read())
return rest.created()
except Exception as e:
logger.exception('fail to import project config')
return rest.internal_error('fail to import project config')
finally:
# always clean up, or some of the files may affect new uploaded files
if os.path.exists(tmp_project_config_path):
os.remove(tmp_project_config_path)
if os.path.exists(tmp_project_config_extracted_path):
shutil.rmtree(tmp_project_config_extracted_path)
def get(self, project_name):
if project_name not in data:
return rest.not_found('project {} not found'.format(project_name))
export_path = os.path.join(get_project_dir_path(project_name), 'working_dir/project_config.tar.gz')
# tarzip file
with tarfile.open(export_path, 'w:gz') as tar:
tar.add(os.path.join(get_project_dir_path(project_name), 'master_config.json'),
arcname='master_config.json')
tar.add(os.path.join(get_project_dir_path(project_name), 'glossaries'),
arcname='glossaries')
tar.add(os.path.join(get_project_dir_path(project_name), 'spacy_rules'),
arcname='spacy_rules')
tar.add(os.path.join(get_project_dir_path(project_name), 'landmark_rules'),
arcname='landmark_rules')
tar.add(os.path.join(get_project_dir_path(project_name), 'working_dir/generated_em'),
arcname='working_dir/generated_em')
tar.add(os.path.join(get_project_dir_path(project_name), 'working_dir/additional_ems'),
arcname='working_dir/additional_ems')
# etl config
etl_config_path = os.path.join(get_project_dir_path(project_name),
'working_dir/etl_config.json')
if os.path.exists(etl_config_path):
tar.add(etl_config_path, arcname='working_dir/etl_config.json')
# landmark
landmark_config = ActionProjectConfig.landmark_export(project_name)
if len(landmark_config) > 0:
landmark_config_path = os.path.join(
get_project_dir_path(project_name), 'working_dir/_landmark_config.json')
write_to_file(json.dumps(landmark_config), landmark_config_path)
tar.add(landmark_config_path, arcname='working_dir/_landmark_config.json')
export_file_name = project_name + '_' + time.strftime("%Y%m%d%H%M%S") + '.tar.gz'
ret = send_file(export_path, mimetype='application/gzip',
as_attachment=True, attachment_filename=export_file_name)
ret.headers['Access-Control-Expose-Headers'] = 'Content-Disposition'
return ret
@staticmethod
def landmark_export(project_name):
try:
url = config['landmark']['export'].format(project_name=project_name)
resp = requests.post(url)
return resp.json()
except Exception as e:
logger.exception('landmark export error')
return list()
@staticmethod
def landmark_import(project_name, landmark_config):
try:
url = config['landmark']['import'].format(project_name=project_name)
resp = requests.post(url, data=landmark_config)
except Exception as e:
logger.exception('landmark import error')
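# Hedged client-side sketch (host, port and filename are assumptions): the two
# handlers above round-trip a project's configuration as a tar.gz, so from the
# outside one could do roughly:
#   import requests
#   base = 'http://<mydig-host>:<port>/projects/myproject/actions/project_config'
#   # export: GET returns a gzipped tarball attachment
#   r = requests.get(base)
#   open('myproject_config.tar.gz', 'wb').write(r.content)
#   # import: POST the same tarball back as multipart field 'file_data'
#   requests.post(base, files={'file_data': open('myproject_config.tar.gz', 'rb')})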
# @api.route('/projects/<project_name>/actions/etk_filters')
# class ActionProjectEtkFilters(Resource):
# @requires_auth
# def post(self, project_name):
# if project_name not in data:
# return rest.not_found('project {} not found'.format(project_name))
#
# input = request.get_json(force=True)
# filtering_rules = input.get('filters', {})
#
# try:
# # validation
# for tld, rules in filtering_rules.items():
# if tld.strip() == '' or not isinstance(rules, list):
# return rest.bad_request('Invalid TLD')
# for rule in rules:
# if 'field' not in rule or rule['field'].strip() == '':
# return rest.bad_request('Invalid Field in TLD: {}'.format(tld))
# if 'action' not in rule or rule['action'] not in ('no_action', 'keep', 'discard'):
# return rest.bad_request('Invalid action in TLD: {}, Field {}'.format(tld, rule['field']))
# if 'regex' not in rule:
# return rest.bad_request('Invalid regex in TLD: {}, Field {}'.format(tld, rule['field']))
# try:
# re.compile(rule['regex'])
# except re.error:
# return rest.bad_request(
# 'Invalid regex in TLD: {}, Field: {}'.format(tld, rule['field']))
#
# # write to file
# dir_path = os.path.join(get_project_dir_path(project_name), 'working_dir')
# if not os.path.exists(dir_path):
# os.mkdir(dir_path)
# config_path = os.path.join(dir_path, 'etk_filters.json')
# write_to_file(json.dumps(input), config_path)
# return rest.created()
# except Exception as e:
# logger.exception('fail to import ETK filters')
# return rest.internal_error('fail to import ETK filters')
#
# def get(self, project_name):
# if project_name not in data:
# return rest.not_found('project {} not found'.format(project_name))
#
# ret = {'filters': {}}
# config_path = os.path.join(get_project_dir_path(project_name),
# 'working_dir/etk_filters.json')
# if os.path.exists(config_path):
# with open(config_path, 'r') as f:
# ret = json.loads(f.read())
#
# return ret
@api.route('/projects/<project_name>/actions/<action_name>')
class Actions(Resource):
@requires_auth
def post(self, project_name, action_name):
if project_name not in data:
return rest.not_found('project {} not found'.format(project_name))
# if action_name == 'add_data':
# return self._add_data(project_name)
if action_name == 'desired_num':
return self.update_desired_num(project_name)
elif action_name == 'extract':
return self.etk_extract(project_name)
elif action_name == 'recreate_mapping':
return self.recreate_mapping(project_name)
elif action_name == 'landmark_extract':
return self.landmark_extract(project_name)
elif action_name == 'reload_blacklist':
return self.reload_blacklist(project_name)
else:
return rest.not_found('action {} not found'.format(action_name))
@requires_auth
def get(self, project_name, action_name):
if project_name not in data:
return rest.not_found('project {} not found'.format(project_name))
if action_name == 'extract':
return self._get_extraction_status(project_name)
else:
return rest.not_found('action {} not found'.format(action_name))
@requires_auth
def delete(self, project_name, action_name):
if action_name == 'extract':
if not Actions._etk_stop(project_name):
return rest.internal_error('failed to kill_etk in ETL')
return rest.deleted()
@staticmethod
def _get_extraction_status(project_name):
ret = dict()
parser = reqparse.RequestParser()
parser.add_argument('value', type=str)
args = parser.parse_args()
if args['value'] is None:
args['value'] = 'all'
if args['value'] in ('all', 'etk_status'):
ret['etk_status'] = Actions._is_etk_running(project_name)
if args['value'] in ('all', 'tld_statistics'):
tld_list = dict()
with data[project_name]['locks']['status']:
for tld in data[project_name]['status']['total_docs'].keys():
if tld not in data[project_name]['status']['desired_docs']:
data[project_name]['status']['desired_docs'][tld] = 0
if tld in data[project_name]['status']['total_docs']:
tld_obj = {
'tld': tld,
'total_num': data[project_name]['status']['total_docs'][tld],
'es_num': 0,
'es_original_num': 0,
'desired_num': data[project_name]['status']['desired_docs'][tld]
}
tld_list[tld] = tld_obj
# query es count if doc exists
query = """
{
"aggs": {
"group_by_tld_original": {
"filter": {
"bool": {
"must_not": {
"term": {
"created_by": "etk"
}
}
}
},
"aggs": {
"grouped": {
"terms": {
"field": "tld.raw",
"size": 2147483647
}
}
}
},
"group_by_tld": {
"terms": {
"field": "tld.raw",
"size": 2147483647
}
}
},
"size":0
}
"""
es = ES(config['es']['sample_url'])
r = es.search(project_name, data[project_name]['master_config']['root_name'],
query, ignore_no_index=True, filter_path=['aggregations'])
if r is not None:
for obj in r['aggregations']['group_by_tld']['buckets']:
# check if tld is in uploaded file
tld = obj['key']
if tld not in tld_list:
tld_list[tld] = {
'tld': tld,
'total_num': 0,
'es_num': 0,
'es_original_num': 0,
'desired_num': 0
}
tld_list[tld]['es_num'] = obj['doc_count']
for obj in r['aggregations']['group_by_tld_original']['grouped']['buckets']:
# check if tld is in uploaded file
tld = obj['key']
if tld not in tld_list:
tld_list[tld] = {
'tld': tld,
'total_num': 0,
'es_num': 0,
'es_original_num': 0,
'desired_num': 0
}
tld_list[tld]['es_original_num'] = obj['doc_count']
ret['tld_statistics'] = list(tld_list.values())
return ret
@staticmethod
def _is_etk_running(project_name):
url = config['etl']['url'] + '/etk_status/' + project_name
resp = requests.get(url)
if resp.status_code // 100 != 2:
            return rest.internal_error('error in getting etk_status')
return resp.json()['etk_processes'] > 0
@staticmethod
def update_desired_num(project_name):
# {
# "tlds": {
# 'tld1': 100,
# 'tld2': 200
# }
# }
input = request.get_json(force=True)
tld_list = input.get('tlds', {})
for tld, desired_num in tld_list.items():
desired_num = max(desired_num, 0)
desired_num = min(desired_num, 999999999)
with data[project_name]['locks']['status']:
if tld not in data[project_name]['status']['desired_docs']:
data[project_name]['status']['desired_docs'][tld] = dict()
data[project_name]['status']['desired_docs'][tld] = desired_num
set_status_dirty(project_name)
return rest.created()
@staticmethod
def landmark_extract(project_name):
# {
# "tlds": {
# 'tld1': 100,
# 'tld2': 200
# }
# }
input = request.get_json(force=True)
tld_list = input.get('tlds', {})
payload = dict()
for tld, num_to_run in tld_list.items():
if tld in data[project_name]['data']:
# because the catalog can be huge, can not use a simple pythonic random here
                catalog_size = len(data[project_name]['data'][tld])
                num_to_select = min(num_to_run, catalog_size)
                selected = set()
                while len(selected) < num_to_select:
                    # draw candidate indices over the whole catalog, not just
                    # the first num_to_select entries
                    cand_num = random.randint(0, catalog_size - 1)
                    if cand_num not in selected:
                        selected.add(cand_num)
# construct payload
idx = 0
for doc_id, catalog_obj in data[project_name]['data'][tld].items():
if idx not in selected:
idx += 1
continue
# payload format
# {
# "tld1": {"documents": [{doc_id, raw_content_path, url}, {...}, ...]},
# }
payload[tld] = payload.get(tld, dict())
payload[tld]['documents'] = payload[tld].get('documents', list())
catalog_obj['doc_id'] = doc_id
payload[tld]['documents'].append(catalog_obj)
idx += 1
url = config['landmark']['create'].format(project_name=project_name)
resp = requests.post(url, json.dumps(payload), timeout=10)
if resp.status_code // 100 != 2:
return rest.internal_error('Landmark error: {}'.format(resp.status_code))
return rest.accepted()
@staticmethod
def _generate_etk_config(project_name):
glossary_dir = os.path.join(get_project_dir_path(project_name), 'glossaries')
inferlink_dir = os.path.join(get_project_dir_path(project_name), 'landmark_rules')
working_dir = os.path.join(get_project_dir_path(project_name), 'working_dir')
spacy_dir = os.path.join(get_project_dir_path(project_name), 'spacy_rules')
content = etk_helper.generate_base_etk_module(
data[project_name]['master_config'],
glossary_dir=glossary_dir,
inferlink_dir=inferlink_dir,
working_dir=working_dir,
spacy_dir=spacy_dir
)
revision = hashlib.sha256(content.encode('utf-8')).hexdigest().upper()[:6]
        output_path = os.path.join(get_project_dir_path(project_name),
                                   'working_dir/generated_em', 'em_base.py')
archive_output_path = os.path.join(get_project_dir_path(project_name),
'working_dir/generated_em', 'archive_em_{}.py'.format(revision))
additional_ems_path = os.path.join(get_project_dir_path(project_name), 'working_dir/additional_ems')
generated_additional_ems_path = os.path.join(get_project_dir_path(project_name),
'working_dir/generated_additional_ems')
etk_helper.generated_additional_ems(additional_ems_path, generated_additional_ems_path, glossary_dir,
inferlink_dir, working_dir, spacy_dir)
write_to_file(content, output_path)
write_to_file(content, archive_output_path)
@staticmethod
def recreate_mapping(project_name):
logger.info('recreate_mapping')
# 1. kill etk (and clean up previous queue)
data[project_name]['data_pushing_worker'].stop_adding_data = True
if not Actions._etk_stop(project_name, clean_up_queue=True):
return rest.internal_error('failed to kill_etk in ETL')
# 2. create etk config and snapshot
Actions._generate_etk_config(project_name)
# add config for etl
# when creating kafka container, group id is not there. set consumer to read from start.
etl_config_path = os.path.join(get_project_dir_path(project_name), 'working_dir/etl_config.json')
if not os.path.exists(etl_config_path):
etl_config = {
"input_args": {
"auto_offset_reset": "earliest",
"fetch_max_bytes": 52428800,
"max_partition_fetch_bytes": 10485760,
"max_poll_records": 10
},
"output_args": {
"max_request_size": 10485760,
"compression_type": "gzip"
}
}
write_to_file(json.dumps(etl_config, indent=2), etl_config_path)
# 3. sandpaper
# 3.1 delete previous index
url = '{}/{}'.format(
config['es']['sample_url'],
project_name
)
try:
resp = requests.delete(url, timeout=10)
except:
pass # ignore no index error
# 3.2 create new index
url = '{}/mapping?url={}&project={}&index={}&endpoint={}'.format(
config['sandpaper']['url'],
config['sandpaper']['ws_url'],
project_name,
data[project_name]['master_config']['index']['sample'],
config['es']['sample_url']
)
resp = requests.put(url, timeout=10)
if resp.status_code // 100 != 2:
return rest.internal_error('failed to create index in sandpaper')
# 3.3 switch index
url = '{}/config?url={}&project={}&index={}&endpoint={}'.format(
config['sandpaper']['url'],
config['sandpaper']['ws_url'],
project_name,
data[project_name]['master_config']['index']['sample'],
config['es']['sample_url']
)
resp = requests.post(url, timeout=10)
if resp.status_code // 100 != 2:
return rest.internal_error('failed to switch index in sandpaper')
# 4. clean up added data status
logger.info('re-add data')
with data[project_name]['locks']['status']:
if 'added_docs' not in data[project_name]['status']:
data[project_name]['status']['added_docs'] = dict()
for tld in data[project_name]['status']['added_docs'].keys():
data[project_name]['status']['added_docs'][tld] = 0
with data[project_name]['locks']['data']:
for tld in data[project_name]['data'].keys():
for doc_id in data[project_name]['data'][tld]:
data[project_name]['data'][tld][doc_id]['add_to_queue'] = False
set_status_dirty(project_name)
# 5. restart extraction
data[project_name]['data_pushing_worker'].stop_adding_data = False
return Actions.etk_extract(project_name)
@staticmethod
def reload_blacklist(project_name):
if project_name not in data:
return rest.not_found('project {} not found'.format(project_name))
# 1. kill etk
if not Actions._etk_stop(project_name):
return rest.internal_error('failed to kill_etk in ETL')
# 2. generate etk config
Actions._generate_etk_config(project_name)
# 3. fetch and re-add data
        t = threading.Thread(target=Actions._reload_blacklist_worker, args=(project_name,), name='reload_blacklist')
t.start()
data[project_name]['threads'].append(t)
return rest.accepted()
@staticmethod
def _reload_blacklist_worker(project_name):
# copy here to avoid modification while iteration
for field_name, field_obj in data[project_name]['master_config']['fields'].items():
if 'blacklists' not in field_obj or len(field_obj['blacklists']) == 0:
continue
# get all stop words and generate query
# only use the last blacklist if there are multiple blacklists
blacklist = data[project_name]['master_config']['fields'][field_name]['blacklists'][-1]
file_path = os.path.join(get_project_dir_path(project_name),
'glossaries', '{}.txt'.format(blacklist))
query_conditions = []
with open(file_path, 'r') as f:
for line in f:
key = line.strip()
if len(key) == 0:
continue
query_conditions.append(
'{{ "term": {{"knowledge_graph.{field_name}.key": "{key}"}} }}'
.format(field_name=field_name, key=key))
query = """
{{
"size": 1000,
"query": {{
"bool": {{
"should": [{conditions}]
}}
}},
"_source": ["doc_id", "tld"]
}}
""".format(conditions=','.join(query_conditions))
logger.debug(query)
# init query
scroll_alive_time = '1m'
es = ES(config['es']['sample_url'])
r = es.search(project_name, data[project_name]['master_config']['root_name'], query,
params={'scroll': scroll_alive_time}, ignore_no_index=False)
if r is None:
return
scroll_id = r['_scroll_id']
Actions._re_add_docs(r, project_name)
# scroll queries
while True:
# use the es object here directly
r = es.es.scroll(scroll_id=scroll_id, scroll=scroll_alive_time)
if r is None:
break
if len(r['hits']['hits']) == 0:
break
Actions._re_add_docs(r, project_name)
Actions.etk_extract(project_name)
@staticmethod
def _re_add_docs(resp, project_name):
input_topic = project_name + '_in'
for obj in resp['hits']['hits']:
doc_id = obj['_source']['doc_id']
tld = obj['_source']['tld']
try:
logger.info('re-add doc %s (%s)', doc_id, tld)
ret, msg = Actions._publish_to_kafka_input_queue(
doc_id, data[project_name]['data'][tld][doc_id], g_vars['kafka_producer'], input_topic)
if not ret:
logger.error('Error of re-adding data to Kafka: %s', msg)
except Exception as e:
logger.exception('error in re_add_docs')
@staticmethod
def etk_extract(project_name, clean_up_queue=False):
if Actions._is_etk_running(project_name):
return rest.exists('already running')
# etk_config_file_path = os.path.join(
# get_project_dir_path(project_name), 'working_dir/etk_config.json')
# if not os.path.exists(etk_config_file_path):
# return rest.not_found('No etk config')
# recreate etk config every time
Actions._generate_etk_config(project_name)
url = '{}/{}'.format(
config['es']['sample_url'],
project_name
)
try:
resp = requests.get(url, timeout=10)
if resp.status_code // 100 != 2:
return rest.not_found('No es index')
except Exception as e:
return rest.not_found('No es index')
url = config['etl']['url'] + '/run_etk'
payload = {
'project_name': project_name,
'number_of_workers': config['etl']['number_of_workers']
}
if clean_up_queue:
payload['input_offset'] = 'seek_to_end'
payload['output_offset'] = 'seek_to_end'
resp = requests.post(url, json.dumps(payload), timeout=config['etl']['timeout'])
if resp.status_code // 100 != 2:
return rest.internal_error('failed to run_etk in ETL')
return rest.accepted()
@staticmethod
def _etk_stop(project_name, wait_till_kill=True, clean_up_queue=False):
url = config['etl']['url'] + '/kill_etk'
payload = {
'project_name': project_name
}
if clean_up_queue:
payload['input_offset'] = 'seek_to_end'
payload['output_offset'] = 'seek_to_end'
resp = requests.post(url, json.dumps(payload), timeout=config['etl']['timeout'])
if resp.status_code // 100 != 2:
logger.error('failed to kill_etk in ETL')
return False
if wait_till_kill:
while True:
time.sleep(5)
if not Actions._is_etk_running(project_name):
break
return True
@staticmethod
def _publish_to_kafka_input_queue(doc_id, catalog_obj, producer, topic):
try:
with open(catalog_obj['json_path'], 'r', encoding='utf-8') as f:
doc_obj = json.loads(f.read())
with open(catalog_obj['raw_content_path'], 'r', encoding='utf-8') as f:
doc_obj['raw_content'] = f.read() # .decode('utf-8', 'ignore')
except Exception as e:
logger.exception('error in reading file from catalog')
return False, 'error in reading file from catalog'
try:
r = producer.send(topic, doc_obj)
r.get(timeout=60) # wait till sent
logger.info('sent %s to topic %s', doc_id, topic)
except Exception as e:
logger.exception('error in sending data to kafka queue')
return False, 'error in sending data to kafka queue'
return True, ''
class DataPushingWorker(threading.Thread):
def __init__(self, project_name, sleep_interval):
super(DataPushingWorker, self).__init__()
self.project_name = project_name
self.exit_signal = False
self.stop_adding_data = False
self.is_adding_data = False
self.sleep_interval = sleep_interval
# set up input kafka
self.producer = g_vars['kafka_producer']
self.input_topic = project_name + '_in'
def get_status(self):
return {
'stop_adding_data': self.stop_adding_data,
'is_adding_data': self.is_adding_data,
'sleep_interval': self.sleep_interval
}
def run(self):
logger.info('thread DataPushingWorker running... %s', self.project_name)
while not self.exit_signal:
if not self.stop_adding_data:
self._add_data_worker(self.project_name, self.producer, self.input_topic)
# wait interval
t = self.sleep_interval * 10
while t > 0 and not self.exit_signal:
time.sleep(0.1)
t -= 1
def _add_data_worker(self, project_name, producer, input_topic):
got_lock = data[project_name]['locks']['data'].acquire(False)
try:
if not got_lock or self.stop_adding_data:
return
for tld in data[project_name]['data'].keys():
if self.stop_adding_data:
break
with data[project_name]['locks']['status']:
if tld not in data[project_name]['status']['added_docs']:
data[project_name]['status']['added_docs'][tld] = 0
if tld not in data[project_name]['status']['desired_docs']:
data[project_name]['status']['desired_docs'][tld] = \
data[project_name]['master_config'].get('default_desired_num', 0)
if tld not in data[project_name]['status']['total_docs']:
data[project_name]['status']['total_docs'][tld] = 0
added_num = data[project_name]['status']['added_docs'][tld]
total_num = data[project_name]['status']['total_docs'][tld]
desired_num = data[project_name]['status']['desired_docs'][tld]
desired_num = min(desired_num, total_num)
# only add docs to queue if desired num is larger than added num
if desired_num > added_num:
self.is_adding_data = True
# update mark in catalog
num_to_add = desired_num - added_num
added_num_this_round = 0
for doc_id in data[project_name]['data'][tld].keys():
if not self.stop_adding_data:
# finished
if num_to_add <= 0:
break
# already added
if data[project_name]['data'][tld][doc_id]['add_to_queue']:
continue
# mark data
data[project_name]['data'][tld][doc_id]['add_to_queue'] = True
num_to_add -= 1
added_num_this_round += 1
# publish to kafka queue
ret, msg = Actions._publish_to_kafka_input_queue(
doc_id, data[project_name]['data'][tld][doc_id], producer, input_topic)
if not ret:
logger.error('Error of pushing data to Kafka: %s', msg)
# roll back
data[project_name]['data'][tld][doc_id]['add_to_queue'] = False
num_to_add += 1
added_num_this_round -= 1
self.is_adding_data = False
if added_num_this_round > 0:
with data[project_name]['locks']['status']:
data[project_name]['status']['added_docs'][tld] = added_num + added_num_this_round
set_catalog_dirty(project_name)
set_status_dirty(project_name)
except Exception as e:
logger.exception('exception in Actions._add_data_worker() data lock')
finally:
if got_lock:
data[project_name]['locks']['data'].release()
class MemoryDumpWorker(threading.Thread):
def __init__(self, project_name, sleep_interval, function, kwargs=dict()):
super(MemoryDumpWorker, self).__init__()
self.project_name = project_name
self.exit_signal = False
init_time = time.time()
self.file_timestamp = init_time
self.memory_timestamp = init_time
self.sleep_interval = sleep_interval
self.function = function
self.kwargs = kwargs
def get_status(self):
return {
'sleep_interval': self.sleep_interval,
'file_timestamp': self.file_timestamp,
'memory_timestamp': self.memory_timestamp,
'is_dirty': self.file_timestamp != self.memory_timestamp
}
def run_function(self):
memory_timestamp = self.memory_timestamp
if self.file_timestamp < memory_timestamp:
self.function(**self.kwargs)
self.file_timestamp = memory_timestamp
def run(self):
logger.info('thread MemoryDumpWorker (%s) running... %s', self.function.__name__, self.project_name)
while not self.exit_signal:
self.run_function()
# wait interval
t = self.sleep_interval * 10
while t > 0 and not self.exit_signal:
time.sleep(0.1)
t -= 1
# make sure memory data is dumped
self.run_function()
def start_threads_and_locks(project_name):
data[project_name]['locks']['data'] = threading.Lock()
data[project_name]['locks']['status'] = threading.Lock()
data[project_name]['locks']['catalog_log'] = threading.Lock()
data[project_name]['data_pushing_worker'] = DataPushingWorker(
project_name, config['data_pushing_worker_backoff_time'])
data[project_name]['data_pushing_worker'].start()
data[project_name]['status_memory_dump_worker'] = MemoryDumpWorker(
project_name, config['status_memory_dump_backoff_time'],
update_status_file, kwargs={'project_name': project_name})
data[project_name]['status_memory_dump_worker'].start()
data[project_name]['catalog_memory_dump_worker'] = MemoryDumpWorker(
project_name, config['catalog_memory_dump_backoff_time'],
update_catalog_file, kwargs={'project_name': project_name})
data[project_name]['catalog_memory_dump_worker'].start()
def stop_threads_and_locks(project_name):
try:
data[project_name]['data_pushing_worker'].exit_signal = True
data[project_name]['data_pushing_worker'].join()
data[project_name]['status_memory_dump_worker'].exit_signal = True
data[project_name]['status_memory_dump_worker'].join()
data[project_name]['catalog_memory_dump_worker'].exit_signal = True
data[project_name]['catalog_memory_dump_worker'].join()
logger.info('threads of project %s exited', project_name)
except:
pass
| python |
import json
from ..customlogging import CustomLog
class Tradier(object):
def __init__(self, httpclient, httpclient_streaming, token):
self.httpclient_streaming = httpclient_streaming
self.streams = Tradier.Streams(self)
self.httpclient = httpclient
self.token = token
self.user = Tradier.User(self)
self.accounts = Tradier.Accounts(self)
self.markets = Tradier.Markets(self)
self.fundamentals = Tradier.Fundamentals(self)
self.options = Tradier.Options(self)
self.watchlists = Tradier.Watchlists(self)
def request_streaming(
self,
method,
path,
headers=None,
params=None,
data=None,
callback=None):
log_msg = "callback", callback
headers = headers or {}
headers['Authorization'] = 'Bearer %s' % self.token
headers['Accept'] = 'application/json'
def base_callback(response):
if response.code != 200:
raise Exception(response.code, response.body)
return json.loads(response.body)
        if callback is None:
cb = base_callback
else:
cb = lambda x: callback(base_callback(x))
log_msg = cb # <function <lambda> at 0x10a620b18>
log_msg = method # GET/POST
log_msg = path # markets/events/session
log_msg = headers # {'Accept': 'application/json', 'Authorization': u'Bearer JmIr55aKnCmigEeEsClRnUvMtPEK'}
log_msg = params # None
log_msg = data # None
return self.httpclient_streaming.request(
cb,
method,
path,
headers=headers,
params=params,
data=data)
def request(
self,
method,
path,
headers=None,
params=None,
data=None,
callback=None):
headers = headers or {}
headers['Authorization'] = 'Bearer %s' % self.token
headers['Accept'] = 'application/json'
def base_callback(response):
if response.code != 200:
raise Exception(response.code, response.body)
return json.loads(response.body)
        if callback is None:
cb = base_callback
else:
cb = lambda x: callback(base_callback(x))
log_msg = cb # <function <lambda> at 0x10a620b18>
log_msg = method # GET
log_msg = path # markets/events/session
log_msg = headers # {'Accept': 'application/json', 'Authorization': u'Bearer JmIr55aKnCmigEeEsClRnUvMtPEK'}
log_msg = params # None
log_msg = data # None
return self.httpclient.request(
cb,
method,
path,
headers=headers,
params=params,
data=data)
class Streams(object):
# TESTING
def __init__(self, agent):
self.log = CustomLog()
self.agent = agent
def auth(self):
# Get the sessionid required for connecting to the stream
results = self.agent.request('POST', 'markets/events/session')
self.log.debug("Results: ".center(10, "-"))
self.log.debug(results)
return results['stream']['sessionid'].encode()
def start_stream(self, symbols):
def callback(response):
quote = response['quotes'].get('quote', [])
if not isinstance(quote, list):
quote = [quote]
return quote
# We're getting a stream with a POST
sessionid = self.auth()
log_msg = sessionid
response = self.agent.request_streaming(
'POST',
'markets/events',
params= \
{
'sessionid': sessionid,
'symbols': ','.join(x.upper() for x in symbols),
'filter': 'quote'
},
callback=callback)
return response
class User(object):
def __init__(self, agent):
self.agent = agent
def profile(self):
response = self.agent.request('GET', 'user/profile')
return response
def balances(self):
response = self.agent.request('GET', 'user/balances')
return response
class Accounts(object):
def __init__(self, agent):
self.agent = agent
def orders(self, account_id):
response = self.agent.request(
'GET', 'accounts/%s/orders' % account_id)
return response['orders']['order']
def order(self, account_id, order_id):
response = self.agent.request(
'GET', 'accounts/%s/orders/%s' % (account_id, order_id))
return response
class Markets(object):
def __init__(self, agent):
self.agent = agent
def quotes(self, symbols):
def callback(response):
quote = response['quotes'].get('quote', [])
if not isinstance(quote, list):
quote = [quote]
return quote
return self.agent.request(
'GET',
'markets/quotes',
params={'symbols': ','.join(symbols)},
callback=callback)
class Fundamentals(object):
def __init__(self, agent):
self.agent = agent
def calendars(self, symbols):
def callback(response):
return response
return self.agent.request(
'GET',
'markets/fundamentals/calendars',
params={'symbols': ','.join(x.upper() for x in symbols)},
callback=callback)
class Options(object):
def __init__(self, agent):
self.agent = agent
def expirations(self, symbol):
return self.agent.request(
'GET',
'markets/options/expirations',
params={'symbol': symbol},
callback=(lambda x: x['expirations']['date']))
def chains(self, symbol, expiration):
def callback(response):
if response['options']:
return response['options']['option']
return []
return self.agent.request(
'GET',
'markets/options/chains',
params={'symbol': symbol, 'expiration': expiration},
callback=callback)
class Watchlists(object):
def __init__(self, agent):
self.agent = agent
def __call__(self):
response = self.agent.request('GET', 'watchlists')
return response['watchlists']['watchlist']
def get(self, watchlist_id):
response = self.agent.request(
'GET', 'watchlists/%s' % watchlist_id)
return response['watchlist']
def create(self, name, *symbols):
response = self.agent.request(
'POST',
'watchlists',
params={'name': name, 'symbols': ','.join(list(symbols))})
return response['watchlist']
def delete(self, watchlist_id):
response = self.agent.request(
'DELETE', 'watchlists/%s' % watchlist_id)
return response['watchlists']['watchlist']
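# Hedged usage sketch (the http client objects are assumptions; they just need
# to expose request(callback, method, path, headers=..., params=..., data=...)
# as used above):
#   tradier = Tradier(httpclient, httpclient_streaming, token='...')
#   quotes = tradier.markets.quotes(['SPY', 'AAPL'])
#   dates = tradier.options.expirations('SPY')
#   chain = tradier.options.chains('SPY', dates[0])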
| python |
r"""
Core collapse supernova explosion engines: explodability as a function of
progenitor mass in solar masses as reported by the Sukhbold et al. (2016) [1]_
models.
**Signature**: from vice.yields.ccsne.engines import S16
.. versionadded:: 1.2.0
.. tip:: Instances of the ``engine`` class can be passed the keyword argument
``explodability`` to ``vice.yields.ccsne.fractional`` to calculate
IMF-averaged yields assuming a particular black hole landscape. The impact
of these assumptions is explored in Griffith et al. (2021) [2]_.
.. note:: For all explosion engines, progenitors with zero age main sequence
masses between 9 and 12 :math:`M_\odot` proceed according to the Z9.6
engine, while remaining masses explode or collapse according to the
associated engine. (See: Section 2.2.2 of Sukhbold et al. 2016)
Contents
--------
N20 : ``engine``
An engine characterized by the N20 explosion model.
S19p8 : ``engine``
An engine characterized by the S19p8 explosion model.
W15 : ``engine``
An engine characterized by the W15 explosion model.
W18 : ``engine``
An engine characterized by the W18 explosion model.
W20 : ``engine``
An engine characterized by the W20 explosion model.
.. [1] Sukhbold et al. (2016), ApJ, 821, 38
.. [2] Griffith et al. (2021), arxiv:2103.09837
"""
from __future__ import absolute_import
try:
__VICE_SETUP__
except NameError:
__VICE_SETUP__ = False
if not __VICE_SETUP__:
__all__ = ["N20", "S19p8", "W15", "W18", "W20", "test"]
from .N20 import N20
from .S19p8 import S19p8
from .W15 import W15
from .W18 import W18
from .W20 import W20
from .tests import test
# Instances of derived classes rather than derived classes themselves
N20 = N20()
S19p8 = S19p8()
W15 = W15()
W18 = W18()
W20 = W20()
else: pass
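# Hedged usage note (mirrors the module docstring's tip; the exact study string
# is an assumption): one of the engine instances above can be handed to
# vice.yields.ccsne.fractional via its explodability keyword, e.g.
#   from vice.yields.ccsne import fractional
#   y, err = fractional("fe", study="S16/W18", explodability=W18)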
| python |
#!/usr/bin/env python
# note: this script uses f-strings, so it needs Python 3.6+
import argparse
import glob
import json
def get_replacement_lines():
replacements = []
for file in glob.glob('./json/*.json'):
with open(file) as fp:
data = json.load(fp)
value = list(data.values())[0]
tags = value['bottle']['tags']
os_name = list(tags.keys())[0]
sha256 = tags[os_name]['sha256']
replacements.append(f' sha256 "{sha256}" => :{os_name}\n')
return replacements
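# For reference, a bottle JSON file under ./json/ is assumed to look roughly
# like this (single top-level formula key; shape inferred from the parsing
# above, not taken from Homebrew docs):
# {
#   "myformula": {
#     "bottle": {
#       "tags": {
#         "catalina": {"sha256": "abc123..."}
#       }
#     }
#   }
# }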
def main():
parser = argparse.ArgumentParser()
parser.add_argument('formula_path')
parsed = parser.parse_args()
path = parsed.formula_path
replacements = get_replacement_lines()
assert len(replacements) > 0, 'No replacement lines found!'
to_emit = []
replaced = False
with open(path) as fp:
for line in fp:
if line.startswith(' # bottle hashes + versions go here'):
to_emit.extend(replacements)
replaced = True
else:
to_emit.append(line)
assert replaced, 'Never found the magic line to replace!'
with open(path, 'w') as fp:
fp.write(''.join(to_emit))
if __name__ == '__main__':
main()
| python |
from .models import Category
def common(request):
category=Category.objects.all()
context={
'category':category
}
return context | python |
#!/usr/bin/env python
import sys
import time
import argparse
import hashlib, binascii
import krbKeyCrack
import krbKeyGenerate
if __name__ == '__main__':
# Command line arguments
parser = argparse.ArgumentParser(description="Kerberos POC Benchmark")
parser.add_argument('wordlist', nargs='?', default = "/usr/share/wordlists/rockyou.txt", help='Input wordlist')
args = parser.parse_args()
if not args.wordlist:
parser.print_help()
sys.exit(2)
# Setup Static Info
username = "normal"
password = "password1"
domain = "internal.corp".upper() # case sensitive
wordlist = args.wordlist
# Generate Kerberos Keys
keys = krbKeyGenerate.main(username, password, domain, None)
# Loop through Keys and Record Time
for key in reversed(keys):
ts = time.time()
krbKeyCrack.main(wordlist, key)
te = time.time()
elapsed_time = te - ts
        print("[+] Elapsed Time: %s\n" % str(elapsed_time))
| python |
default_app_config = "BICAPweb.apps.BICAPwebConfig"
| python |
import torch
import torch.nn as nn
import torch.nn.functional as F
class DQNetwork(nn.Module):
"""My Deep Q Network"""
# Go for an architecture that worked for the lunar lander mini project
# Had a simple architecture with two dropout layers.
def __init__( self, state_size, action_size, seed, fc_units = (128, 64, 32) ):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
            fc_units ( tuple(int), dim = (3) ): Hidden layers one to three: number of neurons
"""
super(DQNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc_units[0])
self.dr1 = nn.Dropout(p=0.3)
self.fc2 = nn.Linear(fc_units[0], fc_units[1])
self.dr2 = nn.Dropout(p=0.1)
self.fc3 = nn.Linear(fc_units[1], fc_units[2])
self.fc4 = nn.Linear(fc_units[2], action_size)
# Define forward propagation through the network
def forward(self, state):
"""Build a network that maps state -> action values."""
x = F.relu(self.fc1(state))
x = self.dr1(x)
x = F.relu(self.fc2(x))
x = self.dr2(x)
x = F.relu(self.fc3(x))
x = self.fc4(x)
return x
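# Minimal forward-pass sanity check (the state/action sizes are assumptions,
# not taken from the project): builds the network and runs a random batch
# through it.
if __name__ == '__main__':
    net = DQNetwork(state_size=37, action_size=4, seed=0)
    dummy_states = torch.rand(8, 37)
    q_values = net(dummy_states)
    print(q_values.shape)  # expected: torch.Size([8, 4])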
| python |
from __future__ import print_function, division
import numpy as np
import pandas as pd
import datetime
NAN = object()
def add_dal_fields(in_path, out_path):
ra = np.load(in_path)['x']
names = ra.dtype.names
columns = {nm : ra[nm] for nm in names}
df = pd.DataFrame(columns)
dates = [(NAN if np.isnan(x) else datetime.datetime.utcfromtimestamp(x).strftime("%Y-%m-%d %H:%M:%S"))
for x in df['timestamp']]
df['date'] = dates
df['SOG'] = df.speed
df['LONGITUDE'] = df.lon
df['LATITUDE'] = df.lat
df['MMSI'] = df.mmsi
    # We aren't using the distshore in either model so set it to a large value for comparison
df['distshore'] = 10000
mask = np.array([(x is not NAN) for x in dates])
df = df[mask]
    times = [datetime.datetime.utcfromtimestamp(x).strftime("%Y%m%d_%H%M%S")  # was "%OS", an R-style seconds directive
for x in df['timestamp']]
df["TIME"] = times
#
df.to_csv(out_path)
for in_path, out_path in [("trawl", "trawler"),
("ps", "purse_seine"),
("longliner", "longliner")]:
print(in_path)
add_dal_fields("datasets/kristina_{}.measures.npz".format(in_path),
"datasets/kristina_{}.measures.from_npz.csv".format(out_path))
| python |
from django import forms
from django.forms import formset_factory
class UserRegistrationForm(forms.Form):
username = forms.CharField(
required = True,
label = 'Username',
max_length = 32
)
email = forms.CharField(
required = True,
label = 'Email',
max_length = 32,
)
password = forms.CharField(
required = True,
label = 'Password',
max_length = 32,
widget = forms.PasswordInput()
)
newsletter_signup = forms.BooleanField(
required = False,
label = 'Would you like to receive occasional emails?',
widget = forms.CheckboxInput()
)
class MorselCreationForm(forms.Form):
start_time = forms.DateTimeField(
required = False,
label = 'Start Time and Date',
        widget = forms.DateTimeInput(attrs={'placeholder': 'MM/DD/YY HH:MM'})
)
end_time = forms.DateTimeField(
required = False,
label = 'End Time and Date',
        widget = forms.DateTimeInput(attrs={'placeholder': 'MM/DD/YY HH:MM'})
)
name = forms.CharField(
required = False,
label = 'Morsel Name',
max_length = 200,
widget = forms.TextInput(attrs={'placeholder' : 'Name your hunt...'})
)
welcome_text = forms.CharField(
required = False,
label = 'Welcome Message',
max_length = 200,
widget = forms.TextInput(attrs={'placeholder' : 'Greetings, instructions and dragons!'})
)
completed_text = forms.CharField(
required = False,
label = 'Goodbye Message',
max_length = 200,
widget = forms.TextInput(attrs={'placeholder' : 'Be nice, say thank you to your players!'})
)
public_enabled = forms.BooleanField(
required = False,
label = 'Do you want to make this Crumble public? (Anyone will be able to join)',
widget = forms.CheckboxInput()
)
class QuestionAnswerCreationForm(forms.Form):
question_text = forms.CharField(
required = False,
label = 'Question',
max_length = 200,
widget = forms.TextInput(attrs={'placeholder' : 'Ask something fun!'})
)
answer_text = forms.CharField(
required = False,
label = 'Answer',
max_length = 200,
widget = forms.TextInput(attrs={'placeholder' : 'and the answer is...'})
)
class NewsletterSignupForm(forms.Form):
email = forms.CharField(
required = False
) | python |
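# Hedged usage sketch (view-side names are assumptions): the formset_factory
# import above suggests question/answer pairs are collected as a formset, e.g.
#   QuestionAnswerFormSet = formset_factory(QuestionAnswerCreationForm, extra=3)
#   formset = QuestionAnswerFormSet(request.POST or None)
#   if formset.is_valid():
#       for form in formset:
#           ...  # create the question/answer objects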
"""
Static Data extractor
extract_human_gene_orthologues: human gene to mouse gene orthologue mapping from a TSV report
extract_phenotyping_centres: phenotyping centre list from a TSV file
extract_ontology_terms: ontology terms parsed from a directory of OWL files
"""
import os
from typing import List
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.types import StructType, StructField, StringType, ArrayType
from owlready2 import get_ontology, Ontology, onto_path, ThingClass, Nothing, Thing, IRIS
from impc_etl.shared import utils
from impc_etl.shared.utils import convert_to_row
from impc_etl.config import OntologySchema
def extract_human_gene_orthologues(spark_session: SparkSession, file_path: str) -> DataFrame:
"""
:param spark_session:
:param file_path:
:return human_gene_orthologues_df: Dataframe with the human gene to mouse gene mapping
"""
file_string_fields = ['Human Marker Symbol', 'Human Entrez Gene ID', 'HomoloGene ID',
'Mouse Marker Symbol', 'MGI Marker Accession ID']
file_array_fields = ['High-level Mammalian Phenotype ID']
schema_fields = [StructField(field_name, StringType(), True) for field_name in
file_string_fields]
schema_fields.extend(
        [StructField(field_name, ArrayType(StringType()), True) for field_name in file_array_fields])
hmd_file_schema = StructType(schema_fields)
human_gene_orthologues_df = utils.extract_tsv(spark_session, file_path, hmd_file_schema)
return human_gene_orthologues_df
def extract_phenotyping_centres(spark_session: SparkSession, file_path: str) -> DataFrame:
"""
:param spark_session:
:param file_path:
:return:
"""
phenotyping_centres_df = utils.extract_tsv(spark_session, file_path)
return phenotyping_centres_df
def extract_ontology_terms(spark_session: SparkSession, ontologies_path: str) -> DataFrame:
"""
:param spark_session:
:param ontologies_path:
:return:
"""
directory = os.fsencode(ontologies_path)
ontology_terms = []
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".owl"):
onto_path.append(os.path.join(directory, filename))
ontology = get_ontology(None).load()
ontology_terms.extend(parse_ontology(ontology))
ontology_terms_df = spark_session.createDataFrame(
convert_to_row(term) for term in ontology_terms)
return ontology_terms_df
def parse_ontology(ontology: Ontology, schema=OntologySchema) -> List[dict]:
"""
Parse an ontology from owlready2.Ontology to a list of dicts with
    the domain fields for OntologyTerm.
By default it use the OBO Schema for the definition and synonyms annotations.
:param ontology: owlready2.Ontology to parse
:param schema: schema class extending OntologySchema
:return ontology_terms: list of dicts containing ontology terms
"""
ontology_terms = []
for ontology_class in ontology.classes():
ontology_id = ontology.name
ontology_term_id = ontology_class.name
term_label = ontology_class.label
term_definition = _collect_annotations(ontology_class, [schema.DEFINITION_ANNOTATION])
synonyms = _collect_annotations(ontology_class, schema.SYNONYM_ANNOTATIONS)
parents = [str(parent.name) for parent in ontology_class.is_a if
isinstance(parent, ThingClass)]
children = [str(child.name) for child in ontology_class.subclasses() if
isinstance(child, ThingClass)]
ontology_term = {
'ontologyId': ontology_id,
'ontologyTermId': ontology_term_id,
'label': term_label,
'description': term_definition,
'synonyms': synonyms,
'parents': parents,
'children': children
}
ontology_terms.append(ontology_term)
return ontology_terms
def _collect_annotations(ontology_class: ThingClass, annotation_iris: List[str]):
"""
Collects the values for one or several annotations for one specific class
:param ontology_class: owlready2.ThingClass
:param annotation_iris: list of annotation iris
:return annotations_values: list of values for the input annotations
"""
annotation_values = []
for annotation_iri in annotation_iris:
if IRIS[annotation_iri] is None or ontology_class in (Nothing, Thing):
continue
annotation_values.extend(IRIS[annotation_iri][ontology_class])
return annotation_values
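# Hedged usage sketch (file paths and app name are assumptions): wiring the
# extractors to a local SparkSession.
if __name__ == '__main__':
    spark = SparkSession.builder.appName("impc_static_data").getOrCreate()
    centres_df = extract_phenotyping_centres(spark, "resources/phenotyping_centres.tsv")
    centres_df.show(5)
    terms_df = extract_ontology_terms(spark, "resources/ontologies/")
    terms_df.show(5)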
| python |
# Generated by Django 3.2.7 on 2021-10-28 15:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("kite_runner", "0003_article_tag"),
]
operations = [
migrations.AddField(
model_name="profile",
name="favourites",
field=models.ManyToManyField(
related_name="favourited_by", to="kite_runner.Article"
),
),
]
| python |
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def maxAncestorDiff(self, root: TreeNode) -> int:
ans = [0]
def dfs(node, a, b):
if node:
a, b = min(a, node.val), max(b, node.val)
ans[0] = max(ans[0], b - a)
dfs(node.left, a, b)
dfs(node.right, a, b)
dfs(root, root.val, root.val)
return ans[0]
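# Quick hand-check (the example tree is an assumption): in the tree below the
# largest ancestor-to-node difference is |8 - 1| = 7.
if __name__ == '__main__':
    root = TreeNode(8)
    root.left = TreeNode(3)
    root.right = TreeNode(10)
    root.left.left = TreeNode(1)
    root.left.right = TreeNode(6)
    root.right.right = TreeNode(14)
    print(Solution().maxAncestorDiff(root))  # 7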
| python |
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import libs.model_common
# X:(M,T,N,N)=>(M*N,T,N), Y:(M,N,N)=>(M*N,N)
def placeholder(T, F_in, F_out):
samples = tf.compat.v1.placeholder(shape = (None,T, F_in), dtype = tf.float32,name="samples")
labels = tf.compat.v1.placeholder(shape = (None, F_out), dtype = tf.float32,name="lables")
return labels, samples
# X=(B,T,F)
def Model(args, mean, std, X, F_out):
output = libs.model_common.multi_lstm(X, args.units, type=args.RNN_Type) #(B,F)
# output = libs.model_common.multi_fc(output)
outputs = libs.model_common.multi_targets(output, std, mean, F_out)
return outputs
| python |
import os
import sys
import glob
import math
import collections
import itertools
import torch
from abc import ABC, abstractproperty
from deepsplines.datasets import init_dataset
from deepsplines.dataloader import DataLoader
from deepsplines.ds_utils import size_str
from deepsplines.ds_utils import dict_recursive_merge, flatten_structure
from deepsplines.ds_utils import json_load, json_dump
class Project(ABC):
train_results_json_filename = 'train_results.json'
test_results_json_filename = 'test_results.json'
train_sorting_key = 'latest_valid_acc'
test_sorting_key = 'test_acc'
def __init__(self, params, user_params):
self.params = params
self.user_params = user_params
self.training = (self.params["mode"] == 'train')
self.log_dir_model = os.path.join(self.params["log_dir"],
self.params["model_name"])
self.best_train_acc = 0.
self.best_valid_acc = 0.
if self.training:
self.start_epoch, self.global_step = 0, 0
self.dataset = init_dataset(**self.params['dataset'])
self.init_dataloader()
def init_dataloader(self):
"""
Initialize dataloader.
"""
# Load the data
print('\n==> Loading the data...')
self.dataloader = DataLoader(self.dataset, **self.params['dataloader'])
self.trainloader, self.validloader = \
self.dataloader.get_train_valid_loader()
self.testloader = self.dataloader.get_test_loader()
self.save_train_info()
def save_train_info(self):
""" """
assert (self.trainloader is not None)
if self.dataset.is_user_dataset is True:
self.num_train_samples = sum(
inputs.size(0) for inputs, _ in self.trainloader)
else:
self.num_train_samples = len(self.trainloader.sampler)
self.num_train_batches = \
math.ceil(self.num_train_samples / self.dataloader.batch_size)
# TODO: do this just with the model and optimizer states
@abstractproperty
def net(self):
pass
@abstractproperty
def main_optimizer(self):
pass
@abstractproperty
def main_scheduler(self):
pass
@abstractproperty
def aux_optimizer(self):
pass
@abstractproperty
def aux_scheduler(self):
pass
def init_log(self):
"""
Create Log directory for training the model as
self.params["log_dir"]/self.params["model_name"].
"""
if not os.path.isdir(self.log_dir_model):
os.makedirs(self.log_dir_model)
def init_device(self):
""" """
if self.params['device'].startswith('cuda'):
if torch.cuda.is_available():
self.device = 'cuda:0' # Using GPU0 by default
print('\nUsing GPU.')
else:
self.device = 'cpu'
print('\nCUDA not available. Using CPU.')
else:
self.device = 'cpu'
print('\nUsing CPU.')
@property
def results_json_filename(self):
"""
Name of json file with logged results.
"""
if self.training is True:
return self.train_results_json_filename
else:
return self.test_results_json_filename
@property
def sorting_key(self):
"""
Key for sorting models in json file.
"""
if self.training:
return self.train_sorting_key
else:
return self.test_sorting_key
def init_json(self):
"""
Init json file for train/test results.
"""
# initialize/verify json log file
self.results_json = os.path.join(self.params['log_dir'],
self.results_json_filename)
if not os.path.isfile(self.results_json):
results_dict = {}
else:
results_dict = json_load(self.results_json)
if self.params['model_name'] not in results_dict:
# initialize model log
results_dict[self.params['model_name']] = {}
# add minimal information for sorting models in results_json file
if self.sorting_key not in results_dict[self.params['model_name']]:
results_dict[self.params['model_name']][self.sorting_key] = 0.
json_dump(results_dict, self.results_json)
comb_list = list(
itertools.product(['latest', 'best'], ['train', 'valid'],
['acc', 'loss']))
self.info_list = ['_'.join(k)
for k in comb_list] + ['test_acc', 'test_loss']
def update_json(self, info, value):
"""
Update json file with latest/best validation/test accuracy/loss,
if training, and with test accuracy otherwise.
Args:
info (str):
e.g. 'latest_valid_loss', 'best_train_acc'.
value (float):
value for the given info.
"""
assert info in self.info_list, \
f'{info} should be in {self.info_list}...'
# save in json
results_dict = json_load(self.results_json)
if isinstance(value, dict):
            if info not in results_dict[self.params["model_name"]]:
results_dict[self.params["model_name"]][info] = {}
for key, val in value.items():
results_dict[self.params["model_name"]][info][key] = \
float('{:.3f}'.format(val))
else:
results_dict[self.params["model_name"]][info] = \
float('{:.3f}'.format(value))
sorted_acc = sorted(results_dict.items(),
key=lambda kv: kv[1][self.sorting_key],
reverse=True)
sorted_results_dict = collections.OrderedDict(sorted_acc)
json_dump(sorted_results_dict, self.results_json)
@property
def load_ckpt(self):
"""
Returns True if loading a checkpoint and restoring its parameters,
for resuming training or testing a model. Otherwise, returns False.
"""
if (self.params["ckpt_filename"]
is not None) or (self.params["resume"] is True):
return True
else:
return False
def restore_ckpt_params(self):
"""
Attempts to restore a checkpoint if resuming training or testing
a model.
If successful, it gets the loaded checkpoint and merges the saved
parameters.
Returns True if a checkpoint was successfully loaded,
and False otherwise.
"""
if self.params["ckpt_filename"] is not None:
try:
self.load_merge_params(self.params["ckpt_filename"])
except FileNotFoundError:
print('\nCheckpoint file not found... Unable to '
'restore model.\n')
raise
except BaseException:
print('\nUnknown error in restoring model.')
raise
print('\nSuccessfully loaded ckpt ' + self.params["ckpt_filename"])
return True
elif self.params["resume"] is True:
log_dir_model = os.path.join(self.params["log_dir"],
self.params["model_name"])
if self.params["resume_from_best"] is True:
regexp_ckpt = os.path.join(log_dir_model,
'*_best_valid_acc.pth')
else:
regexp_ckpt = os.path.join(log_dir_model, '*_net_*.pth')
files = glob.glob(regexp_ckpt)
# sort by time from oldest to newest
files.sort(key=os.path.getmtime)
if len(files) > 0:
print('\nRestoring model from {}.'.format(files[-1]))
# restore from most recent file
self.load_merge_params(files[-1])
return True
else:
print('\nNo model saved to resume training. '
'Starting from scratch.')
return False
else:
print('\nStarting from scratch.')
return False
def load_merge_params(self, ckpt_filename):
"""
Load and merge the parameters from ckpt_filename into self.params
and save the loaded checkpoint (dictionary).
The parameters introduced by the user (via command-line arguments)
override the corresponding saved parameters. The ones not specified
by the user, are loaded from the checkpoint.
Args:
ckpt_filename (str): Name of checkpoint (.pth) file.
"""
        # get_loaded_ckpt() already wraps torch.load() with friendly error handling
        ckpt = self.get_loaded_ckpt(ckpt_filename)
self.loaded_ckpt = ckpt # save loaded_ckpt for restore_model
saved_params = ckpt['params']
# merge w/ saved params
self.params = dict_recursive_merge(self.params, saved_params)
# merge w/ user params (precedence over saved)
self.params = dict_recursive_merge(self.params, self.user_params)
def restore_model(self):
""" """
self.load_model(self.loaded_ckpt)
if self.training and self.start_epoch == self.params["num_epochs"]:
print('\nTraining in this checkpoint is already completed. '
'Please increase the number of epochs.')
sys.exit()
def load_model(self, ckpt):
"""
Load model from a loaded checkpoint.
Args:
ckpt (dictionary): loaded checkpoint.
"""
print('\n==> Resuming from checkpoint...')
self.net.load_state_dict(ckpt['model_state'],
strict=(self.training is True))
self.best_train_acc = ckpt['best_train_acc']
self.best_valid_acc = ckpt['best_valid_acc']
if self.training:
self.start_epoch = ckpt['num_epochs_finished']
self.global_step = ckpt['global_step']
self.main_optimizer.load_state_dict(ckpt['main_optimizer_state'])
if ckpt['aux_optimizer_state'] is not None:
self.aux_optimizer.load_state_dict(ckpt['aux_optimizer_state'])
if 'main_scheduler_state' in ckpt:
self.main_scheduler.load_state_dict(
ckpt['main_scheduler_state'])
if ckpt['aux_scheduler_state'] is not None:
self.aux_scheduler.load_state_dict(
ckpt['aux_scheduler_state'])
return
@staticmethod
def get_loaded_ckpt(ckpt_filename):
"""
Returns a loaded checkpoint (ckpt dictionary)
from ckpt_filename, if it exists.
Args:
ckpt_filename (str): Name of checkpoint (.pth) file.
"""
try:
# TODO: Check if model is always loaded on cpu.
# Use net.to(device) after.
ckpt = torch.load(ckpt_filename,
map_location=lambda storage, loc: storage)
except FileNotFoundError:
print('\nCheckpoint file not found... Unable '
'to load checkpoint.\n')
raise
except BaseException:
print('\nUnknown error in loading checkpoint parameters.')
raise
return ckpt
@classmethod
def load_ckpt_params(cls, ckpt_filename, flatten=False):
"""
Returns the ckpt dictionary and the parameters saved
in a checkpoint file.
Args:
ckpt_filename (str):
Name of checkpoint (.pth) file.
flatten (bool):
whether to flatten the structure of the parameters dictionary
into a single level
(see structure in struct_default_values.py).
"""
ckpt = cls.get_loaded_ckpt(ckpt_filename)
params = ckpt['params']
if flatten is True:
params = flatten_structure(params)
return ckpt, params
@staticmethod
def get_ckpt_from_log_dir_model(log_dir_model):
"""
Get last ckpt from log_dir_model (log_dir/model_name).
"""
regexp_ckpt = os.path.join(log_dir_model, '*_net_*.pth')
files = glob.glob(regexp_ckpt)
files.sort(key=os.path.getmtime) # sort by time from oldest to newest
if len(files) > 0:
ckpt_filename = files[-1]
print(f'Restoring {ckpt_filename}')
return ckpt_filename
else:
print(f'No ckpt found in {log_dir_model}...')
return None
@classmethod
def load_results_dict(cls, log_dir, mode='train'):
"""
Load train or test results from the corresponding
json file in log_dir.
Args:
log_dir (str):
log directory where results json file is located.
mode (str):
'train' or 'test'.
Returns:
results_dict (dict): dictionary with train/test results.
"""
assert mode in ['train', 'test'], 'mode should be "train" or "test"...'
if mode == 'train':
results_json_filename = cls.train_results_json_filename
else:
results_json_filename = cls.test_results_json_filename
results_json = os.path.join(log_dir, results_json_filename)
results_dict = json_load(results_json)
return results_dict
@classmethod
def dump_results_dict(cls, results_dict, log_dir, mode='train'):
"""
Dump results dictionary in the train or test results json file
in log_dir.
Args:
results_dict (dict):
dictionary with train/test results.
log_dir (str):
log directory where results json file is located.
mode (str):
'train' or 'test'.
"""
assert mode in ['train', 'test'], 'mode should be "train" or "test"...'
if mode == 'train':
results_json_filename = cls.train_results_json_filename
else:
results_json_filename = cls.test_results_json_filename
results_json = os.path.join(log_dir, results_json_filename)
json_dump(results_dict, results_json)
@classmethod
def get_best_model(cls, log_dir, mode='train'):
"""
Get the name and checkpoint filename of the best model
(best validation/test) from the train/test results json file.
Args:
log_dir (str):
log directory where results json file is located.
mode (str):
'train' or 'test'.
"""
results_dict = cls.load_results_dict(log_dir, mode)
# models are ordered by validation accuracy; choose first one.
best_model_name = next(iter(results_dict))
log_dir_best_model = os.path.join(log_dir, best_model_name)
ckpt_filename = cls.get_ckpt_from_log_dir_model(log_dir_best_model)
return best_model_name, ckpt_filename
def train_log_step(self, epoch, batch_idx, train_acc, losses_dict):
"""
Log the training.
Args:
epoch (int):
current epoch.
batch_idx (int):
current batch.
train_acc (float):
computed train accuracy.
losses_dict (dict):
A dictionary of the form {loss name (str) : loss value (float)}
"""
print('[{:3d}, {:6d} / {:6d}] '.format(epoch + 1, batch_idx + 1,
self.num_train_batches),
end='')
for key, value in losses_dict.items():
print('{}: {:7.3f} | '.format(key, value), end='')
print('train acc: {:7.3f}%'.format(train_acc))
self.update_json('latest_train_loss', losses_dict)
self.update_json('latest_train_acc', train_acc)
if train_acc > self.best_train_acc:
self.best_train_acc = train_acc
self.update_json('best_train_acc', train_acc)
def valid_log_step(self, epoch, valid_acc, losses_dict):
"""
Log the validation.
Args:
epoch (int):
current epoch.
valid_acc (float):
computed validation accuracy.
losses_dict (dict):
A dictionary of the form {loss name (str) : loss value (float)}
"""
print('\nvalidation_step : ', end='')
for key, value in losses_dict.items():
print('{}: {:7.3f} | '.format(key, value), end='')
print('valid acc: {:7.3f}%'.format(valid_acc), '\n')
self.update_json('latest_valid_loss', losses_dict)
self.update_json('latest_valid_acc', valid_acc)
if valid_acc > self.best_valid_acc:
self.best_valid_acc = valid_acc
self.update_json('best_valid_acc', valid_acc)
def ckpt_log_step(self, epoch, valid_acc):
"""
Save the model in a checkpoint.
Only allow at most params['ckpt_nmax_files'] checkpoints.
Delete the oldest checkpoint, if necessary.
Also log the best results so far in a separate checkpoint.
Args:
epoch (int):
current epoch.
valid_acc (float):
computed validation accuracy.
"""
base_ckpt_filename = os.path.join(
self.log_dir_model,
self.params["model_name"] + '_net_{:04d}'.format(epoch + 1))
regexp_ckpt = os.path.join(self.log_dir_model, "*_net_*.pth")
regexp_best_valid_acc_ckpt = os.path.join(self.log_dir_model,
"*_best_valid_acc.pth")
# save checkpoint as *_net_{epoch+1}.pth
ckpt_filename = base_ckpt_filename + '.pth'
# remove best_valid_acc ckpt from files
files = list(
set(glob.glob(regexp_ckpt)) -
set(glob.glob(regexp_best_valid_acc_ckpt)))
# sort from newest to oldest
files.sort(key=os.path.getmtime, reverse=True)
if (not self.params["ckpt_nmax_files"] < 0) and \
(len(files) >= self.params["ckpt_nmax_files"]):
assert len(files) == (self.params["ckpt_nmax_files"]), \
'There are more than (ckpt_nmax_files+1) ' \
'*_net_*.pth checkpoints.'
filename = files[-1]
os.remove(filename)
self.save_network(ckpt_filename, epoch, valid_acc)
if valid_acc == self.best_valid_acc:
# if valid_acc = best_valid_acc, also save checkpoint as
# *_net_{global_step}_best_valid_acc.pth
# and delete previous best_valid_acc checkpoint
best_valid_acc_ckpt_filename = \
base_ckpt_filename + '_best_valid_acc.pth'
files = glob.glob(regexp_best_valid_acc_ckpt)
if len(files) > 0:
assert len(files) == 1, \
'More than one *_best_valid_acc.pth checkpoint.'
os.remove(files[0])
self.save_network(best_valid_acc_ckpt_filename, epoch, valid_acc)
return
def save_network(self, ckpt_filename, epoch, valid_acc):
"""
Save the network in a checkpoint.
Args:
ckpt_filename (str):
Name of checkpoint (.pth) file.
epoch (int):
current epoch.
valid_acc (float):
computed validation accuracy.
"""
state = {
'model_state': self.net.state_dict(),
'main_optimizer_state': self.main_optimizer.state_dict(),
'main_scheduler_state': self.main_scheduler.state_dict(),
'params': self.params,
'best_train_acc': self.best_train_acc,
'best_valid_acc': self.best_valid_acc,
'valid_acc': valid_acc,
'num_epochs_finished': epoch + 1,
'global_step': self.global_step
}
if self.aux_optimizer is not None:
state['aux_optimizer_state'] = self.aux_optimizer.state_dict()
state['aux_scheduler_state'] = self.aux_scheduler.state_dict()
else:
state['aux_optimizer_state'] = None
state['aux_scheduler_state'] = None
torch.save(state, ckpt_filename)
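        # The keys stored here mirror what load_model() and load_ckpt_params() read
        # back (model/optimizer/scheduler states, params, accuracies, epoch counters).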
return
def print_train_info(self):
""" """
assert (self.validloader is not None)
assert hasattr(self, 'num_train_samples')
assert hasattr(self, 'num_train_batches')
if self.dataset.is_user_dataset is True:
num_valid_samples = sum(
inputs.size(0) for inputs, _ in self.validloader)
sample_data, sample_target = self.trainloader[0]
else:
num_valid_samples = len(self.validloader.sampler)
# dataloader iterator to get next sample
dataiter = iter(self.trainloader)
            sample_data, sample_target = next(dataiter)
num_valid_batches = \
math.ceil(num_valid_samples / self.dataloader.batch_size)
print('\n==> Train info:')
print('batch (data, target) size : '
f'({size_str(sample_data)}, {size_str(sample_target)}).')
print('no. of (train, valid) samples : '
f'({self.num_train_samples}, {num_valid_samples}).')
print('no. of (train, valid) batches : '
f'({self.num_train_batches}, {num_valid_batches}).')
def print_test_info(self):
""" """
assert (self.testloader is not None)
if self.dataset.is_user_dataset is True:
num_test_samples = sum(
inputs.size(0) for inputs, _ in self.testloader)
sample_data, sample_target = self.testloader[0]
else:
num_test_samples = len(self.testloader.dataset)
# dataloader iterator to get next sample
dataiter = iter(self.testloader)
            sample_data, sample_target = next(dataiter)
num_test_batches = math.ceil(num_test_samples /
self.dataloader.batch_size)
print('\n==> Test info:')
print('batch (data, target) size : '
f'({size_str(sample_data)}, {size_str(sample_target)}).')
print(f'no. of test samples : {num_test_samples}.')
print(f'no. of test batches : {num_test_batches}.')
def print_optimization_info(self):
""" """
print('\n==> Optimizer info:')
print('--Main Optimizer:')
print(self.main_optimizer)
if self.aux_optimizer is not None:
print('--Aux Optimizer :')
print(self.aux_optimizer)
# scheduler
scheduler_list = [self.main_scheduler, self.aux_scheduler]
scheduler_name_list = ['Main', 'Aux']
for scheduler, aux_str in zip(scheduler_list, scheduler_name_list):
if scheduler is not None:
print('--' + aux_str + ' Scheduler : ')
print(f'class - {type(scheduler).__name__}; '
f'milestones - {scheduler.milestones}; '
f'gamma - {scheduler.gamma}.')
def log_additional_info(self):
""" Log additional information to self.results_json
"""
# TODO: Review this
if not self.params['additional_info']: # empty list
return
results_dict = json_load(self.results_json)
if 'sparsity' in self.params['additional_info']:
results_dict[self.params['model_name']]['sparsity'] = \
'{:d}'.format(self.net.compute_sparsity())
if 'lipschitz_bound' in self.params['additional_info']:
results_dict[self.params['model_name']]['lipschitz_bound'] = \
'{:.3f}'.format(self.net.lipschitz_bound())
json_dump(results_dict, self.results_json)
| python |
"""
Let's get the relationships yo
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn.utils.rnn import PackedSequence
from lib.resnet import resnet_l4
from config import BATCHNORM_MOMENTUM, IM_SCALE
from lib.fpn.nms.functions.nms import apply_nms
# from lib.relationship_feat import RelationshipFeats
# from lib.decoder_rnn import DecoderRNN, lstm_factory, LockedDropout
from lib.lstm.decoder_rnn import DecoderRNN
from lib.lstm.highway_lstm_cuda.alternating_highway_lstm import AlternatingHighwayLSTM
from lib.fpn.box_utils import bbox_overlaps, center_size
from lib.get_union_boxes import UnionBoxesAndFeats
from lib.fpn.proposal_assignments.rel_assignments import rel_assignments
from lib.object_detector import ObjectDetector, gather_res, load_vgg
from lib.pytorch_misc import transpose_packed_sequence_inds, to_onehot, arange, enumerate_by_image, diagonal_inds, Flattener, get_ort_embeds, intersect_2d
from lib.sparse_targets import FrequencyBias
from lib.surgery import filter_dets
from lib.word_vectors import obj_edge_vectors
from lib.fpn.roi_align.functions.roi_align import RoIAlignFunction
from lib.self_attention_refind import Message_Passing4OBJ
import math
from lib.self_attention_refind import LayerNorm
from lib.tail_classifier import EndCell
from math import pi, atan
MODES = ('sgdet', 'sgcls', 'predcls','preddet')
def smooth_one_hot(input):
c = (1 / pi) * atan(10) + 0.5
diff = input[:, None, :] - input[:, :, None]
one_hot = ((1/pi)*torch.atan(1e6*(diff + (1e-5))) + 0.5).prod(1) / c
return one_hot
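# Reading of smooth_one_hot (an interpretation of the code above, not taken from any
# accompanying text): (1/pi)*atan(k*x) + 0.5 is a smooth approximation of the unit
# step 1[x >= 0], so with k = 1e6 the product over dim 1 is close to 1 only for the
# row-wise maximum entry of `input`; the result is a differentiable stand-in for a
# one-hot argmax. The constant c = (1/pi)*atan(10) + 0.5 compensates for the diagonal
# term (diff = 0, shifted by 1e-5), pushing the "hot" entry towards 1.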
def nms_overlaps(boxes):
""" get overlaps for each channel"""
assert boxes.dim() == 3
N = boxes.size(0)
nc = boxes.size(1)
max_xy = torch.min(boxes[:, None, :, 2:].expand(N, N, nc, 2),
boxes[None, :, :, 2:].expand(N, N, nc, 2))
min_xy = torch.max(boxes[:, None, :, :2].expand(N, N, nc, 2),
boxes[None, :, :, :2].expand(N, N, nc, 2))
inter = torch.clamp((max_xy - min_xy + 1.0), min=0)
# n, n, 151
inters = inter[:,:,:,0]*inter[:,:,:,1]
boxes_flat = boxes.view(-1, 4)
areas_flat = (boxes_flat[:,2]- boxes_flat[:,0]+1.0)*(
boxes_flat[:,3]- boxes_flat[:,1]+1.0)
areas = areas_flat.view(boxes.size(0), boxes.size(1))
union = -inters + areas[None] + areas[:, None]
return inters / union
def bbox_transform_inv(boxes, gt_boxes, weights=(1.0, 1.0, 1.0, 1.0)):
"""Inverse transform that computes target bounding-box regression deltas
given proposal boxes and ground-truth boxes. The weights argument should be
a 4-tuple of multiplicative weights that are applied to the regression
target.
In older versions of this code (and in py-faster-rcnn), the weights were set
such that the regression deltas would have unit standard deviation on the
training dataset. Presently, rather than computing these statistics exactly,
we use a fixed set of weights (10., 10., 5., 5.) by default. These are
approximately the weights one would get from COCO using the previous unit
stdev heuristic.
"""
ex_widths = boxes[:, 2] - boxes[:, 0] + 1.0
ex_heights = boxes[:, 3] - boxes[:, 1] + 1.0
ex_ctr_x = boxes[:, 0] + 0.5 * ex_widths
ex_ctr_y = boxes[:, 1] + 0.5 * ex_heights
gt_widths = gt_boxes[:, 2] - gt_boxes[:, 0] + 1.0
gt_heights = gt_boxes[:, 3] - gt_boxes[:, 1] + 1.0
gt_ctr_x = gt_boxes[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_boxes[:, 1] + 0.5 * gt_heights
wx, wy, ww, wh = weights
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * torch.log(gt_widths / ex_widths)
targets_dh = wh * torch.log(gt_heights / ex_heights)
targets = torch.stack((targets_dx, targets_dy, targets_dw,
targets_dh), -1)
return targets
def get_spt_features(boxes1, boxes2, boxes_u, width, height):
# boxes_u = boxes_union(boxes1, boxes2)
spt_feat_1 = get_box_feature(boxes1, width, height)
spt_feat_2 = get_box_feature(boxes2, width, height)
spt_feat_12 = get_pair_feature(boxes1, boxes2)
spt_feat_1u = get_pair_feature(boxes1, boxes_u)
spt_feat_u2 = get_pair_feature(boxes_u, boxes2)
return torch.cat((spt_feat_12, spt_feat_1u, spt_feat_u2, spt_feat_1, spt_feat_2), -1)
def get_pair_feature(boxes1, boxes2):
delta_1 = bbox_transform_inv(boxes1, boxes2)
delta_2 = bbox_transform_inv(boxes2, boxes1)
spt_feat = torch.cat((delta_1, delta_2[:, :2]), -1)
return spt_feat
def get_box_feature(boxes, width, height):
f1 = boxes[:, 0] / width
f2 = boxes[:, 1] / height
f3 = boxes[:, 2] / width
f4 = boxes[:, 3] / height
f5 = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1) / (width * height)
return torch.stack((f1, f2, f3, f4, f5), -1)
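# Dimension bookkeeping (derived from the functions above): get_pair_feature returns
# 6 values (4 box deltas + 2 extra), get_box_feature returns 5, so get_spt_features
# concatenates 3 * 6 + 2 * 5 = 28 values per relation, matching the nn.Linear(28, 64)
# input size in Boxes_Encode below.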
class Boxes_Encode(nn.Module):
def __init__(self, output_dims):
super(Boxes_Encode, self).__init__()
self.spt_feats = nn.Sequential(
nn.Linear(28, 64),
nn.LeakyReLU(0.1),
nn.Linear(64, 64),
nn.LeakyReLU(0.1))
def spo_boxes(self, boxes, rel_inds):
s_boxes = boxes[rel_inds[:, 1]]
o_boxes = boxes[rel_inds[:, 2]]
union_boxes = torch.cat((
torch.min(s_boxes[:, 0:2], o_boxes[:, 0:2]),
torch.max(s_boxes[:, 2:], o_boxes[:, 2:])
), 1)
return s_boxes, o_boxes, union_boxes
def forward(self, boxes, rel_inds):
s_boxes, o_boxes, u_boxes = self.spo_boxes(boxes, rel_inds)
spt_feats = get_spt_features(s_boxes, o_boxes, u_boxes, IM_SCALE, IM_SCALE)
return self.spt_feats(spt_feats)
class LinearizedContext(nn.Module):
"""
Module for computing the object contexts and edge contexts
"""
def __init__(self, classes, rel_classes, mode='sgdet',
embed_dim=200, hidden_dim=256, obj_dim=2048,
nl_obj=2, nl_edge=2, dropout_rate=0.2, order='confidence',
pass_in_obj_feats_to_decoder=True,
pass_in_obj_feats_to_edge=True):
super(LinearizedContext, self).__init__()
self.classes = classes
self.rel_classes = rel_classes
assert mode in MODES
self.mode = mode
self.nl_obj = nl_obj
self.nl_edge = nl_edge
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.obj_dim = obj_dim
self.dropout_rate = dropout_rate
self.pass_in_obj_feats_to_decoder = pass_in_obj_feats_to_decoder
self.pass_in_obj_feats_to_edge = pass_in_obj_feats_to_edge
assert order in ('size', 'confidence', 'random', 'leftright')
self.order = order
# EMBEDDINGS
self.decoder_lin = nn.Linear(self.hidden_dim, self.num_classes)
@property
def num_classes(self):
return len(self.classes)
@property
def num_rels(self):
return len(self.rel_classes)
    def forward(self, obj_dists1, obj_feats, obj_labels=None, box_priors=None, boxes_per_cls=None):
        """
        Forward pass through the object and edge context
        :param obj_dists1: initial object class distributions
        :param obj_feats: object context features
        :param obj_labels: ground-truth object labels (used in predcls / training)
        :param box_priors: prior boxes
        :param boxes_per_cls: per-class boxes, used for NMS in sgdet test mode
        :return: refined object distributions and predicted object classes
        """
# UNSURE WHAT TO DO HERE
if self.mode == 'predcls':
obj_dists2 = Variable(to_onehot(obj_labels.data, self.num_classes))
else:
obj_dists2 = self.decoder_lin(obj_feats) + obj_dists1
if self.mode == 'sgdet' and not self.training:
# NMS here for baseline
is_overlap = nms_overlaps(boxes_per_cls.data).view(
boxes_per_cls.size(0), boxes_per_cls.size(0), boxes_per_cls.size(1)
).cpu().numpy() >= 0.5
probs = F.softmax(obj_dists2, 1).data.cpu().numpy()
probs[:, 0] = 0
obj_preds = obj_dists2.data.new(obj_dists2.shape[0]).long().fill_(0)
for i in range(obj_preds.size(0)):
box_ind, cls_ind = np.unravel_index(probs.argmax(), probs.shape)
obj_preds[int(box_ind)] = int(cls_ind)
probs[is_overlap[box_ind,:,cls_ind], cls_ind] = 0.0
probs[box_ind] = -1.0
obj_preds = Variable(obj_preds.view(-1))
else:
obj_preds = obj_labels if obj_labels is not None else obj_dists2[:,1:].max(1)[1] + 1
return obj_dists2, obj_preds
class RelModel(nn.Module):
"""
RELATIONSHIPS
"""
def __init__(self, classes, rel_classes, mode='sgdet', num_gpus=1, use_vision=True, require_overlap_det=True,
embed_dim=200, hidden_dim=256, pooling_dim=2048,
nl_obj=1, nl_edge=2, use_resnet=False, order='confidence', thresh=0.01,
use_proposals=False, pass_in_obj_feats_to_decoder=True,
pass_in_obj_feats_to_edge=True, rec_dropout=0.0, use_bias=True, use_tanh=True,
limit_vision=True):
"""
:param classes: Object classes
        :param rel_classes: Relationship classes. None if we're not using rel mode
        :param mode: (sgcls, predcls, or sgdet)
        :param num_gpus: how many GPUs to use
        :param use_vision: Whether to use vision in the final product
        :param require_overlap_det: Whether two objects must intersect
        :param embed_dim: Dimension for all embeddings
        :param hidden_dim: LSTM hidden size
        :param obj_dim: object feature dimension (2048 with ResNet, 4096 with VGG)
"""
super(RelModel, self).__init__()
self.classes = classes
self.rel_classes = rel_classes
self.num_gpus = num_gpus
assert mode in MODES
self.mode = mode
self.pooling_size = 7
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.obj_dim = 2048 if use_resnet else 4096
self.pooling_dim = pooling_dim
self.use_bias = use_bias
self.use_vision = use_vision
self.use_tanh = use_tanh
self.limit_vision=limit_vision
self.require_overlap = require_overlap_det and self.mode == 'sgdet'
self.hook_for_grad = False
self.gradients = []
self.detector = ObjectDetector(
classes=classes,
mode=('proposals' if use_proposals else 'refinerels') if mode == 'sgdet' else 'gtbox',
use_resnet=use_resnet,
thresh=thresh,
max_per_img=64,
)
self.ort_embedding = torch.autograd.Variable(get_ort_embeds(self.num_classes, 200).cuda())
embed_vecs = obj_edge_vectors(self.classes, wv_dim=self.embed_dim)
self.obj_embed = nn.Embedding(self.num_classes, self.embed_dim)
self.obj_embed.weight.data = embed_vecs.clone()
# This probably doesn't help it much
self.pos_embed = nn.Sequential(*[
nn.BatchNorm1d(4, momentum=BATCHNORM_MOMENTUM / 10.0),
nn.Linear(4, 128),
nn.ReLU(inplace=True),
nn.Dropout(0.1),
])
self.context = LinearizedContext(self.classes, self.rel_classes, mode=self.mode,
embed_dim=self.embed_dim, hidden_dim=self.hidden_dim,
obj_dim=self.obj_dim,
nl_obj=nl_obj, nl_edge=nl_edge, dropout_rate=rec_dropout,
order=order,
pass_in_obj_feats_to_decoder=pass_in_obj_feats_to_decoder,
pass_in_obj_feats_to_edge=pass_in_obj_feats_to_edge)
# Image Feats (You'll have to disable if you want to turn off the features from here)
self.union_boxes = UnionBoxesAndFeats(pooling_size=self.pooling_size, stride=16,
dim=1024 if use_resnet else 512)
self.merge_obj_feats = nn.Sequential(nn.Linear(self.obj_dim + self.embed_dim + 128, self.hidden_dim), nn.ReLU())
# self.trans = nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim//4),
# LayerNorm(self.hidden_dim//4), nn.ReLU(),
# nn.Linear(self.hidden_dim//4, self.hidden_dim))
self.get_phr_feats = nn.Linear(self.pooling_dim, self.hidden_dim)
self.embeddings4lstm = nn.Embedding(self.num_classes, self.embed_dim)
self.lstm = nn.LSTM(input_size=self.hidden_dim+self.embed_dim, hidden_size=self.hidden_dim, num_layers=1)
self.obj_mps1 = Message_Passing4OBJ(self.hidden_dim)
# self.obj_mps2 = Message_Passing4OBJ(self.hidden_dim)
self.get_boxes_encode = Boxes_Encode(64)
if use_resnet:
self.roi_fmap = nn.Sequential(
resnet_l4(relu_end=False),
nn.AvgPool2d(self.pooling_size),
Flattener(),
)
else:
roi_fmap = [
Flattener(),
load_vgg(use_dropout=False, use_relu=False, use_linear=pooling_dim == 4096, pretrained=False).classifier,
]
if pooling_dim != 4096:
roi_fmap.append(nn.Linear(4096, pooling_dim))
self.roi_fmap = nn.Sequential(*roi_fmap)
self.roi_fmap_obj = load_vgg(pretrained=False).classifier
###################################
# self.obj_classify_head = nn.Linear(self.pooling_dim, self.num_classes)
# self.post_emb_s = nn.Linear(self.pooling_dim, self.pooling_dim//2)
# self.post_emb_s.weight = torch.nn.init.xavier_normal(self.post_emb_s.weight, gain=1.0)
# self.post_emb_o = nn.Linear(self.pooling_dim, self.pooling_dim//2)
# self.post_emb_o.weight = torch.nn.init.xavier_normal(self.post_emb_o.weight, gain=1.0)
# self.merge_obj_high = nn.Linear(self.hidden_dim, self.pooling_dim//2)
# self.merge_obj_high.weight = torch.nn.init.xavier_normal(self.merge_obj_high.weight, gain=1.0)
# self.merge_obj_low = nn.Linear(self.pooling_dim + 5 + self.embed_dim, self.pooling_dim//2)
# self.merge_obj_low.weight = torch.nn.init.xavier_normal(self.merge_obj_low.weight, gain=1.0)
# self.rel_compress = nn.Linear(self.pooling_dim//2 + 64, self.num_rels, bias=True)
# self.rel_compress.weight = torch.nn.init.xavier_normal(self.rel_compress.weight, gain=1.0)
# self.freq_gate = nn.Linear(self.pooling_dim//2 + 64, self.num_rels, bias=True)
# self.freq_gate.weight = torch.nn.init.xavier_normal(self.freq_gate.weight, gain=1.0)
self.post_emb_s = nn.Linear(self.pooling_dim, self.pooling_dim)
self.post_emb_s.weight = torch.nn.init.xavier_normal(self.post_emb_s.weight, gain=1.0)
self.post_emb_o = nn.Linear(self.pooling_dim, self.pooling_dim)
self.post_emb_o.weight = torch.nn.init.xavier_normal(self.post_emb_o.weight, gain=1.0)
self.merge_obj_high = nn.Linear(self.hidden_dim, self.pooling_dim)
self.merge_obj_high.weight = torch.nn.init.xavier_normal(self.merge_obj_high.weight, gain=1.0)
self.merge_obj_low = nn.Linear(self.pooling_dim + 5 + self.embed_dim, self.pooling_dim)
self.merge_obj_low.weight = torch.nn.init.xavier_normal(self.merge_obj_low.weight, gain=1.0)
self.rel_compress = nn.Linear(self.pooling_dim + 64, self.num_rels, bias=True)
self.rel_compress.weight = torch.nn.init.xavier_normal(self.rel_compress.weight, gain=1.0)
self.freq_gate = nn.Linear(self.pooling_dim + 64, self.num_rels, bias=True)
self.freq_gate.weight = torch.nn.init.xavier_normal(self.freq_gate.weight, gain=1.0)
# self.ranking_module = nn.Sequential(nn.Linear(self.pooling_dim + 64, self.hidden_dim), nn.ReLU(), nn.Linear(self.hidden_dim, 1))
if self.use_bias:
self.freq_bias = FrequencyBias()
@property
def num_classes(self):
return len(self.classes)
@property
def num_rels(self):
return len(self.rel_classes)
# def fixed_obj_modules(self):
# for p in self.detector.parameters():
# p.requires_grad = False
# for p in self.obj_embed.parameters():
# p.requires_grad = False
# for p in self.pos_embed.parameters():
# p.requires_grad = False
# for p in self.context.parameters():
# p.requires_grad = False
# for p in self.union_boxes.parameters():
# p.requires_grad = False
# for p in self.merge_obj_feats.parameters():
# p.requires_grad = False
# for p in self.get_phr_feats.parameters():
# p.requires_grad = False
# for p in self.embeddings4lstm.parameters():
# p.requires_grad = False
# for p in self.lstm.parameters():
# p.requires_grad = False
# for p in self.obj_mps1.parameters():
# p.requires_grad = False
# for p in self.roi_fmap_obj.parameters():
# p.requires_grad = False
# for p in self.roi_fmap.parameters():
# p.requires_grad = False
def save_grad(self, grad):
self.gradients.append(grad)
def visual_rep(self, features, rois, pair_inds):
"""
Classify the features
:param features: [batch_size, dim, IM_SIZE/4, IM_SIZE/4]
:param rois: [num_rois, 5] array of [img_num, x0, y0, x1, y1].
        :param pair_inds: inds to use when predicting
:return: score_pred, a [num_rois, num_classes] array
box_pred, a [num_rois, num_classes, 4] array
"""
assert pair_inds.size(1) == 2
uboxes = self.union_boxes(features, rois, pair_inds)
return self.roi_fmap(uboxes)
def visual_obj(self, features, rois, pair_inds):
assert pair_inds.size(1) == 2
uboxes = self.union_boxes(features, rois, pair_inds)
return uboxes
def get_rel_inds(self, rel_labels, im_inds, box_priors):
# Get the relationship candidates
if self.training:
rel_inds = rel_labels[:, :3].data.clone()
else:
rel_cands = im_inds.data[:, None] == im_inds.data[None]
rel_cands.view(-1)[diagonal_inds(rel_cands)] = 0
# Require overlap for detection
if self.require_overlap:
rel_cands = rel_cands & (bbox_overlaps(box_priors.data,
box_priors.data) > 0)
            # if there are fewer than 100 things then we might as well add some?
amt_to_add = 100 - rel_cands.long().sum()
rel_cands = rel_cands.nonzero()
if rel_cands.dim() == 0:
rel_cands = im_inds.data.new(1, 2).fill_(0)
rel_inds = torch.cat((im_inds.data[rel_cands[:, 0]][:, None], rel_cands), 1)
return rel_inds
def union_pairs(self, im_inds):
rel_cands = im_inds.data[:, None] == im_inds.data[None]
rel_cands.view(-1)[diagonal_inds(rel_cands)] = 0
rel_inds = rel_cands.nonzero()
rel_inds = torch.cat((im_inds[rel_inds[:,0]][:,None].data, rel_inds), -1)
return rel_inds
def obj_feature_map(self, features, rois):
"""
Gets the ROI features
:param features: [batch_size, dim, IM_SIZE/4, IM_SIZE/4] (features at level p2)
:param rois: [num_rois, 5] array of [img_num, x0, y0, x1, y1].
:return: [num_rois, #dim] array
"""
feature_pool = RoIAlignFunction(self.pooling_size, self.pooling_size, spatial_scale=1 / 16)(
features, rois)
return self.roi_fmap_obj(feature_pool.view(rois.size(0), -1))
def forward(self, x, im_sizes, image_offset,
gt_boxes=None, gt_classes=None, gt_rels=None, proposals=None, train_anchor_inds=None,
return_fmap=False):
"""
Forward pass for detection
:param x: Images@[batch_size, 3, IM_SIZE, IM_SIZE]
:param im_sizes: A numpy array of (h, w, scale) for each image.
:param image_offset: Offset onto what image we're on for MGPU training (if single GPU this is 0)
:param gt_boxes:
Training parameters:
:param gt_boxes: [num_gt, 4] GT boxes over the batch.
:param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class)
:param train_anchor_inds: a [num_train, 2] array of indices for the anchors that will
be used to compute the training loss. Each (img_ind, fpn_idx)
:return: If train:
scores, boxdeltas, labels, boxes, boxtargets, rpnscores, rpnboxes, rellabels
if test:
prob dists, boxes, img inds, maxscores, classes
"""
result = self.detector(x, im_sizes, image_offset, gt_boxes, gt_classes, gt_rels, proposals,
train_anchor_inds, return_fmap=True)
# rel_feat = self.relationship_feat.feature_map(x)
if result.is_none():
            raise ValueError("Object detector returned no result.")
im_inds = result.im_inds - image_offset
boxes = result.rm_box_priors
if self.training and result.rel_labels is None:
assert self.mode == 'sgdet'
result.rel_labels = rel_assignments(im_inds.data, boxes.data, result.rm_obj_labels.data,
gt_boxes.data, gt_classes.data, gt_rels.data,
image_offset, filter_non_overlap=True,
num_sample_per_gt=1)
rel_inds = self.get_rel_inds(result.rel_labels, im_inds, boxes)
spt_feats = self.get_boxes_encode(boxes, rel_inds)
pair_inds = self.union_pairs(im_inds)
if self.hook_for_grad:
rel_inds = gt_rels[:, :-1].data
if self.hook_for_grad:
fmap = result.fmap
fmap.register_hook(self.save_grad)
else:
fmap = result.fmap.detach()
rois = torch.cat((im_inds[:, None].float(), boxes), 1)
result.obj_fmap = self.obj_feature_map(fmap, rois)
# result.obj_dists_head = self.obj_classify_head(obj_fmap_rel)
obj_embed = F.softmax(result.rm_obj_dists, dim=1) @ self.obj_embed.weight
obj_embed_lstm = F.softmax(result.rm_obj_dists, dim=1) @ self.embeddings4lstm.weight
pos_embed = self.pos_embed(Variable(center_size(boxes.data)))
obj_pre_rep = torch.cat((result.obj_fmap, obj_embed, pos_embed), 1)
obj_feats = self.merge_obj_feats(obj_pre_rep)
# obj_feats=self.trans(obj_feats)
obj_feats_lstm = torch.cat((obj_feats, obj_embed_lstm), -1).contiguous().view(1, obj_feats.size(0), -1)
# obj_feats = F.relu(obj_feats)
phr_ori = self.visual_rep(fmap, rois, pair_inds[:, 1:])
vr_indices = torch.from_numpy(intersect_2d(rel_inds[:, 1:].cpu().numpy(), pair_inds[:, 1:].cpu().numpy()).astype(np.uint8)).cuda().max(-1)[1]
vr = phr_ori[vr_indices]
phr_feats_high = self.get_phr_feats(phr_ori)
obj_feats_lstm_output, (obj_hidden_states, obj_cell_states) = self.lstm(obj_feats_lstm)
rm_obj_dists1 = result.rm_obj_dists + self.context.decoder_lin(obj_feats_lstm_output.squeeze())
obj_feats_output = self.obj_mps1(obj_feats_lstm_output.view(-1, obj_feats_lstm_output.size(-1)), \
phr_feats_high, im_inds, pair_inds)
obj_embed_lstm1 = F.softmax(rm_obj_dists1, dim=1) @ self.embeddings4lstm.weight
obj_feats_lstm1 = torch.cat((obj_feats_output, obj_embed_lstm1), -1).contiguous().view(1, \
obj_feats_output.size(0), -1)
obj_feats_lstm_output, _ = self.lstm(obj_feats_lstm1, (obj_hidden_states, obj_cell_states))
rm_obj_dists2 = rm_obj_dists1 + self.context.decoder_lin(obj_feats_lstm_output.squeeze())
obj_feats_output = self.obj_mps1(obj_feats_lstm_output.view(-1, obj_feats_lstm_output.size(-1)), \
phr_feats_high, im_inds, pair_inds)
# Prevent gradients from flowing back into score_fc from elsewhere
result.rm_obj_dists, result.obj_preds = self.context(
rm_obj_dists2,
obj_feats_output,
result.rm_obj_labels if self.training or self.mode == 'predcls' else None,
boxes.data, result.boxes_all)
obj_dtype = result.obj_fmap.data.type()
obj_preds_embeds = torch.index_select(self.ort_embedding, 0, result.obj_preds).type(obj_dtype)
        transferred_boxes = torch.stack((boxes[:, 0]/IM_SCALE, boxes[:, 3]/IM_SCALE, boxes[:, 2]/IM_SCALE, boxes[:, 1]/IM_SCALE, ((boxes[:, 2] - boxes[:, 0])*(boxes[:, 3]-boxes[:, 1]))/(IM_SCALE**2)), -1).type(obj_dtype)
        obj_features = torch.cat((result.obj_fmap, obj_preds_embeds, transferred_boxes), -1)
obj_features_merge = self.merge_obj_low(obj_features) + self.merge_obj_high(obj_feats_output)
# Split into subject and object representations
result.subj_rep = self.post_emb_s(obj_features_merge)[rel_inds[:, 1]]
result.obj_rep = self.post_emb_o(obj_features_merge)[rel_inds[:, 2]]
prod_rep = result.subj_rep * result.obj_rep
# obj_pools = self.visual_obj(result.fmap.detach(), rois, rel_inds[:, 1:])
# rel_pools = self.relationship_feat.union_rel_pooling(rel_feat, rois, rel_inds[:, 1:])
# context_pools = torch.cat([obj_pools, rel_pools], 1)
# merge_pool = self.merge_feat(context_pools)
# vr = self.roi_fmap(merge_pool)
# vr = self.rel_refine(vr)
prod_rep = prod_rep * vr
if self.use_tanh:
            prod_rep = torch.tanh(prod_rep)
prod_rep = torch.cat((prod_rep, spt_feats), -1)
freq_gate = self.freq_gate(prod_rep)
        freq_gate = torch.sigmoid(freq_gate)
result.rel_dists = self.rel_compress(prod_rep)
# result.rank_factor = self.ranking_module(prod_rep).view(-1)
if self.use_bias:
result.rel_dists = result.rel_dists + freq_gate * self.freq_bias.index_with_labels(torch.stack((
result.obj_preds[rel_inds[:, 1]],
result.obj_preds[rel_inds[:, 2]],
), 1))
if self.training:
return result
twod_inds = arange(result.obj_preds.data) * self.num_classes + result.obj_preds.data
result.obj_scores = F.softmax(result.rm_obj_dists, dim=1).view(-1)[twod_inds]
# Bbox regression
if self.mode == 'sgdet':
bboxes = result.boxes_all.view(-1, 4)[twod_inds].view(result.boxes_all.size(0), 4)
else:
# Boxes will get fixed by filter_dets function.
bboxes = result.rm_box_priors
rel_rep = F.softmax(result.rel_dists, dim=1)
# rel_rep = smooth_one_hot(rel_rep)
# rank_factor = F.sigmoid(result.rank_factor)
return filter_dets(bboxes, result.obj_scores,
result.obj_preds, rel_inds[:, 1:], rel_rep)
def __getitem__(self, batch):
""" Hack to do multi-GPU training"""
batch.scatter()
if self.num_gpus == 1:
return self(*batch[0])
replicas = nn.parallel.replicate(self, devices=list(range(self.num_gpus)))
outputs = nn.parallel.parallel_apply(replicas, [batch[i] for i in range(self.num_gpus)])
if self.training:
return gather_res(outputs, 0, dim=0)
return outputs | python |
import codecs
import re
import string
from markdown import markdown
from django.utils.safestring import mark_safe
bracket_extract = re.compile(r"<.*?>(.*?)<\/.*?>")
class MarkDownView(object):
"""
    Allows for a basic view where a markdown file is read in and rendered.
    Give the class a markdown_loc variable which is the filepath to the markdown file.
    Use self.get_markdown() to retrieve the markdown text. If using clean, it is available as
    'markdown' in the template.
"""
markdown_loc = ""
def get_markdown(self):
f = codecs.open(self.__class__.markdown_loc, "rb", "cp1252")
txt = f.read()
md = markdown(txt, extensions=['markdown.extensions.tables'])
lines = md.split("\n")
final = []
for l in lines:
if l[:2].lower() == "<h":
contents = bracket_extract.search(l).groups()[0]
contents = contents.replace(" ","-").lower()
contents = u"".join([x for x in contents if x in string.ascii_lowercase + "-"])
final.append('<a name="{0}"></a>'.format(contents))
final.append(l)
md = "\n".join(final)
md = mark_safe(md)
return md
def view(self,request):
return {"markdown":self.get_markdown()}
| python |
#
# PySNMP MIB module ONEACCESS-ACL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ONEACCESS-ACL-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:24:52 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
oacEventText, oacEventSeverityLevel = mibBuilder.importSymbols("ONEACCESS-EVENTS-MIB", "oacEventText", "oacEventSeverityLevel")
oacMIBModules, oacExpIMIpAcl = mibBuilder.importSymbols("ONEACCESS-GLOBAL-REG", "oacMIBModules", "oacExpIMIpAcl")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
Gauge32, ObjectIdentity, Counter32, Unsigned32, Integer32, NotificationType, Counter64, ModuleIdentity, Bits, iso, TimeTicks, MibIdentifier, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "ObjectIdentity", "Counter32", "Unsigned32", "Integer32", "NotificationType", "Counter64", "ModuleIdentity", "Bits", "iso", "TimeTicks", "MibIdentifier", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
oacAclMIBModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 13191, 1, 100, 669))
oacAclMIBModule.setRevisions(('2011-06-15 00:00', '2010-07-08 10:00',))
if mibBuilder.loadTexts: oacAclMIBModule.setLastUpdated('201106150000Z')
if mibBuilder.loadTexts: oacAclMIBModule.setOrganization(' OneAccess ')
class InterfaceType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("mainInterface", 1), ("subInterface", 2))
oacExpIMIpAclStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1))
oacExpIMIpAclNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 2))
oacExpIMIpAccountingStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3))
oacAclNotificationMaximumSessionReached = NotificationType((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 2, 1))
if mibBuilder.loadTexts: oacAclNotificationMaximumSessionReached.setStatus('current')
oacAclNotificationWarningSessionReachingLimit = NotificationType((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 2, 2))
if mibBuilder.loadTexts: oacAclNotificationWarningSessionReachingLimit.setStatus('current')
oacAclNotificationMaximumHalfSessionReached = NotificationType((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 2, 3))
if mibBuilder.loadTexts: oacAclNotificationMaximumHalfSessionReached.setStatus('current')
oacAclNotificationWarningHalfSessionReachingLimit = NotificationType((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 2, 4))
if mibBuilder.loadTexts: oacAclNotificationWarningHalfSessionReachingLimit.setStatus('current')
oacAclNotificationMaximumSessionReachedPerHost = NotificationType((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 2, 5)).setObjects(("ONEACCESS-EVENTS-MIB", "oacEventText"), ("ONEACCESS-EVENTS-MIB", "oacEventSeverityLevel"))
if mibBuilder.loadTexts: oacAclNotificationMaximumSessionReachedPerHost.setStatus('current')
oacAclNotificationMaximumHalfSessionReachedPerHost = NotificationType((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 2, 6))
if mibBuilder.loadTexts: oacAclNotificationMaximumHalfSessionReachedPerHost.setStatus('current')
oacAclStatObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 1))
oacAclStatNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 2))
oacAclStatConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 3))
oacAclStatGlobal = MibIdentifier((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 1, 1))
oacAclMaxSessions = MibScalar((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 1, 1, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacAclMaxSessions.setStatus('current')
oacAclActiveSessions = MibScalar((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 1, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacAclActiveSessions.setStatus('current')
oacAclSessionsClosed = MibScalar((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacAclSessionsClosed.setStatus('current')
oacAclDynamicAllocFailures = MibScalar((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacAclDynamicAllocFailures.setStatus('current')
oacAclInboundPkts = MibScalar((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacAclInboundPkts.setStatus('current')
oacAclOutboundPkts = MibScalar((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacAclOutboundPkts.setStatus('current')
oacAclInboundPktsDropped = MibScalar((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacAclInboundPktsDropped.setStatus('current')
oacAclOutboundPktsDropped = MibScalar((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacAclOutboundPktsDropped.setStatus('current')
oacIpAccountingTable = MibTable((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 1), )
if mibBuilder.loadTexts: oacIpAccountingTable.setStatus('current')
oacIpAccountingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 1, 1), ).setIndexNames((0, "ONEACCESS-ACL-MIB", "oacIpAccountingIndex"))
if mibBuilder.loadTexts: oacIpAccountingEntry.setStatus('current')
oacIpAccountingIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 1, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacIpAccountingIndex.setStatus('current')
oacIpAccountingIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 1, 1, 2), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacIpAccountingIfIndex.setStatus('current')
oacIpAccountingIfType = MibTableColumn((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 1, 1, 3), InterfaceType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacIpAccountingIfType.setStatus('current')
oacIpAccountingStatTable = MibTable((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 2), )
if mibBuilder.loadTexts: oacIpAccountingStatTable.setStatus('current')
oacIpAccountingStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 2, 1), ).setIndexNames((0, "ONEACCESS-ACL-MIB", "oacIpAccountingIndex"))
if mibBuilder.loadTexts: oacIpAccountingStatEntry.setStatus('current')
oacIpAccountingStatIpSource = MibTableColumn((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 2, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacIpAccountingStatIpSource.setStatus('current')
oacIpAccountingStatIpDest = MibTableColumn((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 2, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacIpAccountingStatIpDest.setStatus('current')
oacIpAccountingStatNbPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacIpAccountingStatNbPackets.setStatus('current')
oacIpAccountingStatNbBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacIpAccountingStatNbBytes.setStatus('current')
oacIpAccoutingGlobal = MibIdentifier((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 3))
oacIpAccountingMaxSessions = MibScalar((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 3, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacIpAccountingMaxSessions.setStatus('current')
oacIpAccountingCurrentSessions = MibScalar((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 3, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacIpAccountingCurrentSessions.setStatus('current')
oacIpAccountingAge = MibScalar((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 3, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacIpAccountingAge.setStatus('current')
oacIpAccountingNbNotAnalysedBytes = MibScalar((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 3, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacIpAccountingNbNotAnalysedBytes.setStatus('current')
oacIpAccountingNbNotAnalysedPackets = MibScalar((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 3, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: oacIpAccountingNbNotAnalysedPackets.setStatus('current')
oacIpAccoutingClear = MibScalar((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 3, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: oacIpAccoutingClear.setStatus('current')
oacAclStatGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 3, 1))
oacAclStatCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 3, 2))
oacAclStatCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 3, 2, 1)).setObjects(("ONEACCESS-ACL-MIB", "oacAclStatGeneralGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
oacAclStatCompliance = oacAclStatCompliance.setStatus('current')
oacAclStatGeneralGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 13191, 10, 3, 1, 2, 1, 3, 1, 1)).setObjects(("ONEACCESS-ACL-MIB", "oacAclMaxSessions"), ("ONEACCESS-ACL-MIB", "oacAclActiveSessions"), ("ONEACCESS-ACL-MIB", "oacAclSessionsClosed"), ("ONEACCESS-ACL-MIB", "oacAclDynamicAllocFailures"), ("ONEACCESS-ACL-MIB", "oacAclInboundPkts"), ("ONEACCESS-ACL-MIB", "oacAclOutboundPkts"), ("ONEACCESS-ACL-MIB", "oacAclInboundPktsDropped"), ("ONEACCESS-ACL-MIB", "oacAclOutboundPktsDropped"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
oacAclStatGeneralGroup = oacAclStatGeneralGroup.setStatus('current')
mibBuilder.exportSymbols("ONEACCESS-ACL-MIB", oacAclNotificationMaximumHalfSessionReached=oacAclNotificationMaximumHalfSessionReached, oacAclInboundPkts=oacAclInboundPkts, oacAclOutboundPktsDropped=oacAclOutboundPktsDropped, oacAclMaxSessions=oacAclMaxSessions, oacIpAccountingNbNotAnalysedPackets=oacIpAccountingNbNotAnalysedPackets, oacAclStatGroups=oacAclStatGroups, oacIpAccountingStatIpSource=oacIpAccountingStatIpSource, oacIpAccountingNbNotAnalysedBytes=oacIpAccountingNbNotAnalysedBytes, oacIpAccountingStatEntry=oacIpAccountingStatEntry, oacAclNotificationWarningSessionReachingLimit=oacAclNotificationWarningSessionReachingLimit, oacAclStatGeneralGroup=oacAclStatGeneralGroup, oacAclStatGlobal=oacAclStatGlobal, oacIpAccountingAge=oacIpAccountingAge, oacAclStatObjects=oacAclStatObjects, oacIpAccountingStatNbPackets=oacIpAccountingStatNbPackets, oacAclSessionsClosed=oacAclSessionsClosed, oacAclStatCompliance=oacAclStatCompliance, oacIpAccountingIfType=oacIpAccountingIfType, oacExpIMIpAccountingStatistics=oacExpIMIpAccountingStatistics, oacIpAccountingIfIndex=oacIpAccountingIfIndex, oacAclActiveSessions=oacAclActiveSessions, oacIpAccountingStatIpDest=oacIpAccountingStatIpDest, oacAclDynamicAllocFailures=oacAclDynamicAllocFailures, oacIpAccountingEntry=oacIpAccountingEntry, InterfaceType=InterfaceType, oacAclInboundPktsDropped=oacAclInboundPktsDropped, oacIpAccountingMaxSessions=oacIpAccountingMaxSessions, oacIpAccoutingGlobal=oacIpAccoutingGlobal, oacAclStatNotifications=oacAclStatNotifications, oacExpIMIpAclStatistics=oacExpIMIpAclStatistics, oacIpAccoutingClear=oacIpAccoutingClear, PYSNMP_MODULE_ID=oacAclMIBModule, oacAclOutboundPkts=oacAclOutboundPkts, oacAclMIBModule=oacAclMIBModule, oacIpAccountingStatTable=oacIpAccountingStatTable, oacIpAccountingTable=oacIpAccountingTable, oacIpAccountingIndex=oacIpAccountingIndex, oacIpAccountingStatNbBytes=oacIpAccountingStatNbBytes, oacAclNotificationMaximumSessionReachedPerHost=oacAclNotificationMaximumSessionReachedPerHost, oacIpAccountingCurrentSessions=oacIpAccountingCurrentSessions, oacAclStatCompliances=oacAclStatCompliances, oacAclNotificationWarningHalfSessionReachingLimit=oacAclNotificationWarningHalfSessionReachingLimit, oacAclNotificationMaximumSessionReached=oacAclNotificationMaximumSessionReached, oacExpIMIpAclNotifications=oacExpIMIpAclNotifications, oacAclStatConformance=oacAclStatConformance, oacAclNotificationMaximumHalfSessionReachedPerHost=oacAclNotificationMaximumHalfSessionReachedPerHost)
| python |
from __future__ import absolute_import
import logging
from flask import Blueprint, request, g, abort
from huskar_api import settings
from huskar_api.extras.concurrent_limiter import (
check_new_request, release_request, ConcurrencyExceededError)
from huskar_api.switch import switch, SWITCH_ENABLE_CONCURRENT_LIMITER
bp = Blueprint('middlewares.concurrent_limit', __name__)
logger = logging.getLogger(__name__)
@bp.before_app_request
def check_concurrent_limit():
if not switch.is_switched_on(SWITCH_ENABLE_CONCURRENT_LIMITER):
return
if g.get('auth'):
anonymous = False
username = g.auth.username
else:
anonymous = True
username = request.remote_addr
config = get_limiter_config(
settings.CONCURRENT_LIMITER_SETTINGS, username, anonymous=anonymous)
if not config:
return
ttl, capacity = config['ttl'], config['capacity']
try:
result = check_new_request(username, ttl, capacity)
except ConcurrencyExceededError:
abort(429, 'Too Many Requests, only allow handling {} requests '
'in {} seconds'.format(capacity, ttl))
else:
if result is not None:
key, sub_item = result
g.concurrent_limiter_data = {'key': key, 'sub_item': sub_item}
@bp.after_app_request
def release_concurrent_limiter_data(response):
if (g.get('concurrent_limiter_data') and
(response.status_code != 200 or
request.endpoint != 'api.long_polling')):
data = g.concurrent_limiter_data
release_request(data['key'], data['sub_item'])
g.concurrent_limiter_data = None
return response
def get_limiter_config(configs, username, anonymous):
if username in configs:
return configs[username]
if anonymous and '__anonymous__' in configs:
return configs['__anonymous__']
return configs.get('__default__')
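
# Expected shape of the limiter settings, inferred from get_limiter_config above (the
# real values live in huskar_api.settings.CONCURRENT_LIMITER_SETTINGS; the usernames
# and numbers below are made up):
#
#   CONCURRENT_LIMITER_SETTINGS = {
#       'some_user': {'ttl': 5, 'capacity': 100},
#       '__anonymous__': {'ttl': 5, 'capacity': 10},
#       '__default__': {'ttl': 5, 'capacity': 50},
#   }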
| python |
import bmtrain as bmt
def main():
bmt.init_distributed()
bmt.print_rank("======= All Gather =======")
bmt.benchmark.all_gather()
bmt.print_rank("===== Reduce Scatter =====")
bmt.benchmark.reduce_scatter()
if __name__ == '__main__':
main() | python |
from django.db import models
# Create your models here.
# a cleaned up version of the old comments model, django-ready.
class Comment(models.Model):
CID = models.IntegerField(primary_key=True,unique=True,editable=False,)
requestTime = models.DateTimeField()
name = models.CharField(max_length=120,null=True,blank=True,default='Anonymous')
email = models.EmailField(max_length=180,null=True,blank=True)
completed = models.BooleanField()
completedTime = models.DateTimeField(null=True,blank=True)
completerComment = models.TextField(null=True,blank=True)
completingName = models.CharField(max_length=120,null=True,blank=True)
completingServer = models.CharField(max_length=120,null=True,blank=True)
isDeleted = models.BooleanField()
deleterIP = models.IPAddressField(null=True,blank=True)
deletedTime = models.DateTimeField(null=True,blank=True)
request = models.TextField()
server = models.CharField(max_length=60, null=True,blank=True)
requestIP = models.IPAddressField(max_length=64,null=True,blank=True)
# ala facebook
Likes = models.IntegerField(default=0)
def __unicode__(self):
return self.request
def save(self,*args,**kwargs):
if not self.CID:
i = Comment.objects.raw('SELECT * FROM requests_comment ORDER BY CID DESC LIMIT 1')[0]
self.CID = i.CID+1
super(Comment,self).save(*args,**kwargs) | python |
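        # Note: CID is assigned as max(existing CID) + 1 via a raw query, which is
        # not safe under concurrent saves (two requests can read the same last CID).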
"""Example demonstrating a basic usage of choke package."""
from time import sleep
from redis import StrictRedis
from choke import RedisChokeManager, CallLimitExceededError
REDIS = StrictRedis() # Tweak this to reflect your setup
CHOKE_MANAGER = RedisChokeManager(redis=REDIS)
# Example configuration: enforce limit of no more than 10 calls in two seconds window
@CHOKE_MANAGER.choke(limit=10, window_length=2)
def foo(x, y):
"""Just print something to show that foo was called."""
print(f'foo called with ({x}, {y})')
if __name__ == '__main__':
# We expect pattern of 10 successes followed by 10 failures followed again by 10 successes
# Some deviations from this pattern may obviously occur as calling foo takes nonzero time
for i in range(30):
try:
foo(i, y=i ** 2)
except CallLimitExceededError:
print('Foo not called. Limit exceeded!')
sleep(0.1)
| python |
# -*- coding: utf-8 -*-
#
from __future__ import absolute_import, unicode_literals
import uuid
import pytest
import mock
import avalon.cache
import avalon.models
def test_get_frozen_mapping():
mapping = {'foo': set(['zing', 'zam', 'zowey'])}
frozen = avalon.cache.get_frozen_mapping(mapping)
assert 'foo' in frozen
assert frozen['foo'] == frozenset(['zing', 'zam', 'zowey'])
assert isinstance(frozen['foo'], frozenset)
with pytest.raises(AttributeError):
frozen['foo'].add('blah')
class TestIdLookupCache(object):
def test_get_album_id_exists(self):
"""Test that we can translate an album name to ID"""
model1 = avalon.models.Album()
model1.id = uuid.UUID("2d24515c-a459-552a-b022-e85d1621425a")
model1.name = 'Dookie'
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_albums.return_value = [model1]
dao.get_all_artists.return_value = []
dao.get_all_genres.return_value = []
cache = avalon.cache.IdLookupCache(dao).reload()
assert uuid.UUID("2d24515c-a459-552a-b022-e85d1621425a") == \
cache.get_album_id('Dookie')
def test_get_album_id_does_not_exist(self):
"""Test that an album that does not exist returns None"""
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_albums.return_value = []
dao.get_all_artists.return_value = []
dao.get_all_genres.return_value = []
cache = avalon.cache.IdLookupCache(dao).reload()
assert None is cache.get_album_id('Dookie')
def test_get_album_id_case_insensitive(self):
"""Test that we can translate an album name to ID in a case insensitive fasion"""
model1 = avalon.models.Album()
model1.id = uuid.UUID("2d24515c-a459-552a-b022-e85d1621425a")
model1.name = 'Dookie'
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_albums.return_value = [model1]
dao.get_all_artists.return_value = []
dao.get_all_genres.return_value = []
cache = avalon.cache.IdLookupCache(dao).reload()
assert uuid.UUID("2d24515c-a459-552a-b022-e85d1621425a") == \
cache.get_album_id('DOOKIE')
def test_get_artist_id_exists(self):
"""Test that we can translate an artist name to ID"""
        model1 = avalon.models.Artist()
model1.id = uuid.UUID("5cede078-e88e-5929-b8e1-cfda7992b8fd")
model1.name = 'Bad Religion'
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_albums.return_value = []
dao.get_all_artists.return_value = [model1]
dao.get_all_genres.return_value = []
cache = avalon.cache.IdLookupCache(dao).reload()
assert uuid.UUID("5cede078-e88e-5929-b8e1-cfda7992b8fd") == \
cache.get_artist_id('Bad Religion')
def test_get_artist_id_does_not_exist(self):
"""Test that an artist that does not exist returns None"""
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_albums.return_value = []
dao.get_all_artists.return_value = []
dao.get_all_genres.return_value = []
cache = avalon.cache.IdLookupCache(dao).reload()
        assert None is cache.get_artist_id('Bad Religion')
def test_get_artist_id_case_insensitive(self):
"""Test that we can translate an artist name to ID in a case insensitive fashion"""
model1 = avalon.models.Artist()
model1.id = uuid.UUID("5cede078-e88e-5929-b8e1-cfda7992b8fd")
model1.name = 'Bad Religion'
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_albums.return_value = []
dao.get_all_artists.return_value = [model1]
dao.get_all_genres.return_value = []
cache = avalon.cache.IdLookupCache(dao).reload()
assert uuid.UUID("5cede078-e88e-5929-b8e1-cfda7992b8fd") == \
cache.get_artist_id('BaD RELIGION')
def test_get_genre_id_exists(self):
"""Test that we can translate an genre name to ID"""
model1 = avalon.models.Genre()
model1.id = uuid.UUID("8794d7b7-fff3-50bb-b1f1-438659e05fe5")
model1.name = 'Punk'
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_albums.return_value = []
dao.get_all_artists.return_value = []
dao.get_all_genres.return_value = [model1]
cache = avalon.cache.IdLookupCache(dao).reload()
assert uuid.UUID("8794d7b7-fff3-50bb-b1f1-438659e05fe5") == \
cache.get_genre_id('Punk')
def test_get_genre_id_does_not_exist(self):
"""Test that an genre that does not exist returns None"""
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_albums.return_value = []
dao.get_all_artists.return_value = []
dao.get_all_genres.return_value = []
cache = avalon.cache.IdLookupCache(dao).reload()
        assert None is cache.get_genre_id('Punks')
def test_get_genre_id_case_insensitive(self):
"""Test that we can translate an genre name to ID in a case insensitive fashion"""
model1 = avalon.models.Genre()
model1.id = uuid.UUID("8794d7b7-fff3-50bb-b1f1-438659e05fe5")
model1.name = 'Punk'
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_albums.return_value = []
dao.get_all_artists.return_value = []
dao.get_all_genres.return_value = [model1]
cache = avalon.cache.IdLookupCache(dao).reload()
assert uuid.UUID("8794d7b7-fff3-50bb-b1f1-438659e05fe5") == \
cache.get_genre_id('PUNK')
def test_reload_calls_dao_methods(self):
"""Ensure that the .reload() method calls the DAO methods again"""
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_albums.return_value = []
dao.get_all_artists.return_value = []
dao.get_all_genres.return_value = []
avalon.cache.IdLookupCache(dao).reload()
class TestIdNameStore(object):
def test_get_by_id(self):
model1 = avalon.models.Album()
model1.id = uuid.UUID("2d24515c-a459-552a-b022-e85d1621425a")
model1.name = 'Dookie'
model2 = avalon.models.Album()
model2.id = uuid.UUID("b3c204e4-445d-5812-9366-28de6770c4e1")
model2.name = 'Insomniac'
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_albums.return_value = [model1, model2]
cache = avalon.cache.AlbumStore(dao).reload()
res = cache.get_by_id(uuid.UUID("2d24515c-a459-552a-b022-e85d1621425a"))
assert 1 == len(res)
for dookie in res:
assert 'Dookie' == dookie.name
def test_get_all(self):
model1 = avalon.models.Album()
model1.id = uuid.UUID("2d24515c-a459-552a-b022-e85d1621425a")
model1.name = 'Dookie'
model2 = avalon.models.Album()
model2.id = uuid.UUID("b3c204e4-445d-5812-9366-28de6770c4e1")
model2.name = 'Insomniac'
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_albums.return_value = [model1, model2]
names = set(['Dookie', 'Insomniac'])
cache = avalon.cache.AlbumStore(dao).reload()
res = cache.get_all()
assert 2 == len(res)
for album in res:
assert album.name in names
class TestTrackStore(object):
def setup(self):
album = avalon.models.Album()
album.id = uuid.UUID("350c49d9-fa38-585a-a0d9-7343c8b910ed")
album.name = 'Ruiner'
artist = avalon.models.Artist()
artist.id = uuid.UUID("aa143f55-65e3-59f3-a1d8-36eac7024e86")
artist.name = 'A Wilhelm Scream'
genre = avalon.models.Genre()
genre.id = uuid.UUID("8794d7b7-fff3-50bb-b1f1-438659e05fe5")
genre.name = 'Punk'
song = avalon.models.Track()
song.id = uuid.UUID("ca2e8303-69d7-53ec-907e-2f111103ba29")
song.name = 'The Pool'
song.length = 150
song.track = 3
song.year = 2005
song.album_id = album.id
song.artist_id = artist.id
song.genre_id = genre.id
song.album = album
song.artist = artist
song.genre = genre
self.song = song
def test_get_by_album(self):
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_tracks.return_value = [self.song]
cache = avalon.cache.TrackStore(dao).reload()
songs = cache.get_by_album(uuid.UUID("350c49d9-fa38-585a-a0d9-7343c8b910ed"))
for song in songs:
assert uuid.UUID("ca2e8303-69d7-53ec-907e-2f111103ba29") == song.id
def test_get_by_album_missing(self):
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_tracks.return_value = [self.song]
cache = avalon.cache.TrackStore(dao).reload()
songs = cache.get_by_album(uuid.UUID('daa612e8-daa8-49a0-8b14-6ee85720fb1c'))
assert 0 == len(songs)
def test_get_by_artist(self):
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_tracks.return_value = [self.song]
cache = avalon.cache.TrackStore(dao).reload()
songs = cache.get_by_artist(uuid.UUID("aa143f55-65e3-59f3-a1d8-36eac7024e86"))
for song in songs:
assert uuid.UUID("ca2e8303-69d7-53ec-907e-2f111103ba29") == song.id
def test_get_by_artist_missing(self):
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_tracks.return_value = [self.song]
cache = avalon.cache.TrackStore(dao).reload()
songs = cache.get_by_artist(uuid.UUID('a15dfab4-75e6-439f-b621-5a3a9cf905d2'))
assert 0 == len(songs)
def test_get_by_genre(self):
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_tracks.return_value = [self.song]
cache = avalon.cache.TrackStore(dao).reload()
songs = cache.get_by_genre(uuid.UUID("8794d7b7-fff3-50bb-b1f1-438659e05fe5"))
for song in songs:
assert uuid.UUID("ca2e8303-69d7-53ec-907e-2f111103ba29") == song.id
def test_get_by_genre_missing(self):
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_tracks.return_value = [self.song]
cache = avalon.cache.TrackStore(dao).reload()
songs = cache.get_by_genre(uuid.UUID('cf16d2d9-35da-4c2f-9f35-e52fb952864e'))
assert 0 == len(songs)
def test_get_by_id(self):
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_tracks.return_value = [self.song]
cache = avalon.cache.TrackStore(dao).reload()
songs = cache.get_by_id(uuid.UUID("ca2e8303-69d7-53ec-907e-2f111103ba29"))
for song in songs:
assert uuid.UUID("ca2e8303-69d7-53ec-907e-2f111103ba29") == song.id
def test_get_by_id_missing(self):
dao = mock.Mock(spec=avalon.models.ReadOnlyDao)
dao.get_all_tracks.return_value = [self.song]
cache = avalon.cache.TrackStore(dao).reload()
songs = cache.get_by_id(uuid.UUID('72e2e340-fabc-4712-aa26-8a8f122999e8'))
assert 0 == len(songs)
| python |
from pythonforandroid.recipe import Recipe
from pythonforandroid.logger import shprint
from pythonforandroid.util import current_directory
from os.path import join
import sh
class SnappyRecipe(Recipe):
version = '1.1.7'
url = 'https://github.com/google/snappy/archive/{version}.tar.gz'
built_libraries = {'libsnappy.so': '.'}
def build_arch(self, arch):
env = self.get_recipe_env(arch)
source_dir = self.get_build_dir(arch.arch)
with current_directory(source_dir):
shprint(sh.cmake, source_dir,
'-DANDROID_ABI={}'.format(arch.arch),
'-DANDROID_NATIVE_API_LEVEL={}'.format(self.ctx.ndk_api),
'-DCMAKE_TOOLCHAIN_FILE={}'.format(
join(self.ctx.ndk_dir, 'build', 'cmake',
'android.toolchain.cmake')),
'-DBUILD_SHARED_LIBS=1',
_env=env)
shprint(sh.make, _env=env)
recipe = SnappyRecipe()
| python |
import os
os.system("cls")
def both():
folder = input("Enter path to directory: ")
os.system("cls")
print(f"WARNING, this will rename every file in the directory: {folder}!")
name = input(f"Enter new name for files: ")
os.system("cls")
print("WARNING, this could cause problems if file extention is invalid!")
file_ext = input("Enter new file extention: ")
for count, filename in enumerate(os.listdir(folder)):
dst = f"{name} {str(count+1)}.{file_ext}"
src = f"{folder}/{filename}"
dst = f"{folder}/{dst}"
os.rename(src, dst) | python |
import sys
import unittest
from unittest import mock
from unittest.mock import MagicMock, Mock
sys.modules['w1thermsensor'] = MagicMock()
from sensors.ground_temperature_sensor import GroundTemperatureSensor
class TestGroundTemperatureSensor(unittest.TestCase):
@mock.patch('sensors.ground_temperature_sensor.Sensor.__init__')
@mock.patch('sensors.ground_temperature_sensor.logging')
@mock.patch('sensors.ground_temperature_sensor.W1ThermSensor')
def setUp(self, mock_sensor, mock_logging, mock_super):
test_id = 'test_id'
test_type = 'test_type'
mock_sensor.return_value.id = test_id
mock_sensor.return_value.type_name = test_type
self.test_sensor = GroundTemperatureSensor()
self.assertIsNotNone(self.test_sensor)
mock_sensor.assert_called_once()
mock_logging.debug.assert_called_once_with(msg=f'[{GroundTemperatureSensor.__name__}] Started W1ThermSensor with id "{test_id}".')
mock_super.assert_called_once_with()
def test_when_getting_readings_expected_method_should_be_called(self):
# arrange
test_temperature = 45
mock_sensor = Mock()
mock_sensor.get_temperature.return_value = test_temperature
self.test_sensor.sensor = mock_sensor
# act
self.assertEqual(self.test_sensor.get_reading(), [test_temperature])
# assert
mock_sensor.get_temperature.assert_called_once()
if __name__ == '__main__':
unittest.main()
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/11/23 3:09 PM
from api.channel import channel
from util.data_util import data_pool
from util.faker_util import fakerist
# Create a sign-up activity and enable it
def add_channel_random():
kwargs = data_pool.supply('channel.yml', 'add_channel')[0]
fake = "Asctrio" + fakerist.month_name()
kwargs['name'] = fake
kwargs['code'] = fake
res1 = channel.add_channel(**kwargs)
return fake
if __name__ == '__main__':
pass
| python |
import hashlib
from Crypto.Cipher import AES
class Crypto:
SALT = "@uhooinc.com"
def __init__(self, clientCode):
self.key = hashlib.md5(
clientCode.encode("utf-8")
).digest() # initialization key
self.length = AES.block_size # Initialize the block size
self.aes = AES.new(
self.key, AES.MODE_ECB
) # Initialize AES, an instance of ECB mode
# Truncate function to remove padded characters
self.unpad = lambda date: date[0 : -ord(date[-1])]
def pad(self, text):
"""
        Pad the text so that its byte length is an integer multiple of block_size
"""
text = str(text, encoding="utf-8")
count = len(text)
add = self.length - (count % self.length)
entext = text + (chr(add) * add)
return bytes(entext, encoding="utf-8")
def encrypt(self, uid, password):
passwordSalted = uid + password + Crypto.SALT
passwordHashed = (
hashlib.sha256(passwordSalted.encode("utf-8")).hexdigest().encode("utf-8")
)
res = self.aes.encrypt(self.pad(passwordHashed))
return res
def decrypt(self, decrData):
res = decrData
msg = self.aes.decrypt(res).decode("utf8")
return self.unpad(msg)
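# --- Hedged usage sketch (not part of the original module; client code and credentials are made up) ---
if __name__ == "__main__":
    crypto = Crypto("example-client-code")
    token = crypto.encrypt("example-uid", "example-password")
    print(len(token) % AES.block_size == 0)  # ciphertext length is a whole number of AES blocks
    # A fresh instance is used for decryption so the shared AES-ECB object is not reused across modes.
    print(Crypto("example-client-code").decrypt(token))  # prints the SHA-256 hex digest that was encrypted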
| python |
#!/usr/bin/env python3
import shlex
import shutil
import pwncat
from pwncat.modules import Bool, List, Status, Argument, BaseModule, ModuleFailed
from pwncat.platform.windows import Windows, PowershellError
class Module(BaseModule):
"""
Load the Invoke-BloodHound cmdlet and execute it. Automatically download the
resulting zip file to a defined location and remove it from the target.
"""
PLATFORM = [Windows]
ARGUMENTS = {
"CollectionMethod": Argument(
List(str),
default=None,
help="Specifies the collection method(s) to be used.",
),
"Stealth": Argument(
Bool,
default=None,
help="Use the stealth collection options (default: false)",
),
"Domain": Argument(
str,
default=None,
help="Specifies the domain to enumerate (default: current)",
),
"WindowsOnly": Argument(
Bool,
default=None,
help="Limits computer collection to systems that have an operatingsystem attribute that matches *Windows",
),
"ZipFilename": Argument(
str, help="Name for the zip file output by data collection"
),
"NoSaveCache": Argument(
Bool,
default=None,
help="Don't write the cache file to disk. Caching will still be performed in memory.",
),
"EncryptZip": Argument(
Bool, default=None, help="Encrypt the zip file with a random password"
),
"InvalidateCache": Argument(
Bool, default=None, help="Invalidate and rebuild the cache file"
),
"SearchBase": Argument(
str,
default=None,
help="DistinguishedName at which to start LDAP searches. Equivalent to the old -Ou option",
),
"LdapFilter": Argument(
str,
default=None,
help="Append this ldap filter to the search filter to further filter the results enumerated",
),
"DomainController": Argument(
str,
default=None,
help="Domain controller to which to connect. Specifying this can result in data loss",
),
"LdapPort": Argument(
int,
default=None,
help="Port LDAP is running on (default: 389/686 for LDAPS)",
),
"SecureLDAP": Argument(
Bool,
default=None,
help="Connect to LDAPS (LDAP SSL) instead of regular LDAP",
),
"DisableKerberosSigning": Argument(
Bool,
default=None,
help="Disables kerberos signing/sealing, making LDAP traffic viewable",
),
"LdapUsername": Argument(
str,
default=None,
help="Username for connecting to LDAP. Use this if you're using a non-domain account for connecting to computers",
),
"LdapPassword": Argument(
str, default=None, help="Password for connecting to LDAP"
),
"SkipPortScan": Argument(
Bool, default=None, help="Skip SMB port checks when connecting to computers"
),
"PortScanTimeout": Argument(
int, default=None, help="Timeout for SMB port checks"
),
"ExcludeDomainControllers": Argument(
Bool,
default=None,
help="Exclude domain controllers from enumeration (useful to avoid Microsoft ATP/ATA)",
),
"Throttle": Argument(
int, default=None, help="Throttle requests to computers (in milliseconds)"
),
"Jitter": Argument(int, default=None, help="Add jitter to throttle"),
"OverrideUserName": Argument(
str, default=None, help="Override username to filter for NetSessionEnum"
),
"NoRegistryLoggedOn": Argument(
Bool,
default=None,
help="Disable remote registry check in LoggedOn collection",
),
"DumpComputerStatus": Argument(
Bool,
default=None,
help="Dumps error codes from attempts to connect to computers",
),
"RealDNSName": Argument(
str, default=None, help="Overrides the DNS name used for API calls"
),
"CollectAllProperties": Argument(
Bool, default=None, help="Collect all string LDAP properties on objects"
),
"StatusInterval": Argument(
int, default=None, help="Interval for displaying status in milliseconds"
),
"Loop": Argument(
Bool, default=None, help="Perform looping for computer collection"
),
"LoopDuration": Argument(
str, default=None, help="Duration to perform looping (default: 02:00:00)"
),
"LoopInterval": Argument(
str,
default=None,
help="Interval to sleep between loops (default: 00:05:00)",
),
}
SHARPHOUND_URL = "https://raw.githubusercontent.com/BloodHoundAD/BloodHound/master/Collectors/SharpHound.ps1"
def run(self, session: "pwncat.manager.Session", **kwargs):
# First, we need to load BloodHound
try:
yield Status("importing Invoke-BloodHound cmdlet")
session.run("manage.powershell.import", path=self.SHARPHOUND_URL)
except (ModuleFailed, PowershellError) as exc:
raise ModuleFailed(f"while importing Invoke-BloodHound: {exc}")
# Try to create a temporary file. We're just going to delete it, but
# this gives us a tangible temporary path to put the zip file.
yield Status("locating a suitable temporary file location")
with session.platform.tempfile(suffix="zip", mode="w") as filp:
file_path = filp.name
path = session.platform.Path(file_path)
path.unlink()
# Note the local path to the downloaded zip file and set it to our temp
# file path we just created/deleted.
output_path = kwargs["ZipFilename"]
kwargs["ZipFilename"] = path.parts[-1]
kwargs["OutputDirectory"] = str(path.parent)
# Build the arguments
bloodhound_args = {k: v for k, v in kwargs.items() if v is not None}
argument_list = ["Invoke-BloodHound"]
for k, v in bloodhound_args.items():
if isinstance(v, bool) and v:
argument_list.append(f"-{k}")
elif not isinstance(v, bool):
argument_list.append(f"-{k}")
argument_list.append(str(v))
powershell_command = shlex.join(argument_list)
# Execute BloodHound
try:
yield Status("executing bloodhound collector")
session.platform.powershell(powershell_command)
except (ModuleFailed, PowershellError) as exc:
raise ModuleFailed(f"Invoke-BloodHound: {exc}")
output_name = path.parts[-1]
path_list = list(path.parent.glob(f"**_{output_name}"))
if not path_list:
raise ModuleFailed("unable to find bloodhound output")
# There should only be one result
path = path_list[0]
# Download the contents of the zip file
try:
yield Status(f"downloading results to {output_path}")
with open(output_path, "wb") as dst:
with path.open("rb") as src:
shutil.copyfileobj(src, dst)
except (FileNotFoundError, PermissionError) as exc:
if output_path in str(exc):
try:
path.unlink()
except FileNotFoundError:
pass
raise ModuleFailed(f"permission error: {output_path}") from exc
raise ModuleFailed("bloodhound failed or access to output was denied")
# Delete the zip from the target
yield Status("deleting collected results from target")
path.unlink()
| python |
import Cptool.config
from Cptool.gaMavlink import GaMavlink
if __name__ == '__main__':
GaMavlink.extract_from_log_path(f"./log/{Cptool.config.MODE}")
| python |
from __future__ import print_function
import os, sys
import numpy as np
np.random.seed(1234) # for reproducibility?
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import lasagne
os.environ["THEANO_FLAGS"] = "cuda.root=/usr/local/cuda,device=gpu,floatX=float32"
# specifying the gpu to use
import theano.sandbox.cuda
theano.sandbox.cuda.use('gpu1')
import theano
import theano.tensor as T
# from http://blog.christianperone.com/2015/08/convolutional-neural-networks-and-feature-extraction-with-python/
# import matplotlib
# import matplotlib.pyplot as plt
# import matplotlib.cm as cm
import numpy as np
import logging
import formatting
logger_lip = logging.getLogger('lipreading')
logger_lip.setLevel(logging.DEBUG)
FORMAT = '[$BOLD%(filename)s$RESET:%(lineno)d][%(levelname)-5s]: %(message)s '
formatter = logging.Formatter(formatting.formatter_message(FORMAT, False))
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger_lip.addHandler(ch)
# User - created files
import train_lipreading # load training functions
import buildNetworks
import preprocessLipreading
import general_tools
import lasagne.layers as L
import lasagne.objectives as LO
batch_sizes = [32]
networks = ["resnet50"]
justTest = True
viseme = False
def main():
for batch_size, network_type in zip(batch_sizes, networks):
print(batch_size, network_type)
# BN parameters
# batch_size = 100
logger_lip.info("batch_size = %s",batch_size)
# alpha is the exponential moving average factor
alpha = .1
logger_lip.info("alpha = %s",alpha)
epsilon = 1e-4
logger_lip.info("epsilon = %s",epsilon)
# activation
activation = T.nnet.relu
logger_lip.info("activation = T.nnet.relu")
# Training parameters
num_epochs = 20
logger_lip.info("num_epochs = %s", num_epochs)
# Decaying LR
LR_start = 0.001
logger_lip.info("LR_start = %s", LR_start)
LR_fin = 0.0000003
logger_lip.info("LR_fin = %s",LR_fin)
#LR_decay = (LR_fin / LR_start) ** (1. / num_epochs)
LR_decay = 0.5 # sqrt(0.5)
logger_lip.info("LR_decay = %s",LR_decay)
# BTW, LR decay might good for the BN moving average...
shuffle_parts = 1
logger_lip.info("shuffle_parts = %s",shuffle_parts)
oneHot = False
##############################################
if viseme: nbClasses = 12
else: nbClasses = 39
# get the database
# If it's small (lipspeakers) -> generate X_train, y_train etc here
        # otherwise we need to load and generate each speaker separately in the training loop
dataset = "TCDTIMIT"
root_dir = os.path.join(os.path.expanduser('~/TCDTIMIT/lipreading/'+dataset))
results_dir = root_dir + "/results/CNN";
if not os.path.exists(results_dir): os.makedirs(results_dir)
if viseme: database_binaryDir = root_dir + '/binaryViseme'
else: database_binaryDir = root_dir + '/binary'
datasetType = "lipspeakers" #"volunteers" # lipspeakers or volunteers"
##############################################
if datasetType == "lipspeakers":
loadPerSpeaker = False # only lipspeakers small enough to fit in CPU RAM, generate X_train etc here
storeProcessed = True
processedDir = database_binaryDir + "_allLipspeakersProcessed"
# pkl_path = processedDir + os.sep + datasetType + ".pkl"
# if not os.path.exists(pkl_path):
# logger_lip.info("dataset not yet processed. Processing...")
# preprocessLipreading.prepLip_all(data_path=database_binaryDir, store_path=pkl_path, trainFraction=0.7, validFraction=0.1,
# testFraction=0.2,
# nbClasses=nbClasses, onehot=oneHot, type=datasetType, verbose=True)
#datasetFiles = general_tools.unpickle(pkl_path)
# if this doesn't succeed, you probably have to generate the files with datasetToPkl_fromCombined.py
X_train, y_train = unpickle(os.path.expanduser("~/TCDTIMIT/lipreading/TCDTIMIT/binary/allLipspeakersTrain.pkl"))
X_val, y_val = unpickle(os.path.expanduser("~/TCDTIMIT/lipreading/TCDTIMIT/binary/allLipspeakersVal.pkl"))
X_test, y_test = unpickle(os.path.expanduser("~/TCDTIMIT/lipreading/TCDTIMIT/binary/allLipspeakersTest.pkl"))
datasetFiles = [X_train, y_train, X_val, y_val, X_test, y_test]
else: # we need to load and preprocess each speaker before we evaluate, because dataset is too large and doesn't fit in CPU RAM
loadPerSpeaker = True
storeProcessed = True #if you have about 10GB hdd space, you can increase the speed by not reprocessing it each iteration
processedDir = database_binaryDir + "_finalProcessed"
# you can just run this program and it will generate the files the first time it encounters them, or generate them manually with datasetToPkl.py
# just get the names
testVolunteerNumbers = ["13F", "15F", "21M", "23M", "24M", "25M", "28M", "29M", "30F", "31F", "34M", "36F",
"37F", "43F", "47M", "51F", "54M"];
testVolunteers = [str(testNumber) + ".pkl" for testNumber in testVolunteerNumbers];
lipspeakers = ["Lipspkr1.pkl", "Lipspkr2.pkl", "Lipspkr3.pkl"];
allSpeakers = [f for f in os.listdir(database_binaryDir) if
os.path.isfile(os.path.join(database_binaryDir, f)) and os.path.splitext(f)[1] == ".pkl"]
trainVolunteers = [f for f in allSpeakers if not (f in testVolunteers or f in lipspeakers)];
trainVolunteers = [vol for vol in trainVolunteers if vol is not None]
if datasetType == "combined":
trainingSpeakerFiles = trainVolunteers + lipspeakers
testSpeakerFiles = testVolunteers
elif datasetType == "volunteers":
trainingSpeakerFiles = trainVolunteers
testSpeakerFiles = testVolunteers
else:
raise Exception("invalid dataset entered")
datasetFiles = [trainingSpeakerFiles, testSpeakerFiles]
model_name = datasetType + "_" + network_type + "_" + ("viseme" if viseme else "phoneme")+str(nbClasses)
model_save_name = os.path.join(results_dir,model_name)
# log file
logFile = results_dir + os.sep + model_name + '.log'
# if os.path.exists(logFile):
# fh = logging.FileHandler(logFileT) # append to existing log
# else:
fh = logging.FileHandler(logFile, 'w') # create new logFile
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger_lip.addHandler(fh)
logger_lip.info('Building the CNN...')
# Prepare Theano variables for inputs and targets
inputs = T.tensor4('inputs')
if oneHot: targets = T.matrix('targets')
else: targets = T.ivector('targets')
LR = T.scalar('LR', dtype=theano.config.floatX)
# get the network structure
if network_type == "google":
cnnDict, l_out = buildNetworks.build_network_google(activation, alpha, epsilon, inputs, nbClasses) # 7.176.231 params
elif network_type == "cifar10":
cnn, l_out = buildNetworks.build_network_cifar10(input=inputs, nbClasses=nbClasses, activation=activation, alpha=alpha, epsilon=epsilon)
elif network_type == "cifar10_v2":
cnn, l_out = buildNetworks.build_network_cifar10_v2(input=inputs,nbClasses=nbClasses)
elif network_type == "resnet50":
cnn, l_out = buildNetworks.build_network_resnet50(inputs, nbClasses)
        # print the number of network parameters
logger_lip.info("Using the %s network", network_type)
logger_lip.info("The number of parameters of this network: %s", L.count_params(l_out))
logger_lip.info("loading %s", model_save_name + '.npz')
load_model(model_save_name +'.npz', l_out)
# a = '/home/matthijs/TCDTIMIT/lipreading/TCDTIMIT/results/thirty.npz'
# logger_lip.info("loading %s", a)
# load_model(a, l_out)
logger_lip.info("* COMPILING FUNCTIONS...")
# for validation: disable dropout etc layers -> deterministic
test_network_output = L.get_output(l_out, deterministic=True)
test_acc = T.mean(T.eq(T.argmax(test_network_output, axis=1), targets),
dtype=theano.config.floatX) # T.zeros((1,))
test_loss = LO.categorical_crossentropy(test_network_output, targets);
test_loss = test_loss.mean()
# Top k accuracy
k = 3
# topk_acc = T.mean( T.any(T.eq(T.argsort(test_network_output, axis=1)[:, -k:], targets.dimshuffle(0, 'x')), axis=1),
# dtype=theano.config.floatX)
topk_acc = T.mean(lasagne.objectives.categorical_accuracy(test_network_output, targets.flatten(), top_k=k))
topk_acc_fn = theano.function([inputs, targets], topk_acc)
val_fn = theano.function([inputs, targets], [test_loss, test_acc, topk_acc])
# For training, use nondeterministic output
network_output = L.get_output(l_out, deterministic=False)
out_fn = theano.function([inputs], network_output)
# cross-entropy loss
loss = LO.categorical_crossentropy(network_output, targets);
loss = loss.mean()
# # Also add weight decay to the cost function
weight_decay = 1e-5
weightsl2 = lasagne.regularization.regularize_network_params(l_out, lasagne.regularization.l2)
loss += weight_decay * weightsl2
# acc
err = T.mean(T.eq(T.argmax(network_output, axis=1), targets), dtype=theano.config.floatX)
# set all params to trainable
params = L.get_all_params(l_out, trainable=True)
updates = lasagne.updates.adam(loss_or_grads=loss, params=params, learning_rate=LR)
# Compile a function performing a training step on a mini-batch (by giving the updates dictionary)
# and returning the corresponding training loss:
train_fn = theano.function([inputs, targets, LR], loss, updates=updates)
logger_lip.info('Training...')
train_lipreading.train(
train_fn=train_fn, val_fn=val_fn, out_fn=out_fn, topk_acc_fn = topk_acc_fn, k=k,
network_output_layer=l_out,
batch_size=batch_size,
LR_start=LR_start, LR_decay=LR_decay,
num_epochs=num_epochs,
dataset=datasetFiles,
database_binaryDir=database_binaryDir,
storeProcessed=storeProcessed,
processedDir=processedDir,
loadPerSpeaker=loadPerSpeaker, justTest =justTest,
save_name=model_save_name,
shuffleEnabled=True)
def unpickle(file):
import cPickle
fo = open(file, 'rb')
a = cPickle.load(fo)
fo.close()
return a
def load_model(model_path, network_output_layer, logger=logger_lip, roundParams=False):
try:
logger.info("Loading stored model...")
# restore network weights
with np.load(model_path) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
try:
lasagne.layers.set_all_param_values(network_output_layer, param_values)
# print(len(param_values));
# for layer in lasagne.layers.get_all_layers(network_output_layer):
# print(layer)
#import pdb; pdb.set_trace();
except:
if roundParams: lasagne.layers.set_all_param_values(network_output_layer, round_params(*param_values))
else: lasagne.layers.set_all_param_values(network_output_layer, *param_values)
logger.info("Loading parameters successful.")
return 0
except IOError as e:
logger.info("%s", os.strerror(e.errno))
logger.info('Model: %s not found. No weights loaded', model_path)
return -1
def round_params(param_values):
print("ROUND_PARAMS")
for i in range(len(param_values)):
param_values[i] = param_values[i].astype(np.float16)
param_values[i] = param_values[i].astype(np.float32)
return param_values
if __name__ == "__main__":
main()
| python |
# -* encoding: utf-8 *-
import logging
from collections import OrderedDict
from typing import Tuple, Dict, Optional
from django.contrib.auth import hashers
from django.core.exceptions import ValidationError
from django.http import HttpRequest
from django.utils.translation import ugettext_lazy as _
from typing import Union
from mailauth.models import Domain, EmailAlias, MNUser, MNServiceUser
# noinspection PyUnresolvedReferences
from passlib.hash import sha256_crypt
_log = logging.getLogger(__name__)
class UnixCryptCompatibleSHA256Hasher(object):
"""
This uses passlib to implement a Django password hasher that encodes passwords using
the Debian mkpasswd supported "lowest common denominator but still secure" password
storage algorithm SHA256_crypt. **Unlike** Django's hashers, however, this hasher
stores the password string in modular crypt format, this way making the database
entry compatible with other tools reading directly from the database.
"""
# double the default
rounds = 1070000 # type: int
# algorithm must be non-empty for hackish compatibility with django.contrib.auth.hashers so
# identify_hasher can find us
algorithm = "sha256_passlib" # type: str
def _split_encoded(self, encoded: str) -> Tuple[int, str, str]:
_, five, rounds, salt, hash = encoded.split("$")
if five != "5":
raise ValueError("Not a SHA256 crypt hash %s" % encoded)
if not rounds.startswith("rounds="):
raise ValueError("Rounds parameter not found or garbled %s" % encoded)
roundcount = int(rounds[len("rounds="):])
return roundcount, salt, hash
def salt(self) -> str:
"""
Generates a cryptographically secure nonce salt in ASCII
"""
return hashers.get_random_string()
def verify(self, password: str, encoded: str) -> bool:
"""
Checks if the given password is correct
"""
# we get passed the value modified by the password getter in MNUser, so we need to remove
# the fake algorithm identification string
if encoded.startswith(self.algorithm):
encoded = encoded[len(self.algorithm):]
return sha256_crypt.verify(password, encoded)
def encode(self, password: str, salt: str) -> str:
"""
Creates an encoded database value
The result is normally formatted as "algorithm$salt$hash" and
must be fewer than 128 characters.
"""
return sha256_crypt.encrypt(password, salt=salt, rounds=UnixCryptCompatibleSHA256Hasher.rounds)
def safe_summary(self, encoded: str) -> Dict[str, str]:
"""
Returns a summary of safe values
The result is a dictionary and will be used where the password field
must be displayed to construct a safe representation of the password.
"""
roundcount, salt, hash = self._split_encoded(encoded)
return OrderedDict([
(_('algorithm'), self.algorithm),
(_('iterations'), str(roundcount)),
(_('salt'), hashers.mask_hash(salt)),
(_('hash'), hashers.mask_hash(hash)),
])
def must_update(self, encoded: str) -> bool:
return False
def harden_runtime(self, password: str, encoded: str) -> None:
"""
Bridge the runtime gap between the work factor supplied in `encoded`
and the work factor suggested by this hasher.
Taking PBKDF2 as an example, if `encoded` contains 20000 iterations and
`self.iterations` is 30000, this method should run password through
another 10000 iterations of PBKDF2. Similar approaches should exist
for any hasher that has a work factor. If not, this method should be
defined as a no-op to silence the warning.
"""
roundcount, salt, hash = self._split_encoded(encoded)
extra_rounds = UnixCryptCompatibleSHA256Hasher.rounds - roundcount
if extra_rounds > 0:
sha256_crypt.encrypt(password, salt=salt, rounds=extra_rounds)
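# --- Hedged usage sketch (kept as comments; the password below is made up) ---
# hasher = UnixCryptCompatibleSHA256Hasher()
# encoded = hasher.encode("correct horse battery staple", hasher.salt())
# # `encoded` is plain modular crypt format: $5$rounds=1070000$<salt>$<hash>
# assert hasher.verify("correct horse battery staple", encoded)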
class MNUserAuthenticationBackend(object):
def authenticate(self, request: HttpRequest, username: str=None, password: str=None) -> Optional[MNUser]:
# the argument names must be 'username' and 'password' because the authenticator interface is tightly coupled
# to the parameter names between login forms and authenticators
if username is None:
return None
tocheck_password = None # type: Optional[str]
if "@" not in username or username.count("@") > 1:
try:
service_user = MNServiceUser.objects.get(username=username)
except (MNServiceUser.DoesNotExist, ValidationError):
try:
user = MNUser.objects.get(identifier=username)
except MNUser.DoesNotExist:
_log.debug("No user found %s for identifier login", username)
return None
# if the user is a staff user, they may also log in using their identifier
if user.is_staff:
_log.debug("User %s is staff, allowing identifier login", username)
if hashers.check_password(password, user.password):
_log.debug("User %s logged in with correct password", username)
return user
else:
_log.debug("Incorrect password for user %s (%s)", username, user.password)
else:
_log.debug("Must provide an email address. %s is not an email address", username)
return None
else:
# It's a valid MNServiceUser
_log.debug("Logging in service user %s as %s", service_user.username, service_user.user.identifier)
tocheck_password = service_user.password
user = service_user.user
else:
_log.debug("logging in email alias %s", username)
mailprefix, domain = username.split("@")
if Domain.objects.filter(name=domain).count() == 0:
_log.debug("Domain %s does not exist", domain)
return None
try:
user = EmailAlias.objects.get(mailprefix__istartswith=mailprefix, domain__name=domain).user
except EmailAlias.DoesNotExist:
return None
else:
tocheck_password = user.password
if hashers.check_password(password, tocheck_password):
return user
else:
return None
def get_user(self, user_id: str) -> Optional[MNUser]:
try:
return MNUser.objects.get(uuid=user_id)
except MNUser.DoesNotExist:
return None
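# --- Hedged configuration sketch (the dotted module path below is an assumption) ---
# Django would pick these classes up through the standard settings, e.g.:
# AUTHENTICATION_BACKENDS = ["mailauth.auth.MNUserAuthenticationBackend"]
# PASSWORD_HASHERS = ["mailauth.auth.UnixCryptCompatibleSHA256Hasher", ...]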
| python |
import torch
import numpy as np
import random
import torch.utils.data as data
import sys
sys.path.append("../../../")
"""
Dataset class for creating the shuffling dataset.
"""
class SetShufflingDataset(data.Dataset):
def __init__(self, set_size, train=True, val=False, test=False, **kwargs):
self.set_size = set_size
self.num_classes = set_size
self.shuffle_set = None
if val or test:
np.random.seed(123 if val else 101)
num_shuffles = 32768
self.shuffle_set = np.stack([self._generate_shuffle() for _ in range(num_shuffles)])
def __len__(self):
return int(1e8) if self.shuffle_set is None else self.shuffle_set.shape[0]
def __getitem__(self, idx):
if self.shuffle_set is None:
return self._generate_shuffle()
else:
return self.shuffle_set[idx]
def _generate_shuffle(self):
# For permutation-invariant models, shuffling the elements does not make a difference
# We apply it here for safety
return np.random.permutation(self.set_size)
@staticmethod
def get_vocab_size(set_size):
return set_size
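# --- Hedged usage sketch (not part of the original file; sizes are illustrative) ---
# val_set = SetShufflingDataset(set_size=16, train=False, val=True)
# loader = data.DataLoader(val_set, batch_size=64)   # yields batches of shape [64, 16] permutations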
def calc_optimum(seq_len):
    # The optimal distribution can be expressed autoregressively:
    # given the first N numbers, the next one is uniform over the remaining seq_len-N options,
    # contributing log2(seq_len-N) bits.
class_bpd = sum([np.log2(i) for i in range(1,seq_len+1)])/seq_len
return class_bpd
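# Worked example: calc_optimum(3) = (log2 1 + log2 2 + log2 3) / 3 = (0 + 1 + 1.585) / 3 ≈ 0.862
# bits per element, versus calc_random(3) = log2(3) ≈ 1.585 bits for uniform guessing.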
def calc_random(seq_len):
return np.log2(seq_len)
if __name__ == '__main__':
for seq_len in [2, 3, 4, 8, 16, 32, 64, 128]:
print("Optimum for sequence length %i: %5.4f vs %5.4f (random)" % ( seq_len, calc_optimum(seq_len), calc_random(seq_len) ) ) | python |
from flask_restful import Resource, reqparse
from models import hotel
from models.hotel import HotelModel
hoteis = [
{
'hotel_id': 'alpha',
'nome': 'Alpha Hotel',
'estrelas': 4.3,
'diaria': 420.34,
'cidade': 'Rio de Janeiro'
},
{
'hotel_id': 'bravo',
'nome': 'Bravo Hotel',
'estrelas': 4.4,
'diaria': 380.90,
'cidade': 'Santa Catarina'
},
{
'hotel_id': 'charlie',
'nome': 'Charlie Hotel',
'estrelas': 3.9,
'diaria': 320.20,
'cidade': 'Santa Catarina'
}
]
class Hoteis(Resource):
def get(self):
return {'result': hoteis}, 200
class Hotel(Resource):
arguments = reqparse.RequestParser()
arguments.add_argument('nome')
arguments.add_argument('estrelas')
arguments.add_argument('diaria')
arguments.add_argument('cidade')
    @staticmethod
    def search(hotel_id):
for hotel in hoteis:
if hotel['hotel_id'] == hotel_id:
return hotel
return None
def get(self, hotel_id):
hotel = Hotel.search(hotel_id)
if hotel is not None:
return {'result': hotel}
return {'result': 'hotel not found.'}, 404
def post(self, hotel_id):
request = Hotel.arguments.parse_args()
hotel = Hotel.search(hotel_id)
if hotel is None:
hotel_object = HotelModel(hotel_id, **request)
new_hotel = hotel_object.json()
hoteis.append(new_hotel)
return {'result': 'hotel created'}, 201
return {'result': 'hotel_id already exists'}, 404
def put(self, hotel_id):
request = Hotel.arguments.parse_args()
hotel = Hotel.search(hotel_id)
if hotel is not None:
hotel_object = HotelModel(hotel_id, **request)
new_hotel = hotel_object.json()
hotel.update(new_hotel)
return {'result': 'updated hotel'}, 200
return {'result': 'hotel_id does not exist'}, 404
def delete(self, hotel_id):
hotel = Hotel.search(hotel_id)
if hotel is not None:
global hoteis
hoteis = [hotel for hotel in hoteis if hotel['hotel_id'] != hotel_id]
return{'result': 'deleted hotel'}, 200
return {'result': 'hotel_id does not exist'}, 404
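# --- Hedged wiring sketch (app and route names are assumptions, kept as comments) ---
# from flask import Flask
# from flask_restful import Api
# app = Flask(__name__)
# api = Api(app)
# api.add_resource(Hoteis, '/hoteis')
# api.add_resource(Hotel, '/hoteis/<string:hotel_id>')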
| python |
#!/usr/bin/env python3
# coding=utf-8
"""
Benchmark helper for triggers. Each benchmark is linked to a trigger class from lib.trigger
"""
from abc import abstractmethod, ABCMeta
from contextlib import suppress
import logging
import multiprocessing
import os
import subprocess
import timeit
import time
from lib.helper import launch_and_log, show_progress
from lib.parsers.configuration import get_global_conf
__author__ = "Benjamin Schubert, benjamin.schubert@epfl.ch"
class RawBenchmark(metaclass=ABCMeta):
"""
The base benchmarking class. Defines the bare minimum to run the benchmarks
"""
def __init__(self, trigger):
self.trigger = trigger
@abstractmethod
def run(self, *args, **kwargs) -> int:
"""
Called to run the benchmark
:param args: additional arguments
:param kwargs: additional keyword arguments
:return: 0|1|None on success|failure|unexpected ending
"""
pass
# noinspection PyMethodMayBeStatic
def pre_benchmark_run(self) -> None:
"""
Is called before the benchmark is run in order to setup things if needed (changing command line, etc)
"""
pass
@property
def expected_results(self) -> int:
""" The number of positive results awaited """
return get_global_conf().getint("benchmark", "wanted_results")
@property
def maximum_tries(self) -> int:
""" The maximum number of tries to do before declaring a failure """
return get_global_conf().getint("benchmark", "maximum_tries")
@property
def kept_runs(self) -> int:
""" The total number of run kept """
return get_global_conf().getint("benchmark", "kept_runs")
class BaseBenchmark(RawBenchmark):
"""
Basic benchmarking class for program that require nothing external to trigger
"""
def benchmark_helper(self) -> None:
"""
Launches the trigger command
:raise subprocess.CalledProcessError
"""
subprocess.check_call(self.trigger.cmd.split(" "), stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
def run(self, *args, **kwargs) -> int:
"""
Benchmarks the execution 20 times and stores the last 10 results (to avoid side effects) in self.trigger.result.
Runs at most 100 times before deciding the run is a failure.
:param args: additional arguments
:param kwargs: additional keyword arguments
:return: 0|1 on success|failure
"""
logging.verbose(self.trigger.cmd)
results = []
tries = 0
while len(results) < self.expected_results and tries < self.maximum_tries:
try:
results += timeit.repeat(self.benchmark_helper, repeat=1, number=1)
except subprocess.CalledProcessError:
logging.warning("A trigger failed, retrying one more time")
tries += 1
show_progress(len(results), self.expected_results, section="trigger")
if tries >= 100:
# We failed in 100 iterations
return 1
logging.verbose("Run times : %(time)s secs", dict(time=results))
self.trigger.returned_information = results[self.expected_results - self.kept_runs:]
return 0
class BenchmarkWithHelper(RawBenchmark):
"""
Benchmarking class for program with a client-server scheme
"""
def __init__(self, trigger) -> None:
super().__init__(trigger)
self.triggers = []
def client_run(self) -> None:
"""
Launches all client threads and waits for them to finish
:trigger lib.trigger.RawTrigger
"""
for thread in self.triggers:
thread.start()
for thread in self.triggers:
thread.join()
def run(self, *args, **kwargs) -> int:
"""
Benchmarks the execution time of 20 runs and stores the last 10 results (to avoid side effects) in
self.trigger.result.
Runs at most 100 times before deciding the run is a failure.
:param args: additional arguments
:param kwargs: additional keyword arguments
:return: 0|1 on success|failure
"""
results = []
tries = 0
while len(results) < self.expected_results and tries < self.maximum_tries:
tries += 1
try:
proc_start = self.trigger.Server(self.trigger.cmd)
proc_start.start()
time.sleep(self.trigger.delay)
results_queue = multiprocessing.Queue() # pylint: disable=no-member
self.triggers = []
for command in self.trigger.helper_commands:
self.triggers.append(
self.trigger.helper(command, results=results_queue, **self.trigger.named_helper_args)
)
result = timeit.repeat(self.client_run, number=1, repeat=1)
finally:
with suppress(subprocess.CalledProcessError):
launch_and_log(self.trigger.stop_cmd.split(" "))
for thread in self.triggers:
thread.terminate()
values = []
for _ in self.triggers:
values.append(results_queue.get_nowait())
if self.trigger.check_success(values) != 0:
logging.warning("Trigger did not work, retrying")
continue
results += result
show_progress(len(results), self.expected_results, section="trigger")
time.sleep(2)
if tries >= 100:
return 1
logging.verbose("Run times : {} secs".format(results))
self.trigger.returned_information = results[self.expected_results - self.kept_runs:]
return 0
class ApacheBenchmark(RawBenchmark):
"""
Benchmarking class specific to Apache, using apache-bench utility
"""
def run(self, *args, run_number: int=0, **kwargs) -> int:
"""
Benchmarks the number of requests per second an apache server can handle
Runs at most 100 times before deciding the run is a failure
:param args: additional arguments
:param run_number: the number of time the benchmark has run
:param kwargs: additional keyword arguments
:return: 0|1|None on success|failure|unexpected result
"""
proc_start = self.trigger.Server(self.trigger.cmd)
proc_start.start()
time.sleep(self.trigger.delay)
cmd = "ab -n 30000 -c 1 {}".format(self.trigger.benchmark_url).split(" ")
logging.verbose(cmd)
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, **kwargs)
except subprocess.CalledProcessError as exc:
for line in exc.output.decode().split("\n"):
logging.debug(line)
return self.retry(*args, run_number=run_number, **kwargs)
else:
success = self.trigger.check_success()
if success:
return self.retry(*args, run_number=run_number, **kwargs)
self.trigger.result = []
for line in output.decode().split("\n"):
if line.startswith("Requests per second:"):
self.trigger.returned_information = [float(line.split(":")[1].strip().split(" ")[0])]
with suppress(subprocess.CalledProcessError):
launch_and_log(self.trigger.stop_cmd.split(" "))
if len(self.trigger.returned_information) == 0:
return self.retry(*args, run_number=run_number, **kwargs)
logging.verbose("Requests per second : {}".format(self.trigger.returned_information[0]))
return success
def retry(self, *args, run_number: int=0, **kwargs) -> int:
"""
Updates the number of time the program has run and relaunches it
:param args: additional arguments
:param run_number: the number of time the benchmark has run
:param kwargs: additional keyword arguments
:return: 0|1|None on success|failure|unexpected result
"""
with suppress(subprocess.CalledProcessError):
launch_and_log(self.trigger.stop_cmd.split(" "))
with suppress(FileNotFoundError), \
open(os.path.join(self.trigger.conf.getdir("install", "install_directory"))) as httpd_pid:
pid = int(httpd_pid.read())
launch_and_log(["kill", str(pid)])
run_number += 1
if run_number > self.maximum_tries:
return 1
logging.warning("An error occurred while launching apache, retrying")
self.trigger.clean_logs()
return self.run(*args, run_number=run_number, **kwargs)
| python |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains overloads to convert TF to equivalent NumPy code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from pyctr.overloads import py_defaults
from pyctr.overloads import staging
import tensorflow as tf
import torch
init = py_defaults.init
assign = py_defaults.assign
if_stmt = py_defaults.if_stmt
for_stmt = py_defaults.for_stmt
while_stmt = py_defaults.while_stmt
def read(var):
assert isinstance(var, py_defaults.Variable)
if tf.is_tensor(var.val):
return var.val.numpy()
return py_defaults.read(var)
call = staging.RewritingCallOverload(py_defaults.call)
@call.replaces(tf.transpose)
def transpose(x, axes):
return np.transpose(x, axes)
@call.replaces(tf.reduce_max)
def amax(x):
return np.amax(x)
@call.replaces(tf.concat)
def concat(inputs, axis):
return np.concatenate(inputs, axis)
@call.replaces(tf.tanh)
def tanh(x):
return np.tanh(x)
@call.replaces(tf.linalg.matmul)
def matmul(x, y):
return np.dot(x, y)
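# A hedged example of extending the module with the same overload pattern (not in the original file):
@call.replaces(tf.reduce_sum)
def reduce_sum(x):
  return np.sum(x)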
| python |
def convert(pth_path, wts_path, device_type='cuda'):
import struct
import torch
from viclassifier.utils import dev_opt
device = dev_opt.usingDevice(device_type)
model = torch.load(pth_path, map_location=device)
model.to(device)
    # BatchNormalization and Dropout are disabled at inference time
model.eval()
# print('model: ', model)
# print('state dict: ', model.state_dict().keys())
    # # test with generated data
# tmp = torch.ones(1, 3, 224, 224).to(device)
# print('input: ', tmp)
# out = model(tmp)
# print('output:', out)
f = open(wts_path, 'w')
f.write("{}\n".format(len(model.state_dict().keys())))
for k, v in model.state_dict().items():
# print('key: ', k)
# print('value: ', v.shape)
        vr = v.reshape(-1).cpu().numpy()  # executed on the CPU
f.write("{} {}".format(k, len(vr)))
print("{} {}".format(k, len(vr)))
for vv in vr:
f.write(" ")
# print(" ")
f.write(struct.pack(">f", float(vv)).hex())
# print(struct.pack(">f", float(vv)).hex())
f.write("\n")
# print("\n")
if __name__ == "__main__":
import os, sys
viclassifier_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
print('viclassifier_dir:', viclassifier_dir)
sys.path.append(viclassifier_dir)
pth_path = r'../examples/model.pth'
wts_path = r'../examples/model.wts'
convert(pth_path, wts_path)
| python |
import sys,time,os,random,fonction,string
from pystyle import *
listbye = [""" ___ _ _
| _ )_ _ ___ ______ ___ _ _ ___ _ _ | |__ _| |_ ___ _ _
| _ \ || / -_)_ (_-< -_) -_) | || / _ \ || | | / _` | _/ -_) '_|
|___/\_, \___( ) /__|___\___| \_, \___/\_,_| |_\__,_|\__\___|_|(_)
|__/ |/ |__/ \n\n\nPRESS ENTER""", """ ___ _ _ ___ _ _ _
/ __|___ ___ __| | |__ _ _ ___ |_ _| | |_ ___ _ __ ___ | |_ ___ ______ ___ _ _ ___ _ _ __ _ __ _ __ _(_)_ _
| (_ / _ \/ _ \/ _` | '_ \ || / -_) | | | ' \/ _ \ '_ \/ -_) | _/ _ \ (_-< -_) -_) | || / _ \ || | / _` / _` / _` | | ' \ _
\___\___/\___/\__,_|_.__/\_, \___| |___| |_||_\___/ .__/\___| \__\___/ /__|___\___| \_, \___/\_,_| \__,_\__, \__,_|_|_||_(_)
|__/ |_| |__/ |___/ \n\n\nPRESS ENTER""", """ ___ _ _ _ _ _ _
/ __|___ ___ __| | __| |__ _ _ _ | |_ ___ _ _ ___ _ _ __ ___ _ __ ___ | |__ __ _ __| |__ | |_ ___ ______ ___ _ _ ___
| (_ / _ \/ _ \/ _` | / _` / _` | || | | _/ _ \ | || / _ \ || | / _/ _ \ ' \/ -_) | '_ \/ _` / _| / / | _/ _ \ (_-< -_) -_) | || (_-<_
\___\___/\___/\__,_| \__,_\__,_|\_, | \__\___/ \_, \___/\_,_| \__\___/_|_|_\___| |_.__/\__,_\__|_\_\ \__\___/ /__|___\___| \_,_/__(_)
|__/ |__/ \n\n\nPRESS ENTER"""]
def Generate_Msg():
RanMsg = random.randint(0, 2)
if RanMsg == 0:
fonction.typewriter(listbye[0])
elif RanMsg == 1:
fonction.typewriter(listbye[1])
else:
fonction.typewriter(listbye[2])
time.sleep(1.5)
def Generate_Msg_RGB():
RanmsgRGB = random.randint(0,2)
if RanmsgRGB == 0:
Anime.Fade(Center.Center(listbye[0]), Colors.white_to_red, Colorate.Horizontal, enter=True)
elif RanmsgRGB == 1:
Anime.Fade(Center.Center(listbye[1]), Colors.blue_to_green, Colorate.Horizontal, enter=True)
else:
Anime.Fade(Center.Center(listbye[2]), Colors.blue_to_red, Colorate.Horizontal, enter=True)
def Generate_Password(num):
password = ''
for n in range(num):
x = random.randint(0, 94)
password += string.printable[x]
return Colorate.Horizontal(Colors.red_to_yellow, password)
banner1 ="""
///////////
/////////////////////
.///////////////////////////,
%/////////% //////////
////////* ////////(
///////( ////////
//////// ///////#
//////// ///////*
//////// ///////(
//////// ///////(
//////// ///////(
//////// ///////(
(/////////////////////////////////////////////////%
///////////////////////////////////////////////////////
//////////////////////////////////&//////////////////////
./////////////////////////////////@@@@@@@@////////////////
.////////////////////////////////@@@@@@@@/////////////////
.///////////////////////////////@@@@@@@@//////////////////
.//////////////////////////////@@@@@@@@///////////////////
./////////////////////////////@@@@@@@@////////////////////
.//////////////////@@@@@/////@@@@@@@@/////////////////////
.////////////////#@@@@@@@@@&@@@@@@@@//////////////////////
./////////////////#@@@@@@@@@@@@@@@@///////////////////////
./////////////////////@@@@@@@@@@@@////////////////////////
./////////////////////////@@@@@@&/////////////////////////
////////////////////////////&@%//////////////////////////
*///////////////////////////////////////////////////////
(///////////////////////////////////////////////////(
///////////////////////////////////////////(/
"""
Title = """ ██▓███ ▄▄▄ ██████ ██████ █ █░ ▒█████ ██▀███ ▓█████▄ ▄████ ▓█████ ███▄ █
▓██░ ██▒▒████▄ ▒██ ▒ ▒██ ▒ ▓█░ █ ░█░▒██▒ ██▒▓██ ▒ ██▒▒██▀ ██▌ ██▒ ▀█▒▓█ ▀ ██ ▀█ █
▓██░ ██▓▒▒██ ▀█▄ ░ ▓██▄ ░ ▓██▄ ▒█░ █ ░█ ▒██░ ██▒▓██ ░▄█ ▒░██ █▌ ▒██░▄▄▄░▒███ ▓██ ▀█ ██▒
▒██▄█▓▒ ▒░██▄▄▄▄██ ▒ ██▒ ▒ ██▒░█░ █ ░█ ▒██ ██░▒██▀▀█▄ ░▓█▄ ▌ ░▓█ ██▓▒▓█ ▄ ▓██▒ ▐▌██▒
▒██▒ ░ ░ ▓█ ▓██▒▒██████▒▒▒██████▒▒░░██▒██▓ ░ ████▓▒░░██▓ ▒██▒░▒████▓ ░▒▓███▀▒░▒████▒▒██░ ▓██░
▒▓▒░ ░ ░ ▒▒ ▓▒█░▒ ▒▓▒ ▒ ░▒ ▒▓▒ ▒ ░░ ▓░▒ ▒ ░ ▒░▒░▒░ ░ ▒▓ ░▒▓░ ▒▒▓ ▒ ░▒ ▒ ░░ ▒░ ░░ ▒░ ▒ ▒
░▒ ░ ▒ ▒▒ ░░ ░▒ ░ ░░ ░▒ ░ ░ ▒ ░ ░ ░ ▒ ▒░ ░▒ ░ ▒░ ░ ▒ ▒ ░ ░ ░ ░ ░░ ░░ ░ ▒░
░░ ░ ▒ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░ """
message = "\nHow many characters, Do you want in your password. (8 Character Minimum) \n--> "
ErrorMessage = "** 8 Character Minimum **"
ContinueMsg = "\n\n\n--Do you want to continue--\n1 - Yes\n2 - No\n"
ErrorMessageContinue = "**Invalid**"
redirectionMsg = "Redirection . . . . . . . . . . . . . . . . . . ."
retryMsg = "\n\n\n--Do you want retry--\n1 - Yes\n2 - No\n"
Anime.Fade(Center.Center(banner1), Colors.green_to_black, Colorate.Diagonal, enter=True)
while True:
print(Colorate.Diagonal(Colors.red_to_purple,Title, 1))
num = fonction.typewriter(message)
num = input()
num = int(num)
print("\n\n\n")
if num >= 8:
print(Generate_Password(num))
time.sleep(0.05)
continueQ = fonction.typewriter(ContinueMsg)
continueQ = input()
continueQ = int(continueQ)
if continueQ == 1:
os.system("cls")
elif continueQ == 2:
Generate_Msg_RGB()
break
else:
ErrorContinueQ = fonction.typewriter(ErrorMessageContinue)
print("\n")
time.sleep(1)
redirection = fonction.typewriter(redirectionMsg)
os.system("cls")
print("\n")
continueQ = fonction.typewriter(retryMsg)
continueQ = input()
continueQ = int(continueQ)
if continueQ == 1:
os.system("cls")
elif continueQ == 2:
Generate_Msg_RGB()
else:
fonction.typewriter(ErrorMessage)
print("\n")
time.sleep(1)
fonction.typewriter(redirectionMsg)
os.system("cls") | python |
from setuptools import setup
setup(
name='alpha_vantage_proxy',
version='0.0.4',
description='A plugin to interface with alphavantage api',
url='https://github.com/kburd/alpha-vantage-proxy',
author='Kaleb Burd',
author_email='kalebmburd@gmail.com',
license='MIT',
packages=['alpha_vantage_proxy'],
zip_safe=False
)
| python |
import torch
import numpy as np
from torch import nn
from torch import optim
from torch.nn import functional as F
from cl_gym.algorithms import ContinualAlgorithm
from cl_gym.algorithms.utils import flatten_grads, assign_grads
from cl_gym.algorithms.utils import flatten_weights, assign_weights
class MCSGD(ContinualAlgorithm):
"""
| Mode Connectivity SGD
| By Mirzadeh et al. :https://openreview.net/forum?id=Fmg_fQYUejf
"""
def __init__(self, backbone, benchmark, params):
super(MCSGD, self).__init__(backbone, benchmark, params, requires_memory=True)
self.w_bar_prev = None
self.w_hat_curr = None
self.num_samples_on_line = self.params.get('mcsgd_line_samples', 10)
self.alpha = self.params.get('mcsgd_alpha', 0.25)
def calculate_line_loss(self, w_start, w_end, loader):
line_samples = np.arange(0.0, 1.01, 1.0 / float(self.num_samples_on_line))
accum_grad = None
for t in line_samples:
grads = []
w_mid = w_start + (w_end - w_start) * t
m = assign_weights(self.backbone, w_mid)
clf_loss = self.calculate_point_loss(m, loader)
clf_loss.backward()
for name, param in m.named_parameters():
grads.append(param.grad.view(-1))
grads = torch.cat(grads)
if accum_grad is None:
accum_grad = grads
else:
accum_grad += grads
return accum_grad
def calculate_point_loss(self, net, loader):
criterion = self.prepare_criterion(-1)
device = self.params['device']
net.eval()
total_loss, total_count = 0.0, 0.0
for (inp, targ, task_ids) in loader:
inp, targ, task_ids = inp.to(device), targ.to(device), task_ids.to(device)
pred = net(inp, task_ids)
total_count += len(targ)
total_loss += criterion(pred, targ)
total_loss /= total_count
return total_loss
def _prepare_mode_connectivity_optimizer(self, model):
return torch.optim.SGD(model.parameters(),
lr=self.params['mcsgd_line_optim_lr'],
momentum=self.params['momentum'])
def find_connected_minima(self, task):
mc_model = assign_weights(self.backbone, self.w_bar_prev + (self.w_hat_curr - self.w_bar_prev) * self.alpha)
optimizer = self._prepare_mode_connectivity_optimizer(mc_model)
loader_prev, _ = self.benchmark.load_memory_joint(task-1, batch_size=self.params['batch_size_memory'],
num_workers=self.params.get('num_dataloader_workers', 0))
loader_curr, _ = self.benchmark.load_subset(task, batch_size=self.params['batch_size_train'],
num_workers=self.params.get('num_dataloader_workers', 0))
mc_model.train()
optimizer.zero_grad()
grads_prev = self.calculate_line_loss(self.w_bar_prev, flatten_weights(mc_model, True), loader_prev)
grads_curr = self.calculate_line_loss(self.w_hat_curr, flatten_weights(mc_model, True), loader_curr)
# mc_model = assign_grads(mc_model, (grads_prev + grads_curr)/2.0)
mc_model = assign_grads(mc_model, (grads_prev + grads_curr))
optimizer.step()
return mc_model
def training_epoch_end(self):
self.w_hat_curr = flatten_weights(self.backbone, True)
def training_task_end(self):
if self.current_task > 1:
self.backbone = self.find_connected_minima(self.current_task)
self.w_bar_prev = flatten_weights(self.backbone, True)
self.current_task += 1
def training_step(self, task_id, inp, targ, optimizer, criterion):
optimizer.zero_grad()
pred = self.backbone(inp, task_id)
loss = criterion(pred, targ)
loss.backward()
# if task_id > 1:
# self.find_connected_minima(task_id)
optimizer.step()
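# --- Hedged configuration sketch: keys read from `params` by the class above (values illustrative) ---
# params = {
#     'mcsgd_line_samples': 10, 'mcsgd_alpha': 0.25, 'mcsgd_line_optim_lr': 0.01, 'momentum': 0.8,
#     'batch_size_train': 64, 'batch_size_memory': 64, 'num_dataloader_workers': 0, 'device': 'cuda',
# }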
| python |
import json
import requests
from kivy.core.audio import SoundLoader
from secret import WATSON_USERNAME
from secret import WATSON_PASSWORD
class watson_voice():
def __init__(self, voice_record):
self.name = voice_record['name']
self.language = voice_record['language']
self.gender = voice_record['gender']
self.url = voice_record['url']
self.desc = voice_record['description']
def __str__(self):
return self.name + ' ' + self.desc
def fetch_voices():
watson_voices = []
watson_voices_api_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/voices'
r = requests.get(watson_voices_api_url,
auth=(WATSON_USERNAME, WATSON_PASSWORD))
if r.status_code == 200:
for voice_rec in r.json()['voices']:
watson_voices.append(watson_voice(voice_rec))
return watson_voices
def speak(text, voice):
watson_api_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/synthesize'
voice_arg = 'voice=' + voice
text_arg = 'text=' + text
r = requests.get(watson_api_url + '?' + voice_arg + '&' + text_arg,
auth=(WATSON_USERNAME, WATSON_PASSWORD))
if r.status_code == 200:
file = open("out.wav", "wb")
file.write(r.content)
file.close()
sound = SoundLoader.load("out.wav")
if sound:
sound.play()
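# --- Hedged usage sketch (not part of the original module; the spoken phrase is illustrative) ---
if __name__ == '__main__':
    voices = fetch_voices()
    for v in voices:
        print(v)
    if voices:
        speak('Hello from Watson', voices[0].name)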
| python |
import math
import torch
import copy
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertTokenizer, BertModel
def linear_block(input_dim, hidden_dim):
linear = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.LeakyReLU(0.5))
return linear
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_layers):
super(MLP, self).__init__()
self.num_layers = num_layers
self.hidden_size = hidden_dim
layers = []
for i in range(num_layers-1):
layers.extend(
linear_block(hidden_dim if i> 0 else input_dim, hidden_dim)
)
layers.extend([nn.Linear(hidden_dim, input_dim)])
self.model = nn.Sequential(*layers)
## initilize the model
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight, a=math.sqrt(5))
fan_in,_ = nn.init._calculate_fan_in_and_fan_out(m.weight)
bound = 1/math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
def forward(self,x):
out = self.model(x)
return out
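# --- Hedged usage sketch (sizes are illustrative, kept as comments) ---
# mlp = MLP(input_dim=300, hidden_dim=512, num_layers=3)   # two hidden blocks plus a 512->300 output layer
# out = mlp(torch.randn(8, 300))                           # out.shape == torch.Size([8, 300])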
class SDSN(nn.Module):
"""docstring for SDSNA"""
# Replace simple dot product with SDSNA
# Scoring Lexical Entailment with a supervised directional similarity network
def __init__(self, arg):
super(SDSNA, self).__init__()
self.emb_dim = 300
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.map_linear_left = self.mlp(self.emb_dim, self.hidden_dim, self.num_layers)
self.map_linear_right = self.mlp(self.emb_dim, self.hidden_dim, self.num_layers)
self.final_linear = nn.Linear(2 * self.hidden_dim + self.emb_dim, 1)
def init_embs(self, w2v_weight):
self.embs = nn.Embedding.from_pretrained(w2v_weight, freeze=True)
def forward(self, inputs):
batch_size, _ = inputs.size()
left_w2v = self.embs(inputs[:,0])
right_w2v = self.embs(inputs[:,1])
left_trans = self.map_linear_left(left_w2v)
right_trans = self.map_linear_right(right_w2v)
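        # Hedged completion sketch (assumption): score by combining the two mapped vectors with the
        # element-wise interaction of the raw embeddings and passing them through final_linear; the
        # sizes line up with final_linear only when hidden_dim equals the embedding dimension.
        combined = torch.cat((left_trans, right_trans, left_w2v * right_w2v), dim=1)
        return self.final_linear(combined)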
def mlp(self, input_dim, hidden_dim, num_layers):
layers = []
for i in range(num_layers-1):
layers.extend(
linear_block(hidden_dim if i> 0 else input_dim, hidden_dim)
)
layers.extend([nn.Linear(hidden_dim, input_dim)])
return nn.Sequential(*layers)
class Word2Score(nn.Module):
"""docstring for Word2Score"""
def __init__(self, hidden_dim, num_layers):
super(Word2Score, self).__init__()
self.emb_dim = 300
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.map_linear_left = self.mlp(self.emb_dim, self.hidden_dim, self.num_layers)
self.map_linear_right = self.mlp(self.emb_dim, self.hidden_dim, self.num_layers)
def init_emb(self, w2v_weight):
self.embs = nn.Embedding.from_pretrained(w2v_weight, freeze=True)
def mlp(self, input_dim, hidden_dim, num_layers):
layers = []
for i in range(num_layers-1):
layers.extend(
linear_block(hidden_dim if i> 0 else input_dim, hidden_dim)
)
layers.extend([nn.Linear(hidden_dim, input_dim)])
return nn.Sequential(*layers)
def forward(self, inputs):
# inputs: [batch_size, 2]
batch_size, _ = inputs.size()
left_w2v = self.embs(inputs[:,0])
right_w2v = self.embs(inputs[:,1])
left_trans = self.map_linear_left(left_w2v)
right_trans = self.map_linear_right(right_w2v)
output = torch.einsum('ij,ij->i', [left_trans, right_trans])
left_norm = torch.norm(left_trans, dim=1).sum()
right_norm = torch.norm(right_trans, dim=1).sum()
return output, (left_norm+right_norm)
def inference(self, left_w2v, right_w2v):
left_trans = self.map_linear_left(left_w2v)
right_trans = self.map_linear_right(right_w2v)
output = torch.einsum('ij,ij->i', [left_trans, right_trans])
return output
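# --- Hedged usage sketch (vocabulary size and indices are illustrative, kept as comments) ---
# model = Word2Score(hidden_dim=300, num_layers=2)
# model.init_emb(torch.randn(10000, 300))            # frozen pretrained word vectors
# pairs = torch.tensor([[3, 17], [42, 7]])           # (left, right) word indices
# scores, norm = model(pairs)                        # scores.shape == torch.Size([2])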
class MEAN_Max(nn.Module):
"""docstring for MEAN"""
def __init__(self, input_dim, hidden_dim):
super(MEAN_Max, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.output_layer = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
# input: [batch, context, seq, emb]
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, emb]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
oe = torch.cat((embed_input_left, embed_input_right), 2)
oe = oe.mean(2)
oe = self.output_layer(oe)
oe = oe.max(1)[0]
return oe
class MEAN(nn.Module):
"""docstring for MEAN"""
def __init__(self, input_dim, hidden_dim):
super(MEAN, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.output_layer = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
# input: [batch, context, seq, emb]
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, emb]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
oe = torch.cat((embed_input_left, embed_input_right), 2)
oe = oe.mean(2)
oe = self.output_layer(oe)
oe = oe.mean(1)
return oe
class LSTM(nn.Module):
"""docstring for LSTM"""
def __init__(self, input_dim, hidden_dim):
super(LSTM, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(p=0)
self.left_context_encoder = nn.LSTM(input_dim, hidden_dim, 1, batch_first=True)
self.right_context_encoder = nn.LSTM(input_dim, hidden_dim, 1, batch_first=True)
self.output_layer = nn.Sequential(
nn.Linear(hidden_dim*2, hidden_dim*2),
nn.ReLU(),
nn.Linear(hidden_dim*2, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
# input: [batch, context, seq, emb]
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim]
embed_input_left = embed_input_left.view(-1, seqlen, self.input_dim)
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = embed_input_right.view(-1, seqlen, self.input_dim)
embed_input_right = self.dropout_layer(embed_input_right)
# hidden = (torch.zeros(1, batch_size*num_context, self.hidden_dim),
# torch.zeros(1, batch_size*num_context, self.hidden_dim))
output_left, (final_hidden_state_left, final_cell_state_left) = self.left_context_encoder(embed_input_left) #, hidden)
output_right,(final_hidden_state_right, final_cell_state_left) = self.right_context_encoder(embed_input_right) #, hidden)
encode_context_left = final_hidden_state_left.view(-1, num_context, self.hidden_dim)
encode_context_right = final_hidden_state_right.view(-1, num_context, self.hidden_dim)
# concat + mean_pooling + fully_connect
oe = torch.cat((encode_context_left, encode_context_right), 2)
oe = self.output_layer(oe)
oe = oe.mean(1)
return oe
class SelfAttention(nn.Module):
"""docstring for SelfAttention"""
def __init__(self, input_dim, hidden_dim):
super(SelfAttention, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.att_w = nn.Linear(input_dim, hidden_dim)
self.att_v = nn.Parameter(torch.rand(hidden_dim))
self.output_layer = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
# [batch_size, context_num, seq_length, dim]
left_right_context = torch.cat((embed_input_left, embed_input_right),2)
#print(left_right_context.size())
att_weight = torch.matmul(self.att_w(left_right_context), self.att_v)
att_weight = nn.functional.softmax(att_weight, dim=2).view(batch_size, num_context, 2*seqlen, 1)
#print(att_weight.size())
oe = (left_right_context * att_weight).sum(2)
oe = self.output_layer(oe)
oe = oe.mean(1)
return oe ,att_weight
class HierAttention(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(HierAttention, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.att_w = nn.Linear(input_dim, hidden_dim)
self.att_v = nn.Parameter(torch.rand(hidden_dim))
self.att_h = nn.Linear(input_dim, hidden_dim)
self.att_hv = nn.Parameter(torch.rand(hidden_dim))
self.output_layer = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
# [batch_size, context_num, seq_length, dim]
left_right_context = torch.cat((embed_input_left, embed_input_right),2)
#print(left_right_context.size())
att_weight = torch.matmul(self.att_w(left_right_context), self.att_v)
att_weight = nn.functional.softmax(att_weight, dim=2).view(batch_size, num_context, 2*seqlen, 1)
oe = (left_right_context * att_weight).sum(2)
#print(oe.size())
hier_att_weight = torch.matmul(self.att_h(oe), self.att_hv)
#print(hier_att_weight.size())
hier_att_weight = nn.functional.softmax(hier_att_weight, dim=1).view(batch_size, num_context, 1)
#print(hier_att_weight.size())
oe = (oe * hier_att_weight).sum(1)
oe = self.output_layer(oe)
return oe, att_weight, hier_att_weight
class HierAttentionEnsemble(nn.Module):
def __init__(self, input_dim, hidden_dim):
        super(HierAttentionEnsemble, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.att_w = nn.Linear(input_dim, hidden_dim)
self.att_v = nn.Parameter(torch.rand(hidden_dim))
self.att_h = nn.Linear(input_dim, hidden_dim)
self.att_hv = nn.Parameter(torch.rand(hidden_dim))
self.output_layer = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
# [batch_size, context_num, seq_length, dim]
left_right_context = torch.cat((embed_input_left, embed_input_right),2)
#print(left_right_context.size())
att_weight = torch.matmul(self.att_w(left_right_context), self.att_v)
att_weight = nn.functional.softmax(att_weight, dim=2).view(batch_size, num_context, 2*seqlen, 1)
oe = (left_right_context * att_weight).sum(2)
#print(oe.size())
hier_att_weight = torch.matmul(self.att_h(oe), self.att_hv)
#print(hier_att_weight.size())
hier_att_weight = nn.functional.softmax(hier_att_weight, dim=1).view(batch_size, num_context, 1)
#print(hier_att_weight.size())
oe = (oe * hier_att_weight).sum(1)
oe = self.output_layer(oe)
return oe, att_weight, hier_att_weight
class ATTENTION(nn.Module):
"""docstring for ATTENTION"""
def __init__(self, input_dim, hidden_dim):
super(ATTENTION, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.left_context_encoder = nn.LSTM(input_dim, hidden_dim, 1, batch_first=True)
self.right_context_encoder = nn.LSTM(input_dim, hidden_dim, 1, batch_first=True)
self.att_w = nn.Linear(hidden_dim*2, hidden_dim)
self.att_v = nn.Parameter(torch.rand(hidden_dim))
self.output_layer = nn.Sequential(
nn.Linear(hidden_dim*2, hidden_dim*2),
nn.ReLU(),
nn.Linear(hidden_dim*2, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
# input: [batch, context, seq, emb]
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim] -> [batch*context, seq, dim]
embed_input_left = embed_input_left.view(-1, seqlen, self.input_dim)
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = embed_input_right.view(-1, seqlen, self.input_dim)
embed_input_right = self.dropout_layer(embed_input_right)
# hidden = (torch.zeros(1, batch_size*num_context, self.hidden_dim),
# torch.zeros(1, batch_size*num_context, self.hidden_dim))
        output_left, (final_hidden_state_left, final_cell_state_left) = self.left_context_encoder(embed_input_left) #, hidden)
        output_right, (final_hidden_state_right, final_cell_state_right) = self.right_context_encoder(embed_input_right) #, hidden)
encode_context_left = final_hidden_state_left.view(-1, num_context, self.hidden_dim)
encode_context_right = final_hidden_state_right.view(-1, num_context, self.hidden_dim)
# concat + mean_pooling + fully_connect
oe = torch.cat((encode_context_left, encode_context_right), 2)
        #print(oe.size())
        att_weight = torch.matmul(self.att_w(oe), self.att_v)
        #print(att_weight.size())
        att_weight = nn.functional.softmax(att_weight, dim=1).view(batch_size, num_context, 1)
        #print(att_weight.size())
        oe = (oe * att_weight).sum(1)
        #print("--------")
oe = self.output_layer(oe)
return oe
class BertEncoder(nn.Module):
def __init__(self, bert_dir, model_type="base"):
super(BertEncoder, self).__init__()
self.model_type = model_type
self.model = BertModel.from_pretrained(bert_dir)
self.set_finetune("full")
def set_finetune(self, finetune_type):
if finetune_type == "none":
for param in self.model.parameters():
param.requires_grad = False
elif finetune_type == "full":
for param in self.model.parameters():
param.requires_grad = True
elif finetune_type == "last":
for param in self.model.parameters():
param.require_grad = False
for param in self.encoder.layer[-1].parameters():
param.require_grad = True
def forward(self, input_ids, mask=None):
# [batch_size, context_num, seq_length]
batch_size, context_num, seq_length = input_ids.size()
flat_input_ids = input_ids.reshape(-1, input_ids.size(-1))
flat_mask = mask.reshape(-1, mask.size(-1))
pooled_cls = self.model(input_ids = flat_input_ids, attention_mask=flat_mask)[1]
# [batch_size * context_num, dim]
#print(pooled_cls.size())
reshaped_pooled_cls = pooled_cls.view(batch_size, context_num, -1)
# [batch_size, context_num, dim]
output = reshaped_pooled_cls.mean(1)
# [batch_size, dim]
return output
def get_output_dim(self):
if self.model_type == "large":
return 1024
else:
return 768
class Bert2Score(nn.Module):
def __init__(self, encoder, bert_dir, hidden_dim, drop_prob):
super(Bert2Score, self).__init__()
self.hidden_dim = hidden_dim
if "large" in encoder:
self.encoder = BertEncoder(bert_dir, "large")
else:
self.encoder = BertEncoder(bert_dir)
bert_dim = self.encoder.get_output_dim()
self.mlp1 = nn.Linear(bert_dim, hidden_dim)
self.mlp2 = nn.Linear(bert_dim, hidden_dim)
self.dropout = nn.Dropout(drop_prob)
def forward(self, input_ids, masks):
## input: [batch_size, 2, context, seq]
left_ids = input_ids[:,0,:,:]
right_ids = input_ids[:,1,:,:]
left_masks = masks[:,0,:,:]
right_masks = masks[:,1,:,:]
left_emb = self.encoder(left_ids, left_masks)
right_emb = self.encoder(right_ids, right_masks)
# [batch_size, hidden_dim]
tran_left = self.mlp1(self.dropout(left_emb))
tran_right = self.mlp2(self.dropout(right_emb))
output = torch.einsum('ij,ij->i', [tran_left, tran_right])
return output
class Context2Score(nn.Module):
"""docstring for Context2Score"""
def __init__(self, encoder, input_dim, hidden_dim, device, multiple=False):
super(Context2Score, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.device = device
self.attention = False
self.hier = False
#self.name = encoder
if 'lstm' in encoder:
if multiple:
self.encoder1 = nn.DataParallel(LSTM(input_dim, hidden_dim), device_ids=[0,1,2,3])
self.encoder2 = nn.DataParallel(LSTM(input_dim, hidden_dim), device_ids=[0,1,2,3])
else:
self.encoder1 = LSTM(input_dim, hidden_dim).to(device)
self.encoder2 = LSTM(input_dim, hidden_dim).to(device)
elif 'attention' in encoder:
if multiple:
self.encoder1 = ATTENTION(input_dim, hidden_dim)
self.encoder2 = ATTENTION(input_dim, hidden_dim)
else:
self.encoder1 = ATTENTION(input_dim, hidden_dim).to(device)
self.encoder2 = ATTENTION(input_dim, hidden_dim).to(device)
elif 'max' in encoder:
self.encoder1 = MEAN_Max(input_dim, hidden_dim).to(device)
self.encoder2 = MEAN_Max(input_dim, hidden_dim).to(device)
elif 'self' in encoder:
#self.encoder1, self.atten1 = SelfAttention(input_dim, hidden_dim).to(device)
self.encoder1 = SelfAttention(input_dim, hidden_dim).to(device)
self.encoder2 = SelfAttention(input_dim, hidden_dim).to(device)
self.attention = True
elif 'han' in encoder:
self.encoder1 = HierAttention(input_dim, hidden_dim).to(device)
self.encoder2 = HierAttention(input_dim, hidden_dim).to(device)
self.hier = True
else:
if multiple:
self.encoder1 = MEAN(input_dim, hidden_dim)
self.encoder2 = MEAN(input_dim, hidden_dim)
else:
self.encoder1 = MEAN(input_dim, hidden_dim).to(device)
self.encoder2 = MEAN(input_dim, hidden_dim).to(device)
def init_emb(self, w2v_weight):
self.word_embedding = nn.Embedding.from_pretrained(w2v_weight, freeze=True)
def forward(self, input_idx):
# input: [batch, 2, context, 2, seq]
embed_input1_left = self.word_embedding(input_idx[:, 0, :, 0]).to(self.device)
embed_input1_right = self.word_embedding(input_idx[:, 0, :, 1]).to(self.device)
embed_input2_left = self.word_embedding(input_idx[:, 1, :, 0]).to(self.device)
embed_input2_right = self.word_embedding(input_idx[:, 1, :, 1]).to(self.device)
if self.attention:
embed_hypo, atten1 = self.encoder1(embed_input1_left, embed_input1_right)
embed_hype, atten2 = self.encoder2(embed_input2_left, embed_input2_right)
output = torch.einsum('ij,ij->i', [embed_hypo, embed_hype])
return output, atten1, atten2
elif self.hier:
embed_hypo, atten1, hier_atten1 = self.encoder1(embed_input1_left, embed_input1_right)
embed_hype, atten2, hier_atten2 = self.encoder2(embed_input2_left, embed_input2_right)
output = torch.einsum('ij,ij->i', [embed_hypo, embed_hype])
atten_w = (atten1, hier_atten1, atten2, hier_atten2)
return output, atten_w
else:
embed_hypo = self.encoder1(embed_input1_left, embed_input1_right)
embed_hype = self.encoder2(embed_input2_left,embed_input2_right)
output = torch.einsum('ij,ij->i', [embed_hypo, embed_hype])
return output
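

# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal smoke test for the MEAN context encoder defined above; the
# dimensions and batch layout below are assumptions chosen only for
# illustration, following the [batch, context, seq, emb] convention used
# in its forward() method.
if __name__ == "__main__":
    import torch
    encoder = MEAN(input_dim=300, hidden_dim=128)
    left = torch.randn(2, 4, 5, 300)    # [batch=2, contexts=4, seq_len=5, emb=300]
    right = torch.randn(2, 4, 5, 300)
    out = encoder(left, right)
    print(out.shape)  # expected: torch.Size([2, 300])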
| python |
# Imitating the behavior of numbers in a class
class Coordenada():
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "<Coordenada x:{0},y:{1}>".format(self.x, self.y)
    # Addition: return a new Coordenada (one straightforward implementation)
    def __add__(self, other):
        return Coordenada(self.x + other.x, self.y + other.y)

    # Subtraction: return a new Coordenada
    def __sub__(self, other):
        return Coordenada(self.x - other.x, self.y - other.y)

    # In-place addition: update this object and return it
    def __iadd__(self, other):
        self.x += other.x
        self.y += other.y
        return self
def main():
# Declare some Coordenadas
c1 = Coordenada(10, 20)
c2 = Coordenada(30, 30)
print(c1, c2)
    # Add two Coordenadas
    print(c1 + c2)
    # Subtract two Coordenadas
    print(c1 - c2)
    # Perform an in-place addition
    c1 += c2
    print(c1)
if __name__ == "__main__":
main()
| python |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019-2021 Tomasz Łuczak, TeaM-TL
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Converters
- convert_preview_crop_gravity - convert coordinates from crop3
- convert_border - add border to picture
- convert_text - add text
- convert_crop - crop picture
- convert_resize - resize picture
- convert_bw - convert into black-and-white or sepia
- convert_contrast - modify contrast
- convert_normalize - normalize levels
- convert_rotate - rotate picture
- convert_mirror - mirror picture (flip/flop)
- convert_pip - picture in picture, e.g. for inserting a logo
- gravity - translate e.g. N to North as Tk expects
- gravity_outside - translate gravitation for adding text outside
"""
def convert_preview_crop_gravity(coordinates, x_max, y_max):
"""
    convert coordinates from crop3:
offset_x, offset_y, width, height, gravitation
original image size:
x_max, y_max
return coordinates for drawing crop: x0, y0, x1, y1
"""
offset_x = coordinates[0]
offset_y = coordinates[1]
width = coordinates[2]
height = coordinates[3]
gravitation = coordinates[4]
if gravitation == "NW":
x0 = offset_x
y0 = offset_y
x1 = x0 + width
y1 = y0 + height
elif gravitation == "N":
x0 = x_max/2 - width/2
y0 = offset_y
x1 = x_max/2 + width/2
y1 = y0 + height
elif gravitation == "NE":
x0 = x_max - width - offset_x
y0 = offset_y
x1 = x_max - offset_x
y1 = y0 + height
elif gravitation == "W":
x0 = offset_x
y0 = y_max/2 - height/2
x1 = x0 + width
y1 = y_max/2 + height/2
elif gravitation == "C":
x0 = x_max/2 - width/2
y0 = y_max/2 - height/2
x1 = x_max/2 + width/2
y1 = y_max/2 + height/2
elif gravitation == "E":
x0 = x_max - width - offset_x
y0 = y_max/2 - height/2
x1 = x_max - offset_x
y1 = y_max/2 + height/2
elif gravitation == "SW":
x0 = offset_x
y0 = y_max - height - offset_y
x1 = x0 + width
y1 = y_max - offset_y
elif gravitation == "S":
x0 = x_max/2 - width/2
y0 = y_max - height - offset_y
x1 = x_max/2 + width/2
y1 = y_max - offset_y
elif gravitation == "SE":
x0 = x_max - width - offset_x
y0 = y_max - height - offset_y
x1 = x_max - offset_x
y1 = y_max - offset_y
else:
x0 = 5
y0 = 5
x1 = x_max - 5
y1 = y_max -5
return (x0, y0, x1, y1)
def convert_border(width, color, border_on):
""" 1. Add border """
if border_on > 0:
command = " -bordercolor \"" + color + "\"" + \
" -border " + str(abs(int(width))) + " "
else:
command = ""
return command + " "
def convert_text(entries):
""" 2. Insert text into picture """
if entries['text_on'] == 1:
size = ' -pointsize ' + entries['font_size']
font = ' -font "' + entries['font'] + '"'
color = ' -fill "' + entries['text_color'] + '"'
if entries['text_inout'] == 0:
# inside
outside = ""
if entries['gravitation_onoff'] == 0:
gravitation = " "
else:
gravitation = " -gravity " + gravity(entries['gravitation'])
text = " -draw \"text " + entries['dx'] + "," + entries['dy'] \
+ " '" + entries['text'] + "'\" "
if entries['box'] == 0:
box = ""
else:
box = " -box \"" + entries['box_color'] + "\""
else:
# outside
gravitation = " -gravity " + gravity(entries['gravitation'])
text = " label:\"" + entries['text'] + "\" "
# position
if entries['gravitation'] == "NW" or entries['gravitation'] == "N" or entries['gravitation'] == "NE":
# top
outside = "+swap -append "
else:
# bottom
outside = "-append "
# background
if entries['box'] == 0:
box = ""
else:
box = " -background \"" + entries['box_color'] + "\""
command = box + color + size + gravitation + font + text + outside
else:
command = ""
return command + " "
def convert_crop(crop, gravitation, entries):
    """ 3. Crop """
    command = ""
    if crop == 1:
        width = str(abs(int(entries['one_x2']) - int(entries['one_x1'])))
        height = str(abs(int(entries['one_y2']) - int(entries['one_y1'])))
        command = " -crop " + width + "x" + height \
                  + "+" + entries['one_x1'] + "+" + entries['one_y1']
    elif crop == 2:
        command = " -crop " \
                  + entries['two_width'] + "x" + entries['two_height'] \
                  + "+" + entries['two_x1'] + "+" + entries['two_y1']
    elif crop == 3:
        command = " -gravity " + gravity(gravitation) + " -crop " \
                  + entries['three_width'] + "x" + entries['three_height'] \
                  + "+" + entries['three_dx'] + "+" + entries['three_dy']
    return command + " "
def convert_resize(resize, pixel, percent, border):
""" 4. Resize """
    # output dictionary
dict_return = {}
border = 2 * abs(int(border))
if resize == 0:
command = ""
sub_dir = ""
    elif resize == 1:
command = "-resize " + pixel + "x" + pixel + " "
sub_dir = pixel
elif resize == 2:
command = "-resize " + percent + "% "
sub_dir = percent
elif resize == 3:
command = "-resize " + str(1920 - border) + "x" + str(1080 - border) + " "
sub_dir = "1920x1080"
elif resize == 4:
command = "-resize " + str(2048 - border) + "x" + str(1556 - border) + " "
sub_dir = "2048x1556"
elif resize == 5:
command = "-resize " + str(4096 - border) + "x" + str(3112 - border) + " "
sub_dir = "4096x3112"
dict_return['command'] = command
dict_return['sub_dir'] = sub_dir
return dict_return
def convert_bw(black_white, sepia):
""" 5. black-white or sepia """
if black_white == 1:
command = "-colorspace Gray"
elif black_white == 2:
command = "-sepia-tone " + str(int(sepia)) + "%"
else:
command = ""
return command + " "
def convert_contrast(contrast, contrast_selected, entry1, entry2):
""" 6. Contrast """
command = ""
if contrast == 1:
command = "-contrast-stretch " + entry1 + "x" + entry2 + "%"
elif contrast == 2:
if contrast_selected == "+3":
command = "+contrast +contrast +contrast"
elif contrast_selected == "+2":
command = "+contrast +contrast"
elif contrast_selected == "+1":
command = "+contrast"
elif contrast_selected == "0":
command = ""
elif contrast_selected == "-1":
command = "-contrast"
elif contrast_selected == "-2":
command = "-contrast -contrast"
elif contrast_selected == "-3":
command = "-contrast -contrast -contrast"
else:
command = ""
elif contrast == 3:
command = "-normalize"
else:
command = ""
return command + " "
def convert_normalize(normalize, channel):
""" 7. Normalize """
if normalize == 1:
if channel != "None":
command = "-channel " + channel + " -equalize"
else:
command = "-equalize"
elif normalize == 2:
command = "-auto-level"
else:
command = ""
return command + " "
def convert_rotate(rotate):
""" 8. Rotate 90,180, 270 degree """
if rotate > 0:
command = "-rotate " + str(rotate)
else:
command = ""
return command + " "
def convert_mirror(flip, flop):
""" 10. Mirror: flip or flop """
if flip:
command_flip = "-flip "
else:
command_flip = ""
if flop:
command_flop = "-flop "
else:
command_flop = ""
return command_flip + command_flop + " "
def convert_pip(gravitation, width, height, offset_dx, offset_dy):
""" 9. Picture In Picture, eg. to add logo on image """
command = "-gravity " + gravity(gravitation) \
+ " -geometry " + width + "x" + height \
+ "+" + offset_dx + "+" + offset_dy
return command + " "
def gravity(gravitation):
    """ translate gravitation name according to Tk specification """
    if gravitation == "N":
        result = "North"
    elif gravitation == "NW":
        result = "Northwest"
    elif gravitation == "NE":
        result = "Northeast"
    elif gravitation == "W":
        result = "West"
    elif gravitation == "C":
        result = "Center"
    elif gravitation == "E":
        result = "East"
    elif gravitation == "SW":
        result = "Southwest"
    elif gravitation == "S":
        result = "South"
    elif gravitation == "SE":
        result = "Southeast"
    elif gravitation == "0":
        result = "0"
    else:
        # fall back to Center so an unexpected value cannot raise UnboundLocalError
        result = "Center"
    return result
# EOF
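
# --- Hypothetical usage sketch (not part of the original module) ---
# Illustrates how the command fragments above are meant to be concatenated
# into a single ImageMagick command line; the "convert" binary and the file
# names are placeholders chosen for this example only.
if __name__ == "__main__":
    example = "convert input.jpg " \
        + convert_border(5, "#FFFFFF", 1) \
        + convert_resize(1, "1024", "75", 5)['command'] \
        + convert_rotate(90) \
        + "output.jpg"
    print(example)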
| python |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import numpy as np
import os
import h5py
import subprocess
import shlex
import json
import glob
from .. ops import transform_functions, se3
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import minkowski
import transforms3d.quaternions as t3d
def download_modelnet40():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, os.pardir, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
www += ' --no-check-certificate'
os.system('wget %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def load_data(train, use_normals):
if train: partition = 'train'
else: partition = 'test'
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, os.pardir, 'data')
all_data = []
all_label = []
for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048', 'ply_data_%s*.h5' % partition)):
		f = h5py.File(h5_name, 'r')
if use_normals: data = np.concatenate([f['data'][:], f['normal'][:]], axis=-1).astype('float32')
else: data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
return all_data, all_label
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.05):
	# N, C = pointcloud.shape
	# NOTE: the sigma argument is overridden with a random value in [0, 0.04)
	sigma = 0.04*np.random.random_sample()
pointcloud += torch.empty(pointcloud.shape).normal_(mean=0, std=sigma).clamp(-clip, clip)
# pointcloud += np.clip(sigma * np.random.randn(N, C), -1 * clip, clip)
return pointcloud
# Create Partial Point Cloud. [Code referred from PRNet paper.]
def farthest_subsample_points(pointcloud1, num_subsampled_points=768):
pointcloud1 = pointcloud1
num_points = pointcloud1.shape[0]
nbrs1 = NearestNeighbors(n_neighbors=num_subsampled_points, algorithm='auto',
metric=lambda x, y: minkowski(x, y)).fit(pointcloud1[:, :3])
random_p1 = np.random.random(size=(1, 3)) + np.array([[500, 500, 500]]) * np.random.choice([1, -1, 1, -1])
idx1 = nbrs1.kneighbors(random_p1, return_distance=False).reshape((num_subsampled_points,))
gt_mask = torch.zeros(num_points).scatter_(0, torch.tensor(idx1), 1)
return pointcloud1[idx1, :], gt_mask
def add_outliers(pointcloud, gt_mask):
	# pointcloud: Point Cloud (ndarray) [NxC]
	# output: Corrupted Point Cloud (ndarray) [(N+100)xC]
	N, C = pointcloud.shape
	outliers = 2*torch.rand(100, C)-1 					# Sample 100 outlier points in the cube [-1, 1]
pointcloud = torch.cat([pointcloud, outliers], dim=0)
gt_mask = torch.cat([gt_mask, torch.zeros(100)])
idx = torch.randperm(pointcloud.shape[0])
pointcloud, gt_mask = pointcloud[idx], gt_mask[idx]
return pointcloud, gt_mask
class UnknownDataTypeError(Exception):
def __init__(self, *args):
if args: self.message = args[0]
else: self.message = 'Datatype not understood for dataset.'
def __str__(self):
return self.message
class ModelNet40Data(Dataset):
def __init__(
self,
train=True,
num_points=1024,
download=True,
randomize_data=False,
unseen=False,
use_normals=False
):
super(ModelNet40Data, self).__init__()
if download: download_modelnet40()
self.data, self.labels = load_data(train, use_normals)
if not train: self.shapes = self.read_classes_ModelNet40()
self.num_points = num_points
self.randomize_data = randomize_data
self.unseen = unseen
if self.unseen:
self.labels = self.labels.reshape(-1) # [N, 1] -> [N,] (Required to segregate data according to categories)
if not train:
self.data = self.data[self.labels>=20]
self.labels = self.labels[self.labels>=20]
if train:
self.data = self.data[self.labels<20]
self.labels = self.labels[self.labels<20]
print("Successfully loaded first 20 categories for training and last 20 for testing!")
self.labels = self.labels.reshape(-1, 1) # [N,] -> [N, 1]
def __getitem__(self, idx):
if self.randomize_data: current_points = self.randomize(idx)
else: current_points = self.data[idx].copy()
current_points = torch.from_numpy(current_points[:self.num_points, :]).float()
label = torch.from_numpy(self.labels[idx]).type(torch.LongTensor)
return current_points, label
def __len__(self):
return self.data.shape[0]
def randomize(self, idx):
pt_idxs = np.arange(0, self.num_points)
np.random.shuffle(pt_idxs)
return self.data[idx, pt_idxs].copy()
def get_shape(self, label):
return self.shapes[label]
def read_classes_ModelNet40(self):
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, os.pardir, 'data')
file = open(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048', 'shape_names.txt'), 'r')
shape_names = file.read()
shape_names = np.array(shape_names.split('\n')[:-1])
return shape_names
class ClassificationData(Dataset):
def __init__(self, data_class=ModelNet40Data()):
super(ClassificationData, self).__init__()
self.set_class(data_class)
def __len__(self):
return len(self.data_class)
def set_class(self, data_class):
self.data_class = data_class
def get_shape(self, label):
try:
return self.data_class.get_shape(label)
except:
return -1
def __getitem__(self, index):
return self.data_class[index]
class RegistrationData(Dataset):
def __init__(self, data_class=ModelNet40Data(), partial_source=False, noise=False, outliers=False):
super(RegistrationData, self).__init__()
self.set_class(data_class)
self.partial_source = partial_source
self.noise = noise
self.outliers = outliers
from .. ops.transform_functions import PNLKTransform
self.transforms = PNLKTransform(0.8, True)
def __len__(self):
return len(self.data_class)
def set_class(self, data_class):
self.data_class = data_class
def __getitem__(self, index):
template, label = self.data_class[index]
gt_mask = torch.ones(template.shape[0]) # by default all ones.
source = self.transforms(template)
if self.partial_source: source, gt_mask = farthest_subsample_points(source)
if self.noise: source = jitter_pointcloud(source) # Add noise in source point cloud.
if self.outliers: template, gt_mask = add_outliers(template, gt_mask)
igt = self.transforms.igt
return template, source, igt, gt_mask
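
# A minimal usage sketch for RegistrationData (an assumption about intended
# use, mirroring the smoke test at the bottom of this file; not original code):
#
#     dataset = RegistrationData(ModelNet40Data(train=False), noise=True)
#     loader = DataLoader(dataset, batch_size=8, shuffle=True)
#     template, source, igt, gt_mask = next(iter(loader))
#     # template/source: [8, num_points, 3] point clouds; igt holds the
#     # ground-truth transforms applied to each template; gt_mask flags which
#     # points correspond to the original (retained, non-outlier) geometry.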
class SegmentationData(Dataset):
def __init__(self):
super(SegmentationData, self).__init__()
def __len__(self):
pass
def __getitem__(self, index):
pass
class FlowData(Dataset):
def __init__(self):
super(FlowData, self).__init__()
self.pc1, self.pc2, self.flow = self.read_data()
def __len__(self):
if isinstance(self.pc1, np.ndarray):
return self.pc1.shape[0]
elif isinstance(self.pc1, list):
return len(self.pc1)
else:
raise UnknownDataTypeError
def read_data(self):
pass
def __getitem__(self, index):
return self.pc1[index], self.pc2[index], self.flow[index]
class SceneflowDataset(Dataset):
def __init__(self, npoints=1024, root='', partition='train'):
if root == '':
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, os.pardir, 'data')
root = os.path.join(DATA_DIR, 'data_processed_maxcut_35_20k_2k_8192')
if not os.path.exists(root):
print("To download dataset, click here: https://drive.google.com/file/d/1CMaxdt-Tg1Wct8v8eGNwuT7qRSIyJPY-/view")
exit()
else:
print("SceneflowDataset Found Successfully!")
self.npoints = npoints
self.partition = partition
self.root = root
if self.partition=='train':
self.datapath = glob.glob(os.path.join(self.root, 'TRAIN*.npz'))
else:
self.datapath = glob.glob(os.path.join(self.root, 'TEST*.npz'))
self.cache = {}
self.cache_size = 30000
###### deal with one bad datapoint with nan value
self.datapath = [d for d in self.datapath if 'TRAIN_C_0140_left_0006-0' not in d]
######
print(self.partition, ': ',len(self.datapath))
def __getitem__(self, index):
if index in self.cache:
pos1, pos2, color1, color2, flow, mask1 = self.cache[index]
else:
fn = self.datapath[index]
with open(fn, 'rb') as fp:
data = np.load(fp)
pos1 = data['points1'].astype('float32')
pos2 = data['points2'].astype('float32')
color1 = data['color1'].astype('float32')
color2 = data['color2'].astype('float32')
flow = data['flow'].astype('float32')
mask1 = data['valid_mask1']
if len(self.cache) < self.cache_size:
self.cache[index] = (pos1, pos2, color1, color2, flow, mask1)
if self.partition == 'train':
n1 = pos1.shape[0]
sample_idx1 = np.random.choice(n1, self.npoints, replace=False)
n2 = pos2.shape[0]
sample_idx2 = np.random.choice(n2, self.npoints, replace=False)
pos1 = pos1[sample_idx1, :]
pos2 = pos2[sample_idx2, :]
color1 = color1[sample_idx1, :]
color2 = color2[sample_idx2, :]
flow = flow[sample_idx1, :]
mask1 = mask1[sample_idx1]
else:
pos1 = pos1[:self.npoints, :]
pos2 = pos2[:self.npoints, :]
color1 = color1[:self.npoints, :]
color2 = color2[:self.npoints, :]
flow = flow[:self.npoints, :]
mask1 = mask1[:self.npoints]
pos1_center = np.mean(pos1, 0)
pos1 -= pos1_center
pos2 -= pos1_center
return pos1, pos2, color1, color2, flow, mask1
def __len__(self):
return len(self.datapath)
class AnyData:
def __init__(self, pc, mask=False, repeat=1000):
# pc: Give any point cloud [N, 3] (ndarray)
# mask: False means full source and True mean partial source.
self.template = torch.tensor(pc, dtype=torch.float32).unsqueeze(0)
self.template = self.template.repeat(repeat, 1, 1)
from .. ops.transform_functions import PNLKTransform
self.transforms = PNLKTransform(mag=0.5, mag_randomly=True)
self.mask = mask
def __len__(self):
return self.template.shape[0]
def __getitem__(self, index):
template = self.template[index]
source = self.transforms(template)
if self.mask:
source, gt_mask = farthest_subsample_points(source, num_subsampled_points=int(template.shape[0]*0.7))
igt = self.transforms.igt
if self.mask:
return template, source, igt, gt_mask
else:
return template, source, igt
class UserData:
def __init__(self, template, source, mask=None, igt=None):
self.template = template
self.source = source
self.mask = mask
self.igt = igt
self.check_dataset()
def check_dataset(self):
		if len(self.template.shape) > 2:
assert self.template.shape[0] == self.source.shape[0], "Number of templates are not equal to number of sources."
if self.mask is None: self.mask = np.zeros((self.template.shape[0], self.template.shape[1], 1))
if self.igt is None: self.igt = np.eye(4).reshape(1, 4, 4).repeat(self.template.shape[0], 0)
else:
self.template = self.template.reshape(1, -1, 3)
self.source = self.source.reshape(1, -1, 3)
			if self.mask is None: self.mask = np.zeros((1, self.template.shape[1], 1))
if self.igt is None: self.igt = np.eye(4).reshape(1, 4, 4)
assert self.template.shape[-1] == 3, "Template point cloud array should have 3 co-ordinates."
assert self.source.shape[-1] == 3, "Source point cloud array should have 3 co-ordinates."
def __len__(self):
if len(self.template.shape) == 2: return 1
elif len(self.template.shape) == 3: return self.template.shape[0]
else: print("Error in the data given by user!")
@staticmethod
def pc2torch(data):
return torch.tensor(data).float()
def __getitem__(self, index):
template = self.pc2torch(self.template[index])
source = self.pc2torch(self.source[index])
mask = self.pc2torch(self.mask[index])
igt = self.pc2torch(self.igt[index])
return template, source, mask, igt
if __name__ == '__main__':
class Data():
def __init__(self):
super(Data, self).__init__()
self.data, self.label = self.read_data()
def read_data(self):
return [4,5,6], [4,5,6]
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx], self.label[idx]
	# Quick smoke test: wrap the ModelNet40 test split in the registration dataset.
	cd = RegistrationData(ModelNet40Data(train=False))
import ipdb; ipdb.set_trace() | python |
import os
from unittest import TestCase
from healthtools.scrapers.base_scraper import Scraper
from healthtools.scrapers.doctors import DoctorsScraper
from healthtools.scrapers.foreign_doctors import ForeignDoctorsScraper
from healthtools.scrapers.health_facilities import HealthFacilitiesScraper
from healthtools.scrapers.nhif_inpatient import NhifInpatientScraper
from healthtools.scrapers.nhif_outpatient import NhifOutpatientScraper
from healthtools.scrapers.nhif_outpatient_cs import NhifOutpatientCsScraper
class BaseTest(TestCase):
"""
Base class for scraper unittests
"""
def setUp(self):
# get test data directory
self.TEST_DIR = os.path.dirname(os.path.abspath(__file__)) + "/"
# set up test scrapers
self.base_scraper = Scraper()
self.doctors_scraper = DoctorsScraper()
self.foreign_doctors_scraper = ForeignDoctorsScraper()
self.health_facilities_scraper = HealthFacilitiesScraper()
self.nhif_inpatient_scraper = NhifInpatientScraper()
self.nhif_outpatient_scraper = NhifOutpatientScraper()
self.nhif_outpatient_cs_scraper = NhifOutpatientCsScraper()
# set up test indices
index = "healthtools-test"
self.doctors_scraper.es_index = index
self.foreign_doctors_scraper.es_index = index
self.health_facilities_scraper.es_index = index
self.nhif_inpatient_scraper.es_index = index
self.nhif_outpatient_scraper.es_index = index
self.nhif_outpatient_cs_scraper.es_index = index
# set up tests data keys and archive keys
self.doctors_scraper.data_key = "test/" + self.doctors_scraper.data_key
self.doctors_scraper.data_archive_key = "test/" + self.doctors_scraper.data_archive_key
self.foreign_doctors_scraper.data_key = "test/" + self.foreign_doctors_scraper.data_key
self.foreign_doctors_scraper.data_archive_key = "test/" + self.foreign_doctors_scraper.data_archive_key
self.health_facilities_scraper.data_key = "test/" + self.health_facilities_scraper.data_key
self.health_facilities_scraper.data_archive_key = "test/" + self.health_facilities_scraper.data_archive_key
self.nhif_inpatient_scraper.data_key = "test/" + self.nhif_inpatient_scraper.data_key
self.nhif_inpatient_scraper.data_archive_key = "test/" + self.nhif_inpatient_scraper.data_archive_key
self.nhif_outpatient_scraper.data_key = "test/" + self.nhif_outpatient_scraper.data_key
self.nhif_outpatient_scraper.data_archive_key = "test/" + self.nhif_outpatient_scraper.data_archive_key
self.nhif_outpatient_cs_scraper.data_key = "test/" + self.nhif_outpatient_cs_scraper.data_key
self.nhif_outpatient_cs_scraper.data_archive_key = "test/" + self.nhif_outpatient_cs_scraper.data_archive_key
| python |
"""
.. _model-rgcn:
Relational graph convolutional network
================================================
**Author:** Lingfan Yu, Mufei Li, Zheng Zhang
In this tutorial, you learn how to implement a relational graph convolutional
network (R-GCN). This type of network is one effort to generalize GCN
to handle different relationships between entities in a knowledge base. To
learn more about the research behind R-GCN, see `Modeling Relational Data with Graph Convolutional
Networks <https://arxiv.org/pdf/1703.06103.pdf>`_
The straightforward graph convolutional network (GCN) (see the
`DGL tutorial <http://doc.dgl.ai/tutorials/index.html>`_) exploits the
structural information of a dataset (that is, the graph connectivity) in order to
improve the extraction of node representations. Graph edges are left
untyped.
A knowledge graph is made up of a collection of triples in the form
subject, relation, object. Edges thus encode important information and
have their own embeddings to be learned. Furthermore, there may exist
multiple edges between any given pair of entities.
"""
###############################################################################
# A brief introduction to R-GCN
# ------------------------------
# In *statistical relational learning* (SRL), there are two fundamental
# tasks:
#
# - **Entity classification** - Where you assign types and categorical
# properties to entities.
# - **Link prediction** - Where you recover missing triples.
#
# In both cases, missing information is expected to be recovered from the
# neighborhood structure of the graph. For example, the R-GCN
# paper cited earlier provides the following example. Knowing that Mikhail Baryshnikov was educated at the Vaganova Academy
# implies both that Mikhail Baryshnikov should have the label person, and
# that the triple (Mikhail Baryshnikov, lived in, Russia) must belong to the
# knowledge graph.
#
# R-GCN solves these two problems using a common graph convolutional network. It's
# extended with multi-edge encoding to compute embedding of the entities, but
# with different downstream processing.
#
# - Entity classification is done by attaching a softmax classifier at the
# final embedding of an entity (node). Training is through loss of standard
# cross-entropy.
# - Link prediction is done by reconstructing an edge with an autoencoder
# architecture, using a parameterized score function. Training uses negative
# sampling.
#
# This tutorial focuses on the first task, entity classification, to show how to generate entity
# representation. `Complete
# code <https://github.com/dmlc/dgl/tree/rgcn/examples/pytorch/rgcn>`_
# for both tasks is found in the DGL Github repository.
#
# Key ideas of R-GCN
# -------------------
# Recall that in GCN, the hidden representation for each node :math:`i` at
# :math:`(l+1)^{th}` layer is computed by:
#
# .. math:: h_i^{l+1} = \sigma\left(\sum_{j\in N_i}\frac{1}{c_i} W^{(l)} h_j^{(l)}\right)~~~~~~~~~~(1)\\
#
# where :math:`c_i` is a normalization constant.
#
# The key difference between R-GCN and GCN is that in R-GCN, edges can
# represent different relations. In GCN, weight :math:`W^{(l)}` in equation
# :math:`(1)` is shared by all edges in layer :math:`l`. In contrast, in
# R-GCN, different edge types use different weights and only edges of the
# same relation type :math:`r` are associated with the same projection weight
# :math:`W_r^{(l)}`.
#
# So the hidden representation of entities in :math:`(l+1)^{th}` layer in
# R-GCN can be formulated as the following equation:
#
# .. math:: h_i^{l+1} = \sigma\left(W_0^{(l)}h_i^{(l)}+\sum_{r\in R}\sum_{j\in N_i^r}\frac{1}{c_{i,r}}W_r^{(l)}h_j^{(l)}\right)~~~~~~~~~~(2)\\
#
# where :math:`N_i^r` denotes the set of neighbor indices of node :math:`i`
# under relation :math:`r\in R` and :math:`c_{i,r}` is a normalization
# constant. In entity classification, the R-GCN paper uses
# :math:`c_{i,r}=|N_i^r|`.
#
# The problem of applying the above equation directly is the rapid growth of
# the number of parameters, especially with highly multi-relational data. In
# order to reduce model parameter size and prevent overfitting, the original
# paper proposes to use basis decomposition.
#
# .. math:: W_r^{(l)}=\sum\limits_{b=1}^B a_{rb}^{(l)}V_b^{(l)}~~~~~~~~~~(3)\\
#
# Therefore, the weight :math:`W_r^{(l)}` is a linear combination of basis
# transformation :math:`V_b^{(l)}` with coefficients :math:`a_{rb}^{(l)}`.
# The number of bases :math:`B` is much smaller than the number of relations
# in the knowledge base.
#
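# For a rough sense of the savings: with, say, 90 relation types,
# 16-dimensional input and output features, and :math:`B = 2` bases, the
# per-layer relation weights shrink from :math:`90 \times 16 \times 16 = 23040`
# parameters to :math:`2 \times 16 \times 16 + 90 \times 2 = 692`.
#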
# .. note::
# Another weight regularization, block-decomposition, is implemented in
# the `link prediction <link-prediction_>`_.
#
# Implement R-GCN in DGL
# ----------------------
#
# An R-GCN model is composed of several R-GCN layers. The first R-GCN layer
# also serves as input layer and takes in features (for example, description texts)
# that are associated with node entity and project to hidden space. In this tutorial,
# we only use the entity ID as an entity feature.
#
# R-GCN layers
# ~~~~~~~~~~~~
#
# For each node, an R-GCN layer performs the following steps:
#
# - Compute outgoing message using node representation and weight matrix
# associated with the edge type (message function)
# - Aggregate incoming messages and generate new node representations (reduce
# and apply function)
#
# The following code is the definition of an R-GCN hidden layer.
#
# .. note::
# Each relation type is associated with a different weight. Therefore,
# the full weight matrix has three dimensions: relation, input_feature,
# output_feature.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl import DGLGraph
import dgl.function as fn
from functools import partial
class RGCNLayer(nn.Module):
def __init__(self, in_feat, out_feat, num_rels, num_bases=-1, bias=None,
activation=None, is_input_layer=False):
super(RGCNLayer, self).__init__()
self.in_feat = in_feat
self.out_feat = out_feat
self.num_rels = num_rels
self.num_bases = num_bases
self.bias = bias
self.activation = activation
self.is_input_layer = is_input_layer
# sanity check
if self.num_bases <= 0 or self.num_bases > self.num_rels:
self.num_bases = self.num_rels
# weight bases in equation (3)
self.weight = nn.Parameter(torch.Tensor(self.num_bases, self.in_feat,
self.out_feat))
if self.num_bases < self.num_rels:
# linear combination coefficients in equation (3)
self.w_comp = nn.Parameter(torch.Tensor(self.num_rels, self.num_bases))
# add bias
if self.bias:
self.bias = nn.Parameter(torch.Tensor(out_feat))
# init trainable parameters
nn.init.xavier_uniform_(self.weight,
gain=nn.init.calculate_gain('relu'))
if self.num_bases < self.num_rels:
nn.init.xavier_uniform_(self.w_comp,
gain=nn.init.calculate_gain('relu'))
if self.bias:
nn.init.xavier_uniform_(self.bias,
gain=nn.init.calculate_gain('relu'))
def forward(self, g):
if self.num_bases < self.num_rels:
# generate all weights from bases (equation (3))
weight = self.weight.view(self.in_feat, self.num_bases, self.out_feat)
weight = torch.matmul(self.w_comp, weight).view(self.num_rels,
self.in_feat, self.out_feat)
else:
weight = self.weight
if self.is_input_layer:
def message_func(edges):
# for input layer, matrix multiply can be converted to be
# an embedding lookup using source node id
embed = weight.view(-1, self.out_feat)
index = edges.data['rel_type'] * self.in_feat + edges.src['id']
return {'msg': embed[index] * edges.data['norm']}
else:
def message_func(edges):
w = weight[edges.data['rel_type']]
msg = torch.bmm(edges.src['h'].unsqueeze(1), w).squeeze()
msg = msg * edges.data['norm']
return {'msg': msg}
def apply_func(nodes):
h = nodes.data['h']
if self.bias:
h = h + self.bias
if self.activation:
h = self.activation(h)
return {'h': h}
g.update_all(message_func, fn.sum(msg='msg', out='h'), apply_func)
###############################################################################
# Full R-GCN model defined
# ~~~~~~~~~~~~~~~~~~~~~~~~~
class Model(nn.Module):
def __init__(self, num_nodes, h_dim, out_dim, num_rels,
num_bases=-1, num_hidden_layers=1):
super(Model, self).__init__()
self.num_nodes = num_nodes
self.h_dim = h_dim
self.out_dim = out_dim
self.num_rels = num_rels
self.num_bases = num_bases
self.num_hidden_layers = num_hidden_layers
# create rgcn layers
self.build_model()
# create initial features
self.features = self.create_features()
def build_model(self):
self.layers = nn.ModuleList()
# input to hidden
i2h = self.build_input_layer()
self.layers.append(i2h)
# hidden to hidden
for _ in range(self.num_hidden_layers):
h2h = self.build_hidden_layer()
self.layers.append(h2h)
# hidden to output
h2o = self.build_output_layer()
self.layers.append(h2o)
# initialize feature for each node
def create_features(self):
features = torch.arange(self.num_nodes)
return features
def build_input_layer(self):
return RGCNLayer(self.num_nodes, self.h_dim, self.num_rels, self.num_bases,
activation=F.relu, is_input_layer=True)
def build_hidden_layer(self):
return RGCNLayer(self.h_dim, self.h_dim, self.num_rels, self.num_bases,
activation=F.relu)
def build_output_layer(self):
return RGCNLayer(self.h_dim, self.out_dim, self.num_rels, self.num_bases,
activation=partial(F.softmax, dim=1))
def forward(self, g):
if self.features is not None:
g.ndata['id'] = self.features
for layer in self.layers:
layer(g)
return g.ndata.pop('h')
###############################################################################
# Handle dataset
# ~~~~~~~~~~~~~~~~
# This tutorial uses the Institute for Applied Informatics and Formal Description Methods (AIFB) dataset from the R-GCN paper.
# load graph data
from dgl.contrib.data import load_data
import numpy as np
data = load_data(dataset='aifb')
num_nodes = data.num_nodes
num_rels = data.num_rels
num_classes = data.num_classes
labels = data.labels
train_idx = data.train_idx
# split training and validation set
val_idx = train_idx[:len(train_idx) // 5]
train_idx = train_idx[len(train_idx) // 5:]
# edge type and normalization factor
edge_type = torch.from_numpy(data.edge_type)
edge_norm = torch.from_numpy(data.edge_norm).unsqueeze(1)
labels = torch.from_numpy(labels).view(-1)
###############################################################################
# Create graph and model
# ~~~~~~~~~~~~~~~~~~~~~~~
# configurations
n_hidden = 16 # number of hidden units
n_bases = -1 # use number of relations as number of bases
n_hidden_layers = 0 # use 1 input layer, 1 output layer, no hidden layer
n_epochs = 25 # epochs to train
lr = 0.01 # learning rate
l2norm = 0 # L2 norm coefficient
# create graph
g = DGLGraph()
g.add_nodes(num_nodes)
g.add_edges(data.edge_src, data.edge_dst)
g.edata.update({'rel_type': edge_type, 'norm': edge_norm})
# create model
model = Model(len(g),
n_hidden,
num_classes,
num_rels,
num_bases=n_bases,
num_hidden_layers=n_hidden_layers)
###############################################################################
# Training loop
# ~~~~~~~~~~~~~~~~
# optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2norm)
print("start training...")
model.train()
for epoch in range(n_epochs):
optimizer.zero_grad()
logits = model.forward(g)
loss = F.cross_entropy(logits[train_idx], labels[train_idx])
loss.backward()
optimizer.step()
train_acc = torch.sum(logits[train_idx].argmax(dim=1) == labels[train_idx])
train_acc = train_acc.item() / len(train_idx)
val_loss = F.cross_entropy(logits[val_idx], labels[val_idx])
val_acc = torch.sum(logits[val_idx].argmax(dim=1) == labels[val_idx])
val_acc = val_acc.item() / len(val_idx)
print("Epoch {:05d} | ".format(epoch) +
"Train Accuracy: {:.4f} | Train Loss: {:.4f} | ".format(
train_acc, loss.item()) +
"Validation Accuracy: {:.4f} | Validation loss: {:.4f}".format(
val_acc, val_loss.item()))
###############################################################################
# .. _link-prediction:
#
# The second task, link prediction
# --------------------------------
# So far, you have seen how to use DGL to implement entity classification with an
# R-GCN model. In the knowledge base setting, representation generated by
# R-GCN can be used to uncover potential relationships between nodes. In the
# R-GCN paper, the authors feed the entity representations generated by R-GCN
# into the `DistMult <https://arxiv.org/pdf/1412.6575.pdf>`_ prediction model
# to predict possible relationships.
#
# The implementation is similar to that presented here, but with an extra DistMult layer
# stacked on top of the R-GCN layers. You can find the complete
# implementation of link prediction with R-GCN in our `Github Python code example
# <https://github.com/dmlc/dgl/blob/master/examples/pytorch/rgcn/link_predict.py>`_.
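#
# As a reminder, DistMult scores a candidate triple :math:`(s, r, o)` with the
# bilinear form :math:`f(s, r, o) = e_s^\top \mathrm{diag}(w_r)\, e_o`, where
# :math:`e_s` and :math:`e_o` are the entity embeddings produced by R-GCN and
# :math:`w_r` is a learned vector for relation :math:`r`; higher scores
# indicate more plausible triples.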
| python |
"""
The type of race condition that this class is designed to prevent is somewhat
difficult to write unit tests for.
My apologies for the abysmal coverage.
"""
from google.appengine.ext import db
from catnado.testing.testcase import SimpleAppEngineTestCase
from catnado.unique_property_record import (
UniquePropertyRecord,
UniquePropertyRecordExistsError,
)
NAME = 'name'
TEST = 'test'
UNIQUE_NAME = 'unique_name'
PARENT = 'parent'
class SimpleTestModel(db.Model):
unique_name = db.StringProperty()
class UniquePropertyRecordTest(SimpleAppEngineTestCase):
def test_duplicate_key_raises_exception(self):
UniquePropertyRecord.create(TEST, TEST, TEST)
with self.assertRaises(UniquePropertyRecordExistsError):
UniquePropertyRecord.create(TEST, TEST, TEST)
UniquePropertyRecord.create(SimpleTestModel, UNIQUE_NAME, NAME)
with self.assertRaises(UniquePropertyRecordExistsError):
UniquePropertyRecord.create(SimpleTestModel, UNIQUE_NAME, NAME)
def test_nones_disallowed(self):
with self.assertRaises(AssertionError):
UniquePropertyRecord.create(TEST, TEST, None)
| python |
from PIL import Image
def parse_photo(file_path):
"""Open image(s), remove Alpha Channel if image has it and store image(s)."""
images = []
for file_name in file_path:
try:
# Open file
img = Image.open(file_name)
# If image has Alpha Channel, remove it
if img.mode == "RGBA":
img = rgb_fix(img)
# Store image
images.append(img)
# Check if file is supported
except IOError:
return None
return images
def rgb_fix(image):
"""Remove Alpha Channel from image."""
color = (255, 255, 255)
# Convert all transparent pixels into white pixels
rgb_image = Image.new('RGB', image.size, color)
rgb_image.paste(image, mask=image.split()[3])
# Return converted image
return rgb_image
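
# --- Hypothetical usage sketch (not part of the original module) ---
# The file names below are placeholders; parse_photo() returns None as soon
# as any of the given paths cannot be opened by Pillow.
if __name__ == "__main__":
    photos = parse_photo(["example1.png", "example2.jpg"])
    if photos is None:
        print("At least one file could not be read as an image.")
    else:
        print("Loaded sizes:", [im.size for im in photos])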
| python |
#
# Copyright (c) 2019 Juniper Networks, Inc. All rights reserved.
#
"""
Telemetry feature implementation.
This file contains the implementation of abstract config generation for
the telemetry feature.
"""
from collections import OrderedDict
from abstract_device_api.abstract_device_xsd import (
    CollectorParams, EnabledInterfaceParams, Feature, GrpcProfile,
    PhysicalInterface, SflowProfile, Subnet, Telemetry
)
from .db import FlowNodeDM, GrpcProfileDM, PhysicalInterfaceDM, \
SflowProfileDM, TelemetryProfileDM
from .feature_base import FeatureBase
class TelemetryFeature(FeatureBase):
@classmethod
def feature_name(cls):
return 'telemetry'
# end feature_name
def __init__(self, logger, physical_router, configs):
"""Telemetry Feature"""
self.pi_list = None
self.telemetry_map = None
super(TelemetryFeature, self).__init__(logger, physical_router,
configs)
# end __init__
def _get_or_add_to_telemetry_map(self, telemetry_name):
if telemetry_name not in self.telemetry_map:
tp = Telemetry(name=telemetry_name)
self.telemetry_map[telemetry_name] = tp
return self.telemetry_map[telemetry_name]
# end _get_or_add_to_telemetry_map
def _build_telemetry_interface_config(self, interface,
telemetry_profile_name,
sflow_profile_name=None,
sflow_profile_params=None):
if sflow_profile_params:
self._build_sflow_interface_config(
interface,
telemetry_profile_name,
sflow_profile_name,
sflow_profile_params)
# end _build_telemetry_interface_config
def _build_sflow_interface_config(self, interface,
telemetry_profile_name,
sflow_profile_name,
sflow_profile_params):
interface_name = interface.name
interface_fqname_str = ':'.join(interface.fq_name)
interface_type = interface.interface_type
sflow_interface_type = sflow_profile_params.get(
'enabled_interface_type')
if TelemetryFeature._check_interface_for_sflow(
interface_fqname_str,
interface_type,
sflow_interface_type,
sflow_profile_params.get('enabled_interface_params')):
self._build_telemetry_config(telemetry_profile_name,
sflow_profile_name,
sflow_profile_params)
pi = PhysicalInterface(name=interface_name)
self.pi_list.add(pi)
pi.set_telemetry_profile(telemetry_profile_name)
# end _build_sflow_interface_config
@staticmethod
def _check_interface_for_sflow(interface_fqname_str,
interface_type,
sflow_interface_type,
enabled_custom_interface_list):
if sflow_interface_type == "all":
return True
elif sflow_interface_type == "custom":
for custom_intf in enabled_custom_interface_list:
# Assumption: custom_intf['name'] will in fact be
# a fqname str as sent by the UI
if interface_fqname_str == custom_intf.get('name'):
return True
elif sflow_interface_type == interface_type:
return True
return False
# end _check_interface_for_sflow
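
    # Illustrative behaviour (hypothetical fq-name strings, not from real config):
    #   _check_interface_for_sflow('default:pr1:xe-0/0/0', 'service', 'all', None)
    #       -> True  (every interface is enabled)
    #   _check_interface_for_sflow('default:pr1:xe-0/0/0', 'service', 'custom',
    #                              [{'name': 'default:pr1:xe-0/0/0'}])
    #       -> True  (fq-name matches a custom entry)
    #   _check_interface_for_sflow('default:pr1:xe-0/0/1', 'fabric', 'service', [])
    #       -> False (interface type does not match the enabled type)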
def _build_telemetry_config(self, tp_name, sflow_name, sflow_params):
tp = self._get_or_add_to_telemetry_map(tp_name)
collector_ip_addr = None
sflow_profile_obj = SflowProfile(name=sflow_name)
scf = sflow_params.get('stats_collection_frequency')
if scf:
if scf.get('sample_rate'):
sflow_profile_obj.set_sample_rate(scf.get('sample_rate'))
if scf.get('polling_interval') is not None:
sflow_profile_obj.set_polling_interval(
scf.get('polling_interval'))
if scf.get('direction'):
sflow_profile_obj.set_sample_direction(
scf.get('direction'))
agent_id = sflow_params.get('agent_id')
if agent_id:
sflow_profile_obj.set_agent_id(agent_id)
adap_sampl_rt = sflow_params.get('adaptive_sample_rate')
if adap_sampl_rt:
sflow_profile_obj.set_adaptive_sample_rate(adap_sampl_rt)
enbld_intf_type = sflow_params.get('enabled_interface_type')
if enbld_intf_type:
sflow_profile_obj.set_enabled_interface_type(enbld_intf_type)
enbld_intf_params = sflow_params.get('enabled_interface_params')
for param in enbld_intf_params or []:
enbld_intf_name = param.get('name')
stats_sampl_rt = None
stats_poll_intvl = None
stats_coll_freq = \
param.get('stats_collection_frequency')
if stats_coll_freq:
stats_sampl_rt = stats_coll_freq.get('sample_rate')
stats_poll_intvl = stats_coll_freq.get('polling_interval')
enbld_intf_params_obj = EnabledInterfaceParams(
name=enbld_intf_name
)
if stats_sampl_rt:
enbld_intf_params_obj.set_sample_rate(stats_sampl_rt)
if stats_poll_intvl:
enbld_intf_params_obj.set_polling_interval(stats_poll_intvl)
sflow_profile_obj.add_enabled_interface_params(
enbld_intf_params_obj)
        # all flow nodes will have the same load balancer IP
for node in list(FlowNodeDM.values()):
collector_ip_addr = node.virtual_ip_addr
if collector_ip_addr:
collector_params = CollectorParams(
ip_address=collector_ip_addr,
udp_port=6343
)
sflow_profile_obj.set_collector_params(
collector_params)
tp.set_sflow_profile(sflow_profile_obj)
# end _build_telemetry_config
def _build_telemetry_grpc_config(self, tp_name,
grpc_profile_name,
grpc_profile_params):
snets = []
tp = self._get_or_add_to_telemetry_map(tp_name)
grpc_profile_obj = GrpcProfile(name=grpc_profile_name)
allow_clients_subnets = grpc_profile_params.get(
'allow_clients', {}).get('subnet', [])
for allow_clients_subnet in allow_clients_subnets:
prefix = allow_clients_subnet.get('ip_prefix')
prefix_len = allow_clients_subnet.get('ip_prefix_len')
snet = Subnet(prefix=prefix,
prefix_len=prefix_len)
snets.append(snet)
grpc_profile_obj.set_allow_clients(snets)
tp.set_grpc_profile(grpc_profile_obj)
# end _build_telemetry_grpc_config
def feature_config(self, **kwargs):
self.pi_list = set()
self.telemetry_map = OrderedDict()
feature_config = Feature(name=self.feature_name())
pr = self._physical_router
tp_uuid = pr.telemetry_profile
tp = TelemetryProfileDM.get(tp_uuid)
sflow_profile_params = None
sflow_profile_name = ''
grpc_profile_params = None
grpc_profile_name = ''
tp_name = ''
if tp:
tp_name = tp.fq_name[-1] + "-" + tp.fq_name[-2]
sflow_uuid = tp.sflow_profile
sflow_profile = SflowProfileDM.get(sflow_uuid)
if sflow_profile:
sflow_profile_params = \
sflow_profile.sflow_params
sflow_profile_name = sflow_profile.fq_name[-1] + \
"-" + sflow_profile.fq_name[-2]
for interface_uuid in pr.physical_interfaces:
interface = PhysicalInterfaceDM.get(interface_uuid)
self._build_telemetry_interface_config(interface, tp_name,
sflow_profile_name,
sflow_profile_params)
grpc_uuid = tp.grpc_profile
grpc_profile = GrpcProfileDM.get(grpc_uuid)
if grpc_profile:
grpc_profile_params = grpc_profile.grpc_params
grpc_profile_name = grpc_profile.fq_name[-1] + \
"-" + grpc_profile.fq_name[-2]
self._build_telemetry_grpc_config(tp_name,
grpc_profile_name,
grpc_profile_params)
for pi in self.pi_list:
feature_config.add_physical_interfaces(pi)
for telemetry_name in self.telemetry_map:
feature_config.add_telemetry(self.telemetry_map[telemetry_name])
return feature_config
# end feature_config
# end TelemetryFeature
| python |
"""
This evaluation script modifies code for the official Quoref evaluator (``allennlp/tools/quoref_eval.py``) to deal
with evaluating on contrast sets.
"""
import json
from typing import Dict, Tuple, List, Any, Set
import argparse
from collections import defaultdict
import numpy as np
from allennlp.tools import drop_eval
def _get_contrast_sets(perturbed_gold_annotations: Dict[str, Any]) -> List[Set[str]]:
grouped_instance_ids = defaultdict(set)
for article_info in perturbed_gold_annotations["data"]:
for paragraph_info in article_info["paragraphs"]:
for qa_pair in paragraph_info["qas"]:
query_id = qa_pair["id"]
original_query_id = qa_pair["original_id"]
grouped_instance_ids[original_query_id].add(original_query_id)
grouped_instance_ids[original_query_id].add(query_id)
return list(grouped_instance_ids.values())
def _get_questions_and_answers_from_data(
        annotations: Dict[str, Any]) -> Tuple[Dict[str, List[str]], Dict[str, str]]:
    """
    If the annotations file is in the same format as the original data files, this method can be used to extract
    dicts of answers and questions, both keyed by query id.
    """
answers_dict: Dict[str, List[str]] = {}
questions_dict: Dict[str, str] = {}
for article_info in annotations["data"]:
for paragraph_info in article_info["paragraphs"]:
for qa_pair in paragraph_info["qas"]:
query_id = qa_pair["id"]
candidate_answers = [answer["text"] for answer in qa_pair["answers"]]
answers_dict[query_id] = candidate_answers
questions_dict[query_id] = qa_pair["question"]
return answers_dict, questions_dict
def get_instance_metrics(annotations: Dict[str, Any],
                         predicted_answers: Dict[str, Any]) -> Tuple[Dict[str, Tuple[float, float]],
                                                                     Dict[str, str]]:
"""
Takes gold annotations and predicted answers and evaluates the predictions for each question
in the gold annotations. Both JSON dictionaries must have query_id keys, which are used to
match predictions to gold annotations.
The ``predicted_answers`` JSON must be a dictionary keyed by query id, where the value is a
list of strings (or just one string) that is the answer.
The ``annotations`` are assumed to have either the format of the dev set in the Quoref data release, or the
same format as the predicted answers file.
"""
instance_metrics: Dict[str, Tuple[float, float]] = {}
if "data" in annotations:
# We're looking at annotations in the original data format. Let's extract the answers.
annotated_answers, questions_dict = _get_questions_and_answers_from_data(annotations)
else:
questions_dict = None
annotated_answers = annotations
for query_id, candidate_answers in annotated_answers.items():
max_em_score = 0.0
max_f1_score = 0.0
if query_id in predicted_answers:
predicted = predicted_answers[query_id]
gold_answer = tuple(candidate_answers)
em_score, f1_score = drop_eval.get_metrics(predicted, gold_answer)
if gold_answer[0].strip() != "":
max_em_score = max(max_em_score, em_score)
max_f1_score = max(max_f1_score, f1_score)
else:
print("Missing prediction for question: {}".format(query_id))
max_em_score = 0.0
max_f1_score = 0.0
instance_metrics[query_id] = max_em_score, max_f1_score
return instance_metrics, questions_dict
def evaluate_contrast_sets(original_prediction_path: str,
original_gold_path: str,
perturbed_prediction_path: str,
perturbed_gold_path: str,
verbose: bool = False) -> None:
"""
    Takes prediction files and gold files for the original and perturbed sets, evaluates the predictions in both
files, and computes individual metrics and consistency over contrast sets. All
files must be json formatted and must have query_id keys, which are used to match predictions to gold
annotations. Writes metrics to standard output.
"""
# pylint: disable=too-many-locals,too-many-statements
original_predicted_answers = json.load(open(original_prediction_path, encoding="utf-8"))
original_annotations = json.load(open(original_gold_path, encoding="utf-8"))
perturbed_predicted_answers = json.load(open(perturbed_prediction_path, encoding="utf-8"))
perturbed_annotations = json.load(open(perturbed_gold_path, encoding="utf-8"))
original_instance_metrics, original_questions = get_instance_metrics(original_annotations,
original_predicted_answers)
perturbed_instance_metrics, perturbed_questions = get_instance_metrics(perturbed_annotations,
perturbed_predicted_answers)
original_em_scores = [x[0] for x in original_instance_metrics.values()]
original_f1_scores = [x[1] for x in original_instance_metrics.values()]
global_original_em = np.mean(original_em_scores)
global_original_f1 = np.mean(original_f1_scores)
perturbed_em_scores = [x[0] for x in perturbed_instance_metrics.values()]
perturbed_f1_scores = [x[1] for x in perturbed_instance_metrics.values()]
global_perturbed_em = np.mean(perturbed_em_scores)
global_perturbed_f1 = np.mean(perturbed_f1_scores)
global_combined_em = np.mean(original_em_scores + perturbed_em_scores)
global_combined_f1 = np.mean(original_f1_scores + perturbed_f1_scores)
print("\nMetrics on original dataset")
print("Exact-match accuracy {0:.2f}".format(global_original_em * 100))
print("F1 score {0:.2f}".format(global_original_f1 * 100))
print("\nMetrics on perturbed dataset")
print("Exact-match accuracy {0:.2f}".format(global_perturbed_em * 100))
print("F1 score {0:.2f}".format(global_perturbed_f1 * 100))
print("\nMetrics on combined dataset")
print("Exact-match accuracy {0:.2f}".format(global_combined_em * 100))
print("F1 score {0:.2f}".format(global_combined_f1 * 100))
contrast_sets = _get_contrast_sets(perturbed_annotations)
set_sizes = [len(set_) for set_ in contrast_sets]
mean_size = np.mean(set_sizes)
std_sizes = np.std(set_sizes)
all_instance_metrics = {key: value for key, value in list(original_instance_metrics.items()) +
list(perturbed_instance_metrics.items())}
consistency_scores = []
if original_questions is not None and perturbed_questions is not None:
all_questions = {key: (value, "original") for key, value in original_questions.items()}
all_questions.update({key: (value, "perturbed") for key, value in perturbed_questions.items()})
elif verbose:
print("Warning: verbose flag is set, but original data does not contain questions! Ignoring the flag.")
verbose = False
num_changed_questions = 0
for set_ in contrast_sets:
consistency = min([all_instance_metrics[query_id][0] for query_id in set_])
consistency_scores.append(consistency)
perturbed_set_questions = []
if original_questions is not None:
for query_id in set_:
question_text, question_type = all_questions[query_id]
if question_type == 'original':
original_set_question = question_text
else:
perturbed_set_questions.append(question_text)
num_changed_questions += sum([text != original_set_question for text in perturbed_set_questions])
if verbose:
print("===================")
for query_id in set_:
print(f"Question: {all_questions[query_id]}")
print(f"Metrics: {all_instance_metrics[query_id]}")
print(f"Consistency: {consistency}")
global_consistency = np.mean(consistency_scores)
percent_changed_questions = num_changed_questions / len(perturbed_questions) * 100
print("\nMetrics on contrast sets:")
print(f"Number of contrast sets: {len(contrast_sets)}")
print(f"Max contrast set size: {max(set_sizes)}")
print(f"Mean set size: {mean_size} (+/- {std_sizes})")
print(f"Number of questions changed: {num_changed_questions} ({percent_changed_questions}%)")
print("Consistency: {0:.2f}".format(global_consistency * 100))
if __name__ == "__main__":
# pylint: disable=invalid-name
parser = argparse.ArgumentParser(description="Evaluate Quoref predictions given contrast sets")
parser.add_argument(
"--original_gold_path",
type=str,
required=True,
default="quoref-test-v0.1.json",
help="location of the original test set with answers",
)
parser.add_argument(
"--original_prediction_path",
type=str,
required=True,
help="location of the file with predictions over the original test set",
)
parser.add_argument(
"--perturbed_gold_path",
type=str,
required=True,
help="location of the perturbed test set with answers",
)
parser.add_argument(
"--perturbed_prediction_path",
type=str,
required=True,
help="location of the file with predictions over the perturbed test set",
)
parser.add_argument(
"--verbose",
action='store_true',
help="will show details of instances if set",
)
args = parser.parse_args()
evaluate_contrast_sets(args.original_prediction_path,
args.original_gold_path,
args.perturbed_prediction_path,
args.perturbed_gold_path,
args.verbose)
| python |
try:
from django.conf.urls import *
except ImportError: # django < 1.4
from django.conf.urls.defaults import *
# place app url patterns here
| python |
import os
from flask import Flask, request, jsonify, make_response
from flask_cors import CORS
from joinnector import SDK
# through the custom helper client
from src.client.nector_client import NectorClient
client_sdk = NectorClient(os.environ.get("API_KEY"), os.environ.get(
"API_SECRET"), os.environ.get("API_MODE"))
# through the sdk helper client
sdk = SDK(os.environ.get("API_KEY"), os.environ.get(
"API_SECRET"), os.environ.get("API_MODE"))
delegate_client = SDK.get_delegate_client()
'''
For security purposes, the methods below cannot be triggered by client calls.
To whitelist a call made directly from the client side, remove the method name from the array.
The methods in "not_allowed_controller_method_names" should only be called from other backend functions (ideally while performing business operations), since they consume quota on nector.
not_allowed_controller_method_names = [
    "reward_deals", "create_leads", "save_leads", "get_subscriptions",
    "create_taskactivities", "create_wallets", "create_wallettransactions"
];
By default, the methods in not_allowed_controller_method_names are blocked from being called directly by a frontend app or website; passing an empty list below whitelists all of them.
'''
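# For example (a sketch, using the same delegate_client call as below): to keep the default protections,
# pass the blocked method names from the docstring above instead of an empty list, e.g.
#   delegatesdk = delegate_client(sdk, ["reward_deals", "create_leads", "save_leads"])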
delegatesdk = delegate_client(sdk, [])
app = Flask(__name__)
CORS(app)
def make_json_response(json_data, status=200):
response = make_response(
jsonify(json_data),
status
)
response.headers["Content-Type"] = "application/json"
return response
@app.route('/', methods=['GET'])
def health():
return make_json_response({"message": "Server is running"})
@app.route('/nector-delegate', methods=['POST'])
def delegate():
try:
response = client_sdk.delegate_method()
if response.json() is not None:
return make_json_response(response.json(), response.status_code)
except Exception as ex:
print(ex)
return make_json_response({"message": "Something went wrong, please try after sometime"}, 422)
@app.route('/nector-direct-delegate', methods=['POST'])
def direct_delegate():
try:
response = delegatesdk.delegate_method(request.get_json())
if response.json() is not None:
return make_json_response(response.json(), response.status_code)
except Exception as ex:
print(ex)
return make_json_response({"message": "Something went wrong, please try after sometime"}, 422)
| python |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
class Net(nn.Module):
def __init__(self, num_class=1024):
super(Net, self).__init__()
        num_output_hidden = int(np.log2(num_class - 1)) + 1  # bits needed to binary-encode num_class class indices
self.fc1 = nn.Linear(num_class, 1024)
self.fc2 = nn.Linear(1024, 512)
self.fc3 = nn.Linear(512, num_output_hidden)
        self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
        x = self.sigmoid(x)
return x
if __name__ == "__main__":
print(Net()) | python |
import numpy as np
from scipy.interpolate import UnivariateSpline
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import scipy.stats as st
import TransitionMatrix as TM
from TransitionMatrix import SetTransitionMatrix_NULL
def configuration(m,spaces):
if m == 1:
if spaces == 4:
spaces = 20
elif spaces == 17 or spaces == 24:
spaces = 38
elif spaces == 37:
spaces = 48
elif spaces == 42:
spaces = 15
elif spaces == 62 or spaces == 69:
spaces = 71
elif spaces == 84:
spaces = 92
elif spaces == 97:
spaces = 94
return spaces
elif m == 2:
if spaces == 4:
spaces = 13
elif spaces == 17 or spaces == 24:
spaces =30
elif spaces == 37:
spaces = 48
elif spaces == 42:
spaces = 15
elif spaces == 62 or spaces == 69:
spaces = 55
elif spaces == 84:
spaces = 75
elif spaces == 97:
spaces = 70
return spaces
    else:
        return spaces
def num_gen(m):
    turn_stats = []  # turns needed to win this game
    spaces = 0
    move_bank = []
    i = 0
    for turns in range(1, 500):
        dice = np.random.randint(1, 7)  # six-sided die; rolling a 6 means no move this turn
        move_bank.insert(turns, dice)  # keep track of every roll
        i = i + 1
        if dice == 6:
            spaces = spaces + 0
        else:
            spaces = spaces + dice
        spaces = configuration(m, spaces)  # apply this board configuration's special squares
        if spaces > 104:
            turn_stats.insert(i, turns)
            break
    return (turn_stats)
def game_analysis(config):
turns_to_win = []
for game in range(1,101):
        turns_to_win.extend(num_gen(config))  # num_gen returns a (possibly empty) list of turn counts
#print (turns)
return (turns_to_win)
def run_this(zero,dist):
a = game_analysis(zero)
a.sort() #sorting list
avg = np.mean(a)
std = np.std(a)
print(avg,'mean')
mode = st.mode(a)
print(mode[0],'mode')
#print(avg,std)
#if dist == 'pdf':
num_bins = 10
    n, bins, patches = plt.hist(a, num_bins, density=True, facecolor='green', alpha=0.5)
    y = st.norm.pdf(bins, avg, std)
plt.plot(bins, y, 'r--')
if zero == 1:
plt.xlabel('Turns to Win: Configuration 1')
elif zero == 2:
plt.xlabel('Turns to Win: Configuration 2')
else:
plt.xlabel('Turns to Win')
plt.ylabel('Probability')
plt.title("Cumalative Density Function: Monte Carlo")
plt.show()
#elif dist == 'cdf':
num_bins = 10
fig, ax = plt.subplots(figsize=(8, 4))
    n, bins, patches = ax.hist(a, num_bins, density=True, histtype='step', cumulative=True)
    y = st.norm.pdf(bins, avg, std).cumsum()
y /= y[-1]
ax.plot(bins, y, 'k--', linewidth=1.5)
if zero == 1:
plt.xlabel('Turns to Win: Configuration 1')
elif zero == 2:
plt.xlabel('Turns to Win: Configuration 2')
else:
plt.xlabel('Turns to Win')
plt.ylabel('Probability')
plt.title("Cumulative Density Function: Monte Carlo")
plt.show()
run_this(3,'cdf')
| python |
import jogovelha
import sys
erroInicializar = False
jogo = jogovelha.inicializar()
if len(jogo) != 3:
erroInicializar = True
else:
for linha in jogo:
if len(linha) != 3:
erroInicializar = True
else:
for elemento in linha:
if elemento != '.':
                    erroInicializar = True
if erroInicializar:
sys.exit(1)
else:
sys.exit(0)
| python |
#!/usr/bin/python
from re import findall
from collections import defaultdict
from itertools import combinations
def sortSides(triangles):
for i in range(len(triangles)):
triangles[i] = sorted(triangles[i])
def part1(numbers):
sortSides(numbers)
isTriangle = 0
for t in numbers:
if t[0] + t[1] > t[2]:
isTriangle += 1
print(isTriangle)
def part2(numbers):
isTriangle = 0
for i in range(3):
        for n in range(0, len(numbers), 3):
t1, t2, t3 = sorted([numbers[n][i], numbers[n+1][i], numbers[n+2][i]])
if t1+t2 > t3:
isTriangle += 1
print(isTriangle)
def main():
with open('input', 'r') as fp:
triangles = []
for line in fp.read().strip().split('\n'):
triangles.append(list(map(int, findall(r'(\d+)', line))))
        t2 = [t[:] for t in triangles]  # copy rows so part1's in-place sorting does not affect part2
part1(triangles)
part2(t2)
if __name__ == '__main__':
main()
| python |
from aoc20191215a import discover_map, move, draw
def aoc(data):
seen = discover_map(data)
step = 0
while 1 in seen.values():
prev = seen.copy()
for (x, y), value in prev.items():
if value == 2:
for xx, yy, _ in move(x, y):
if prev[(xx, yy)] == 1:
seen[(xx, yy)] = 2
draw(seen, 0, 0)
step += 1
return step
| python |
from tkinter import *
from tkinter import messagebox
from dao.book_repository_json import BookRepositoryJson
from model.book import Book
from presentation.add_edit_book_dialog import AddEditBookDialog
from presentation.app_main_window import AppMainWindow
from presentation.show_items_view import ShowItemsView
from utils.tkinter_utils import print_hierarchy
from utils.uuid_sequence_generator import uuid_sequence_generator
class Application:
def __init__(self, book_repository=BookRepositoryJson(id_sequence=uuid_sequence_generator())):
self.book_repository = book_repository
def start(self):
self.book_repository.load()
self.root = Tk()
self.main_window = AppMainWindow(self.root, self)
print_hierarchy(self.root)
self.root.mainloop()
def browseBooks(self):
self.book_repository.load()
books = self.book_repository.find_all()
self.show_books_view = ShowItemsView(self.root, items=books, item_class=Book,
add_command=self.show_add_book, edit_command=self.show_edit_book, delete_command=self.delete_books, )
def show_add_book(self):
self.add_book_dialog = AddEditBookDialog(self.root, application=self)
def show_edit_book(self, books):
if len(books) == 0:
messagebox.showinfo(title="Edit Book Dialog", message="Please select a book to edit.")
return
edited_book = self.book_repository.find_by_id(books[0][0])
self.add_book_dialog = AddEditBookDialog(self.root, book=edited_book, application=self)
def add_edit_book(self, book):
if book.id:
self.book_repository.update(book) #edit existing book
else:
self.book_repository.insert(book) # add new book
self.book_repository.persist()
self.show_books_view.set_items(self.book_repository.find_all())
def delete_books(self, books):
for book_tuple in books:
self.book_repository.delete_by_id(book_tuple[0])
self.book_repository.persist()
self.show_books_view.set_items(self.book_repository.find_all())
if __name__ == '__main__':
app = Application(BookRepositoryJson(id_sequence=uuid_sequence_generator()))
app.start()
| python |
from utils.qSLP import qSLP
from qiskit.utils import QuantumInstance
from qiskit import Aer, QuantumCircuit
from utils.data_visualization import *
from utils.Utils_pad import padding
from utils.import_data import get_dataset
from qiskit.circuit.library import ZZFeatureMap, ZFeatureMap
from qiskit.circuit.library import RealAmplitudes
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, execute, BasicAer
import pickle
from utils.Utils import get_params, parity
from sklearn.metrics import accuracy_score
import pandas as pd
import sys
def get_quantum_instance():
IBMQ.load_account() # Load account from disk
provider = IBMQ.get_provider(hub='ibm-q')
small_devices = provider.backends(filters=lambda x: x.configuration().n_qubits == 5
and not x.configuration().simulator
and x.status().operational== True)
    backend = least_busy(small_devices)
# Comment to run on real devices
# backend = Aer.get_backend('aer_simulator')
return QuantumInstance(backend, shots=1024)
def main(path_results, path_models, path_save):
path_res = path_results
datasets = ["iris01","MNIST09", "MNIST38", "iris12", "iris02"]
for dataset in datasets:
qinstance = get_quantum_instance()
X_train, X_test, Y_train, Y_test = get_dataset(dataset)
X_test_pad = padding(X_test)
for d in range(1,4):
# Create model
model_name = f"pad_qSLP_{d}"
print(model_name)
params = get_params(model_name, dataset)
model = qSLP(d, True)
qc, sp_par, ansatz_par = model.get_full_circ()
# Set params
weights = dict(zip(ansatz_par, params))
qc = qc.bind_parameters(weights)
ris = []
# Execute tests
for i in range(X_test.shape[0]):
inp = dict(zip(sp_par, X_test_pad[i]))
q = qc.bind_parameters(inp)
res = execute(q, qinstance.backend, shots=1024).result()
ris.append(res.get_counts())
# Process and save results
ris = [int(max(el, key=el.get)) for el in ris]
acc = accuracy_score(ris, Y_test)
result = {
"model": [model_name],
"real_dev_score" : [acc]
}
res = pd.DataFrame(result)
res.to_csv(path_save, mode = "a", header=False, index = False)
# Create model
model_name = f"sdq_qSLP_{d}"
print(model_name)
params = get_params(model_name, dataset)
model = qSLP(d, False)
qc, sp_par, ansatz_par = model.get_full_circ()
# Set params
weights = dict(zip(ansatz_par, params))
qc = qc.bind_parameters(weights)
ris = []
# Execute circuit
for i in range(X_test.shape[0]):
inp = dict(zip(sp_par, X_test[i]))
q = qc.bind_parameters(inp)
res = execute(q, qinstance.backend, shots=1024).result()
ris.append(res.get_counts())
# Process and save results
ris = [int(max(el, key=el.get)) for el in ris]
acc = accuracy_score(ris, Y_test)
result = {
"model": [model_name],
"real_dev_score" : [acc]
}
res = pd.DataFrame(result)
res.to_csv(path_save, mode = "a", header=False, index = False)
# Create model qnnC_v1
model_name = "qNNC_v1"
print(model_name)
tot_qubit = 2
feature_map = ZZFeatureMap(feature_dimension=2,
reps=1, entanglement='linear')
ansatz = RealAmplitudes(2, reps=1)
interpreter = parity
qc = QuantumCircuit(tot_qubit)
qc.append(feature_map, range(tot_qubit))
qc.append(ansatz, range(tot_qubit))
qc.measure_all()
params = get_params(model_name, dataset)
weights = dict(zip(ansatz.parameters, params))
qc = qc.bind_parameters(weights)
ris = []
for i in range(X_test.shape[0]):
            weights = dict(zip(feature_map.parameters, X_test[i]))
            q = qc.bind_parameters(weights)
res = execute(q, qinstance.backend, shots=1024).result()
ris.append(max(res.get_counts(), key=res.get_counts().get).count('1') % 2)
acc = accuracy_score(ris, Y_test)
#acc = accuracy_score([max(el, key=el.get).count('1') % 2 for el in ris], Y_test)
result = {
"model": [model_name],
"real_dev_score" : [acc]
}
res = pd.DataFrame(result)
res.to_csv(path_save, mode = "a", header=False, index = False)
# Create model qnnC_v2
model_name = "qNNC_v2"
print(model_name)
tot_qubit = 2
feature_map = ZFeatureMap(feature_dimension=2,
reps=1)
ansatz = RealAmplitudes(2, reps=2)
interpreter = parity
qc = QuantumCircuit(tot_qubit)
qc.append(feature_map, range(tot_qubit))
qc.append(ansatz, range(tot_qubit))
qc.measure_all()
params = get_params(model_name, dataset)
weights = dict(zip(ansatz.parameters, params))
qc = qc.bind_parameters(weights)
ris = []
for i in range(X_test.shape[0]):
            weights = dict(zip(feature_map.parameters, X_test[i]))
            q = qc.bind_parameters(weights)
res = execute(q, qinstance.backend, shots=1024).result()
ris.append(max(res.get_counts(), key=res.get_counts().get).count('1') % 2)
acc = accuracy_score(ris, Y_test)
result = {
"model": [model_name],
"real_dev_score" : [acc]
}
res = pd.DataFrame(result)
res.to_csv(path_save, mode = "a", header=False, index = False)
# Create model QSVC
model_name = "QSVC"
print(model_name)
best_df = pd.read_csv("results/test_simulation/simulated_best.csv")
best_qsvc = best_df[best_df["model"] == model_name]
k = best_qsvc[best_qsvc["dataset"] == dataset]["k"].item()
loaded_model = pickle.load(open(f"results/training/qsvm/{model_name}_{dataset}_{k}.sav", 'rb'))
        ris = loaded_model.predict(X_test)
        acc = accuracy_score(ris, Y_test)
result = {
"model": [model_name],
"real_dev_score" : [acc]
}
res = pd.DataFrame(result)
res.to_csv(path_save, mode = "a", header=False, index = False)
columns = [ "model","real_dev_score" ]
df = pd.read_csv(path_save,names=columns)
df.to_csv(path_save, index=False)
if __name__ == "__main__":
#args = sys.argv[1:]
args = ['results/training/file_result.txt', 'results/training/qsvm/', 'results/test_real/acc_real.txt' ]
if len(args) != 3:
raise Exception("Wrong number of arguments, specify: csv file for results, path to qsvc model save folder, csv file to save loss/accuracy ")
path_results = args[0]
path_models = args[1]
path_save = args[2]
main(path_results, path_models, path_save)
| python |
# OP_RETURN.py
#
# Python script to generate and retrieve OP_RETURN bitcore transactions
#
# Copyright (c) Coin Sciences Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import subprocess, json, time, random, os.path, binascii, struct, string, re, hashlib
# Python 2-3 compatibility logic
try:
basestring
except NameError:
basestring = str
# User-defined quasi-constants
OP_RETURN_BITCORE_IP='127.0.0.1' # IP address of your bitcore node
OP_RETURN_BITCORE_USE_CMD=False # use command-line instead of JSON-RPC?
if OP_RETURN_BITCORE_USE_CMD:
OP_RETURN_BITCORE_PATH='/usr/local/bin/bitcore-cli' # path to bitcore-cli executable on this server
else:
OP_RETURN_BITCORE_PORT='20001' # leave empty to use default port for mainnet/testnet
OP_RETURN_BITCORE_USER='admin1' # leave empty to read from ~/.bitcore/bitcore.conf (Unix only)
OP_RETURN_BITCORE_PASSWORD='123' # leave empty to read from ~/.bitcore/bitcore.conf (Unix only)
OP_RETURN_BTX_FEE=0.0001 # BTX fee to pay per transaction
OP_RETURN_BTX_DUST=0.00001 # omit BTX outputs smaller than this
OP_RETURN_MAX_BYTES=80 # maximum bytes in an OP_RETURN (80 as of Bitcore 0.11)
OP_RETURN_MAX_BLOCKS=10 # maximum number of blocks to try when retrieving data
OP_RETURN_NET_TIMEOUT=10 # how long to time out (in seconds) when communicating with bitcore node
# User-facing functions
def OP_RETURN_send(send_address, send_amount, metadata, testnet=False):
# Validate some parameters
if not OP_RETURN_bitcore_check(testnet):
return {'error': 'Please check Bitcore Core is running and OP_RETURN_BITCORE_* constants are set correctly'}
result=OP_RETURN_bitcore_cmd('validateaddress', testnet, send_address)
if not ('isvalid' in result and result['isvalid']):
return {'error': 'Send address could not be validated: '+send_address}
if isinstance(metadata, basestring):
metadata=metadata.encode('utf-8') # convert to binary string
metadata_len=len(metadata)
if metadata_len>65536:
return {'error': 'This library only supports metadata up to 65536 bytes in size'}
if metadata_len>OP_RETURN_MAX_BYTES:
return {'error': 'Metadata has '+str(metadata_len)+' bytes but is limited to '+str(OP_RETURN_MAX_BYTES)+' (see OP_RETURN_MAX_BYTES)'}
# Calculate amounts and choose inputs
output_amount=send_amount+OP_RETURN_BTX_FEE
inputs_spend=OP_RETURN_select_inputs(output_amount, testnet)
if 'error' in inputs_spend:
return {'error': inputs_spend['error']}
change_amount=inputs_spend['total']-output_amount
# Build the raw transaction
change_address=OP_RETURN_bitcore_cmd('getrawchangeaddress', testnet)
outputs={send_address: send_amount}
if change_amount>=OP_RETURN_BTX_DUST:
outputs[change_address]=change_amount
raw_txn=OP_RETURN_create_txn(inputs_spend['inputs'], outputs, metadata, len(outputs), testnet)
# Sign and send the transaction, return result
return OP_RETURN_sign_send_txn(raw_txn, testnet)
def OP_RETURN_store(data, testnet=False):
# Data is stored in OP_RETURNs within a series of chained transactions.
# If the OP_RETURN is followed by another output, the data continues in the transaction spending that output.
# When the OP_RETURN is the last output, this also signifies the end of the data.
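    # For example (hypothetical sizes): a 200-byte payload with OP_RETURN_MAX_BYTES=80 is split across
    # ceil(200/80) = 3 chained transactions, so 3*OP_RETURN_BTX_FEE is reserved for fees below.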
# Validate parameters and get change address
if not OP_RETURN_bitcore_check(testnet):
return {'error': 'Please check Bitcore Core is running and OP_RETURN_BITCORE_* constants are set correctly'}
if isinstance(data, basestring):
data=data.encode('utf-8') # convert to binary string
data_len=len(data)
if data_len==0:
return {'error': 'Some data is required to be stored'}
change_address=OP_RETURN_bitcore_cmd('getrawchangeaddress', testnet)
# Calculate amounts and choose first inputs to use
output_amount=OP_RETURN_BTX_FEE*int((data_len+OP_RETURN_MAX_BYTES-1)/OP_RETURN_MAX_BYTES) # number of transactions required
inputs_spend=OP_RETURN_select_inputs(output_amount, testnet)
if 'error' in inputs_spend:
return {'error': inputs_spend['error']}
inputs=inputs_spend['inputs']
input_amount=inputs_spend['total']
# Find the current blockchain height and mempool txids
height=int(OP_RETURN_bitcore_cmd('getblockcount', testnet))
avoid_txids=OP_RETURN_bitcore_cmd('getrawmempool', testnet)
# Loop to build and send transactions
result={'txids':[]}
for data_ptr in range(0, data_len, OP_RETURN_MAX_BYTES):
# Some preparation for this iteration
last_txn=((data_ptr+OP_RETURN_MAX_BYTES)>=data_len) # is this the last tx in the chain?
change_amount=input_amount-OP_RETURN_BTX_FEE
metadata=data[data_ptr:data_ptr+OP_RETURN_MAX_BYTES]
# Build and send this transaction
outputs={}
if change_amount>=OP_RETURN_BTX_DUST: # might be skipped for last transaction
outputs[change_address]=change_amount
raw_txn=OP_RETURN_create_txn(inputs, outputs, metadata, len(outputs) if last_txn else 0, testnet)
send_result=OP_RETURN_sign_send_txn(raw_txn, testnet)
# Check for errors and collect the txid
if 'error' in send_result:
result['error']=send_result['error']
break
result['txids'].append(send_result['txid'])
if data_ptr==0:
result['ref']=OP_RETURN_calc_ref(height, send_result['txid'], avoid_txids)
# Prepare inputs for next iteration
inputs=[{
'txid': send_result['txid'],
'vout': 1,
}]
input_amount=change_amount
# Return the final result
return result
def OP_RETURN_retrieve(ref, max_results=1, testnet=False):
# Validate parameters and get status of Bitcore Core
if not OP_RETURN_bitcore_check(testnet):
return {'error': 'Please check Bitcore Core is running and OP_RETURN_BITCORE_* constants are set correctly'}
max_height=int(OP_RETURN_bitcore_cmd('getblockcount', testnet))
heights=OP_RETURN_get_ref_heights(ref, max_height)
if not isinstance(heights, list):
return {'error': 'Ref is not valid'}
# Collect and return the results
results=[]
for height in heights:
if height==0:
txids=OP_RETURN_list_mempool_txns(testnet) # if mempool, only get list for now (to save RPC calls)
txns=None
else:
txns=OP_RETURN_get_block_txns(height, testnet) # if block, get all fully unpacked
txids=txns.keys()
for txid in txids:
if OP_RETURN_match_ref_txid(ref, txid):
if height==0:
txn_unpacked=OP_RETURN_get_mempool_txn(txid, testnet)
else:
txn_unpacked=txns[txid]
found=OP_RETURN_find_txn_data(txn_unpacked)
if found:
# Collect data from txid which matches ref and contains an OP_RETURN
result={
'txids': [str(txid)],
'data': found['op_return'],
}
key_heights={height: True}
# Work out which other block heights / mempool we should try
if height==0:
try_heights=[] # nowhere else to look if first still in mempool
else:
result['ref']=OP_RETURN_calc_ref(height, txid, txns.keys())
try_heights=OP_RETURN_get_try_heights(height+1, max_height, False)
# Collect the rest of the data, if appropriate
if height==0:
this_txns=OP_RETURN_get_mempool_txns(testnet) # now retrieve all to follow chain
else:
this_txns=txns
last_txid=txid
this_height=height
while found['index'] < (len(txn_unpacked['vout'])-1): # this means more data to come
next_txid=OP_RETURN_find_spent_txid(this_txns, last_txid, found['index']+1)
# If we found the next txid in the data chain
if next_txid:
result['txids'].append(str(next_txid))
txn_unpacked=this_txns[next_txid]
found=OP_RETURN_find_txn_data(txn_unpacked)
if found:
result['data']+=found['op_return']
key_heights[this_height]=True
else:
result['error']='Data incomplete - missing OP_RETURN'
break
last_txid=next_txid
# Otherwise move on to the next height to keep looking
else:
if len(try_heights):
this_height=try_heights.pop(0)
if this_height==0:
this_txns=OP_RETURN_get_mempool_txns(testnet)
else:
this_txns=OP_RETURN_get_block_txns(this_height, testnet)
else:
result['error']='Data incomplete - could not find next transaction'
break
# Finish up the information about this result
result['heights']=list(key_heights.keys())
results.append(result)
if len(results)>=max_results:
break # stop if we have collected enough
return results
# Utility functions
def OP_RETURN_select_inputs(total_amount, testnet):
# List and sort unspent inputs by priority
unspent_inputs=OP_RETURN_bitcore_cmd('listunspent', testnet, 0)
if not isinstance(unspent_inputs, list):
return {'error': 'Could not retrieve list of unspent inputs'}
unspent_inputs.sort(key=lambda unspent_input: unspent_input['amount']*unspent_input['confirmations'], reverse=True)
# Identify which inputs should be spent
inputs_spend=[]
input_amount=0
for unspent_input in unspent_inputs:
inputs_spend.append(unspent_input)
input_amount+=unspent_input['amount']
if input_amount>=total_amount:
break # stop when we have enough
if input_amount<total_amount:
return {'error': 'Not enough funds are available to cover the amount and fee'}
# Return the successful result
return {
'inputs': inputs_spend,
'total': input_amount,
}
def OP_RETURN_create_txn(inputs, outputs, metadata, metadata_pos, testnet):
raw_txn=OP_RETURN_bitcore_cmd('createrawtransaction', testnet, inputs, outputs)
txn_unpacked=OP_RETURN_unpack_txn(OP_RETURN_hex_to_bin(raw_txn))
metadata_len=len(metadata)
if metadata_len<=75:
payload=bytearray((metadata_len,))+metadata # length byte + data (https://en.bitcoin.it/wiki/Script)
    elif metadata_len<=255:
        payload=bytearray((0x4c, metadata_len))+metadata # OP_PUSHDATA1 format (one length byte, so up to 255 bytes)
    else:
        payload=bytearray((0x4d, metadata_len%256, int(metadata_len/256)))+metadata # OP_PUSHDATA2 format
metadata_pos=min(max(0, metadata_pos), len(txn_unpacked['vout'])) # constrain to valid values
txn_unpacked['vout'][metadata_pos:metadata_pos]=[{
'value': 0,
'scriptPubKey': '6a'+OP_RETURN_bin_to_hex(payload) # here's the OP_RETURN
}]
return OP_RETURN_bin_to_hex(OP_RETURN_pack_txn(txn_unpacked))
def OP_RETURN_sign_send_txn(raw_txn, testnet):
signed_txn=OP_RETURN_bitcore_cmd('signrawtransaction', testnet, raw_txn)
if not ('complete' in signed_txn and signed_txn['complete']):
return {'error': 'Could not sign the transaction'}
send_txid=OP_RETURN_bitcore_cmd('sendrawtransaction', testnet, signed_txn['hex'])
if not (isinstance(send_txid, basestring) and len(send_txid)==64):
return {'error': 'Could not send the transaction'}
return {'txid': str(send_txid)}
def OP_RETURN_list_mempool_txns(testnet):
return OP_RETURN_bitcore_cmd('getrawmempool', testnet)
def OP_RETURN_get_mempool_txn(txid, testnet):
raw_txn=OP_RETURN_bitcore_cmd('getrawtransaction', testnet, txid)
return OP_RETURN_unpack_txn(OP_RETURN_hex_to_bin(raw_txn))
def OP_RETURN_get_mempool_txns(testnet):
txids=OP_RETURN_list_mempool_txns(testnet)
txns={}
for txid in txids:
txns[txid]=OP_RETURN_get_mempool_txn(txid, testnet)
return txns
def OP_RETURN_get_raw_block(height, testnet):
block_hash=OP_RETURN_bitcore_cmd('getblockhash', testnet, height)
if not (isinstance(block_hash, basestring) and len(block_hash)==64):
return {'error': 'Block at height '+str(height)+' not found'}
return {
'block': OP_RETURN_hex_to_bin(OP_RETURN_bitcore_cmd('getblock', testnet, block_hash, False))
}
def OP_RETURN_get_block_txns(height, testnet):
raw_block=OP_RETURN_get_raw_block(height, testnet)
if 'error' in raw_block:
return {'error': raw_block['error']}
block=OP_RETURN_unpack_block(raw_block['block'])
return block['txs']
# Talking to bitcore-cli
def OP_RETURN_bitcore_check(testnet):
info=OP_RETURN_bitcore_cmd('getinfo', testnet)
return isinstance(info, dict) and 'balance' in info
def OP_RETURN_bitcore_cmd(command, testnet, *args): # more params are read from here
if OP_RETURN_BITCORE_USE_CMD:
sub_args=[OP_RETURN_BITCORE_PATH]
if testnet:
sub_args.append('-testnet')
sub_args.append(command)
for arg in args:
sub_args.append(json.dumps(arg) if isinstance(arg, (dict, list, tuple)) else str(arg))
raw_result=subprocess.check_output(sub_args).decode("utf-8").rstrip("\n")
try: # decode JSON if possible
result=json.loads(raw_result)
except ValueError:
result=raw_result
else:
request={
'id': str(time.time())+'-'+str(random.randint(100000,999999)),
'method': command,
'params': args,
}
port=OP_RETURN_BITCORE_PORT
user=OP_RETURN_BITCORE_USER
password=OP_RETURN_BITCORE_PASSWORD
if not (len(port) and len(user) and len(password)):
conf_lines=open(os.path.expanduser('~')+'/bitcore-testnet-box/1/bitcore.conf').readlines()
for conf_line in conf_lines:
parts=conf_line.strip().split('=', 1) # up to 2 parts
if (parts[0]=='rpcport') and not len(port):
port=int(parts[1])
if (parts[0]=='rpcuser') and not len(user):
user=parts[1]
if (parts[0]=='rpcpassword') and not len(password):
password=parts[1]
if not len(port):
port=50332 if testnet else 8556 #28332 50332
#port=50332 if testnet else 8556
if not (len(user) and len(password)):
return None # no point trying in this case
url='http://'+OP_RETURN_BITCORE_IP+':'+str(port)+'/'
try:
from urllib2 import HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, build_opener, install_opener, urlopen
except ImportError:
from urllib.request import HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, build_opener, install_opener, urlopen
passman=HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, url, user, password)
auth_handler=HTTPBasicAuthHandler(passman)
opener=build_opener(auth_handler)
install_opener(opener)
raw_result=urlopen(url, json.dumps(request).encode('utf-8'), OP_RETURN_NET_TIMEOUT).read()
result_array=json.loads(raw_result.decode('utf-8'))
result=result_array['result']
return result
# Working with data references
# The format of a data reference is: [estimated block height]-[partial txid] - where:
# [estimated block height] is the block where the first transaction might appear and following
# which all subsequent transactions are expected to appear. In the event of a weird blockchain
# reorg, it is possible the first transaction might appear in a slightly earlier block. When
# embedding data, we set [estimated block height] to 1+(the current block height).
# [partial txid] contains 2 adjacent bytes from the txid, at a specific position in the txid:
# 2*([partial txid] div 65536) gives the offset of the 2 adjacent bytes, between 0 and 28.
# ([partial txid] mod 256) is the byte of the txid at that offset.
# (([partial txid] mod 65536) div 256) is the byte of the txid at that offset plus one.
# Note that the txid is ordered according to user presentation, not raw data in the block.
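# Worked example (hypothetical values): if the next block height is 123456, the chosen txid offset is 3,
# and the txid bytes at positions 6 and 7 are 0xAB and 0xCD respectively, then
#   tx_ref = 0xAB + 256*0xCD + 65536*3 = 171 + 52480 + 196608 = 249259
# and the reference string is '123456-249259'.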
def OP_RETURN_calc_ref(next_height, txid, avoid_txids):
txid_binary=OP_RETURN_hex_to_bin(txid)
for txid_offset in range(15):
sub_txid=txid_binary[2*txid_offset:2*txid_offset+2]
clashed=False
for avoid_txid in avoid_txids:
avoid_txid_binary=OP_RETURN_hex_to_bin(avoid_txid)
if (
(avoid_txid_binary[2*txid_offset:2*txid_offset+2]==sub_txid) and
(txid_binary!=avoid_txid_binary)
):
clashed=True
break
if not clashed:
break
if clashed: # could not find a good reference
return None
tx_ref=ord(txid_binary[2*txid_offset:1+2*txid_offset])+256*ord(txid_binary[1+2*txid_offset:2+2*txid_offset])+65536*txid_offset
return '%06d-%06d' % (next_height, tx_ref)
def OP_RETURN_get_ref_parts(ref):
if not re.search('^[0-9]+\-[0-9A-Fa-f]+$', ref): # also support partial txid for second half
return None
parts=ref.split('-')
if re.search('[A-Fa-f]', parts[1]):
if len(parts[1])>=4:
txid_binary=OP_RETURN_hex_to_bin(parts[1][0:4])
parts[1]=ord(txid_binary[0:1])+256*ord(txid_binary[1:2])+65536*0
else:
return None
parts=list(map(int, parts))
if parts[1]>983039: # 14*65536+65535
return None
return parts
def OP_RETURN_get_ref_heights(ref, max_height):
parts=OP_RETURN_get_ref_parts(ref)
if not parts:
return None
return OP_RETURN_get_try_heights(parts[0], max_height, True)
def OP_RETURN_get_try_heights(est_height, max_height, also_back):
forward_height=est_height
back_height=min(forward_height-1, max_height)
heights=[]
mempool=False
try_height=0
while True:
if also_back and ((try_height%3)==2): # step back every 3 tries
heights.append(back_height)
back_height-=1
else:
if forward_height>max_height:
if not mempool:
heights.append(0) # indicates to try mempool
mempool=True
elif not also_back:
break # nothing more to do here
else:
heights.append(forward_height)
forward_height+=1
if len(heights)>=OP_RETURN_MAX_BLOCKS:
break
try_height+=1
return heights
def OP_RETURN_match_ref_txid(ref, txid):
parts=OP_RETURN_get_ref_parts(ref)
if not parts:
return None
txid_offset=int(parts[1]/65536)
txid_binary=OP_RETURN_hex_to_bin(txid)
txid_part=txid_binary[2*txid_offset:2*txid_offset+2]
txid_match=bytearray([parts[1]%256, int((parts[1]%65536)/256)])
return txid_part==txid_match # exact binary comparison
# Unpacking and packing bitcore blocks and transactions
def OP_RETURN_unpack_block(binary):
buffer=OP_RETURN_buffer(binary)
block={}
block['version']=buffer.shift_unpack(4, '<L')
block['hashPrevBlock']=OP_RETURN_bin_to_hex(buffer.shift(32)[::-1])
block['hashMerkleRoot']=OP_RETURN_bin_to_hex(buffer.shift(32)[::-1])
block['time']=buffer.shift_unpack(4, '<L')
block['bits']=buffer.shift_unpack(4, '<L')
block['nonce']=buffer.shift_unpack(4, '<L')
block['tx_count']=buffer.shift_varint()
block['txs']={}
old_ptr=buffer.used()
while buffer.remaining():
transaction=OP_RETURN_unpack_txn_buffer(buffer)
new_ptr=buffer.used()
size=new_ptr-old_ptr
raw_txn_binary=binary[old_ptr:old_ptr+size]
txid=OP_RETURN_bin_to_hex(hashlib.sha256(hashlib.sha256(raw_txn_binary).digest()).digest()[::-1])
old_ptr=new_ptr
transaction['size']=size
block['txs'][txid]=transaction
return block
def OP_RETURN_unpack_txn(binary):
return OP_RETURN_unpack_txn_buffer(OP_RETURN_buffer(binary))
def OP_RETURN_unpack_txn_buffer(buffer):
# see: https://en.bitcoin.it/wiki/Transactions
txn={
'vin': [],
'vout': [],
}
txn['version']=buffer.shift_unpack(4, '<L') # small-endian 32-bits
inputs=buffer.shift_varint()
if inputs>100000: # sanity check
return None
for _ in range(inputs):
input={}
input['txid']=OP_RETURN_bin_to_hex(buffer.shift(32)[::-1])
input['vout']=buffer.shift_unpack(4, '<L')
length=buffer.shift_varint()
input['scriptSig']=OP_RETURN_bin_to_hex(buffer.shift(length))
input['sequence']=buffer.shift_unpack(4, '<L')
txn['vin'].append(input)
outputs=buffer.shift_varint()
if outputs>100000: # sanity check
return None
for _ in range(outputs):
output={}
output['value']=float(buffer.shift_uint64())/100000000
length=buffer.shift_varint()
output['scriptPubKey']=OP_RETURN_bin_to_hex(buffer.shift(length))
txn['vout'].append(output)
txn['locktime']=buffer.shift_unpack(4, '<L')
return txn
def OP_RETURN_find_spent_txid(txns, spent_txid, spent_vout):
for txid, txn_unpacked in txns.items():
for input in txn_unpacked['vin']:
if (input['txid']==spent_txid) and (input['vout']==spent_vout):
return txid
return None
def OP_RETURN_find_txn_data(txn_unpacked):
for index, output in enumerate(txn_unpacked['vout']):
op_return=OP_RETURN_get_script_data(OP_RETURN_hex_to_bin(output['scriptPubKey']))
if op_return:
return {
'index': index,
'op_return': op_return,
}
return None
def OP_RETURN_get_script_data(scriptPubKeyBinary):
op_return=None
if scriptPubKeyBinary[0:1]==b'\x6a':
first_ord=ord(scriptPubKeyBinary[1:2])
if first_ord<=75:
op_return=scriptPubKeyBinary[2:2+first_ord]
elif first_ord==0x4c:
op_return=scriptPubKeyBinary[3:3+ord(scriptPubKeyBinary[2:3])]
elif first_ord==0x4d:
op_return=scriptPubKeyBinary[4:4+ord(scriptPubKeyBinary[2:3])+256*ord(scriptPubKeyBinary[3:4])]
return op_return
def OP_RETURN_pack_txn(txn):
binary=b''
binary+=struct.pack('<L', txn['version'])
binary+=OP_RETURN_pack_varint(len(txn['vin']))
for input in txn['vin']:
binary+=OP_RETURN_hex_to_bin(input['txid'])[::-1]
binary+=struct.pack('<L', input['vout'])
binary+=OP_RETURN_pack_varint(int(len(input['scriptSig'])/2)) # divide by 2 because it is currently in hex
binary+=OP_RETURN_hex_to_bin(input['scriptSig'])
binary+=struct.pack('<L', input['sequence'])
binary+=OP_RETURN_pack_varint(len(txn['vout']))
for output in txn['vout']:
binary+=OP_RETURN_pack_uint64(int(round(output['value']*100000000)))
binary+=OP_RETURN_pack_varint(int(len(output['scriptPubKey'])/2)) # divide by 2 because it is currently in hex
binary+=OP_RETURN_hex_to_bin(output['scriptPubKey'])
binary+=struct.pack('<L', txn['locktime'])
return binary
def OP_RETURN_pack_varint(integer):
    if integer>0xFFFFFFFF:
        packed=b"\xFF"+OP_RETURN_pack_uint64(integer)
    elif integer>0xFFFF:
        packed=b"\xFE"+struct.pack('<L', integer)
    elif integer>0xFC:
        packed=b"\xFD"+struct.pack('<H', integer)
    else:
        packed=struct.pack('B', integer)
return packed
def OP_RETURN_pack_uint64(integer):
upper=int(integer/4294967296)
lower=integer-upper*4294967296
return struct.pack('<L', lower)+struct.pack('<L', upper)
# Helper class for unpacking bitcore binary data
class OP_RETURN_buffer():
def __init__(self, data, ptr=0):
self.data=data
self.len=len(data)
self.ptr=ptr
def shift(self, chars):
prefix=self.data[self.ptr:self.ptr+chars]
self.ptr+=chars
return prefix
def shift_unpack(self, chars, format):
unpack=struct.unpack(format, self.shift(chars))
return unpack[0]
def shift_varint(self):
value=self.shift_unpack(1, 'B')
if value==0xFF:
value=self.shift_uint64()
elif value==0xFE:
value=self.shift_unpack(4, '<L')
elif value==0xFD:
value=self.shift_unpack(2, '<H')
return value
def shift_uint64(self):
return self.shift_unpack(4, '<L')+4294967296*self.shift_unpack(4, '<L')
def used(self):
return min(self.ptr, self.len)
def remaining(self):
return max(self.len-self.ptr, 0)
# Converting binary <-> hexadecimal
def OP_RETURN_hex_to_bin(hex):
try:
raw=binascii.a2b_hex(hex)
except Exception:
return None
return raw
def OP_RETURN_bin_to_hex(string):
return binascii.b2a_hex(string).decode('utf-8')
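# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original library). It assumes a Bitcore node is reachable with the
# OP_RETURN_BITCORE_* settings above; the address, amount and payloads are placeholders.
#
#   send_result = OP_RETURN_send('<destination address>', 0.001, 'hello world', testnet=True)
#   print(send_result)    # {'txid': '...'} on success, {'error': '...'} otherwise
#
#   store_result = OP_RETURN_store(b'a longer payload split across chained transactions', testnet=True)
#   print(store_result)   # {'txids': [...], 'ref': '<height>-<partial txid>'}
#
#   print(OP_RETURN_retrieve(store_result['ref'], max_results=1, testnet=True))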
| python |