content — stringlengths: 0 to 894k
type — stringclasses: 2 values
#
# Convert raw output of the Caffe 'time' command
# to the CK timing format.
#
# Developers:
#   - Grigori Fursin, cTuning foundation, 2016
#   - Anton Lokhmotov, dividiti, 2016
#

import json
import os
import re

def ck_postprocess(i):
    ck=i['ck_kernel']

    d={}

    #######################################
    ck.out('Loading tmp-output1.tmp ...')
    r=ck.load_text_file({'text_file':'tmp-output1.tmp'})
    if r['return']>0: return r
    log=r['string']

    #######################################
    ck.out('Loading tmp-output2.tmp ...')
    r=ck.load_text_file({'text_file':'tmp-output2.tmp'})
    if r['return']>0: return r
    err=r['string']

    # Searching 1 execution time
    xttp=''
    ss=' run time: '
    j=log.find(ss)
    if j<0:
        ss='Total time: '
        j=log.find(ss)
    if j>=0:
        j1=log.find(' seconds.',j)
        if j1<0:
            j1=log.find(' s',j)
        if j1>=0:
            xttp=log[j+len(ss):j1].strip()

    if xttp=='':
        return {'return':1, 'error':'couldn\'t find total time in the output'}

    ttp=float(xttp)
    if ttp!=0:
        d['execution_time']=ttp
        d['execution_time_kernel_0']=ttp

    d['log_stdout']=log
    d['log_stderr']=err

    d['post_processed']='yes'

    # Write CK json
    r=ck.save_json_to_file({'json_file':'tmp-ck-timer.json', 'dict':d})
    if r['return']>0: return r

    return {'return':0}

# Do not add anything here!
python
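The parsing in the row above hinges on finding the seconds value that follows ' run time: ' or 'Total time: ' in Caffe's output. A minimal standalone sketch of that extraction, using a made-up log line (not part of the original file), looks like this:

# Hypothetical Caffe output line; only the "Total time: ... seconds." part matters here.
log = "I0101 12:00:00.000 caffe.cpp:404] Total time: 1.234 seconds."
ss = 'Total time: '
j = log.find(ss)
j1 = log.find(' seconds.', j)
xttp = log[j + len(ss):j1].strip()
print(float(xttp))  # 1.234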
#!/usr/bin/env python3

import asyncio
import time

import cryptocom.exchange as cro
from cryptocom.exchange.structs import Pair
from cryptocom.exchange.structs import PrivateTrade
from binance.client import Client


class CorecitoAccount:
    """Configures and runs the right code based on the selected exchange in config"""

    def __init__(self, config=None):
        self.exchange = config['corecito_exchange']
        self.api_key = config['api_key']
        self.api_secret = config['api_secret']
        self.core_number = config['core_number']
        self.min_price_stop = config['min_price_stop'] if 'min_price_stop' in config else None
        self.max_price_stop = config['max_price_stop'] if 'max_price_stop' in config else None
        self.min_core_number_increase_percentage = config['min_core_number_increase_percentage']
        self.max_core_number_increase_percentage = config['max_core_number_increase_percentage']
        self.min_core_number_decrease_percentage = config['min_core_number_decrease_percentage']
        self.max_core_number_decrease_percentage = config['max_core_number_decrease_percentage']
        self.is_fiat = config['is_fiat']

        if self.exchange == 'crypto.com':
            self.account = cro.Account(api_key=self.api_key, api_secret=self.api_secret)
            self.cro_exchange = cro.Exchange()
            self.base_currency = config['cryptocom_base_currency']
            self.core_number_currency = config['cryptocom_core_number_currency']
            self.pair = eval('cro.pairs.' + config['cryptocom_trading_pair'])
            self.pair_name = self.pair.name.replace('_', '/')
            self.cro_coin_base_currency = eval('cro.coins.' + config['cryptocom_base_currency'])
            self.cro_coin_core_number_currency = eval('cro.coins.' + config['cryptocom_core_number_currency'])
            self.max_decimals_buy = config['cryptocom_max_decimals_buy']
            self.max_decimals_sell = config['cryptocom_max_decimals_sell']

        elif self.exchange == 'binance':
            binance = Binance(public_key=self.api_key, secret_key=self.api_secret, sync=True)
            self.account = binance.b
            self.pair = config['binance_trading_pair']
            self.pair_name = self.pair.replace('_', '/')
            self.base_currency = config['binance_base_currency']
            self.core_number_currency = config['binance_core_number_currency']
            self.max_decimals_buy = config['binance_max_decimals_buy']
            self.max_decimals_sell = config['binance_max_decimals_sell']

        if not self.account:
            raise Exception('Could not connect to the exchange account with provided keys!')

    async def get_tickers(self):
        # Get pair ticker info
        if self.exchange == 'crypto.com':
            tickers = await self.cro_exchange.get_tickers()
            ticker = tickers[self.pair]
            buy_price = ticker.buy_price
            sell_price = ticker.sell_price
        elif self.exchange == 'binance':
            tickers = self.account.get_orderbook_tickers()
            # Example Binance {'symbol': 'ETHBTC', 'bidPrice': '0.02706800', 'bidQty': '7.30000000', 'askPrice': '0.02707300', 'askQty': '24.00000000'}
            # Bid == BUY, ask == SELL
            ticker = next((x for x in tickers if x["symbol"] == self.pair), None)
            buy_price = float(ticker["bidPrice"])
            sell_price = float(ticker["askPrice"])
        await asyncio.sleep(0.5)
        return({'buy_price': buy_price, 'sell_price': sell_price})

    async def get_balances(self):
        # Get account balances
        if self.exchange == 'crypto.com':
            balances = await self.account.get_balance()
            base_currency_balance = balances[self.cro_coin_base_currency]
            base_currency_available = base_currency_balance.available
            core_number_currency_balance = balances[self.cro_coin_core_number_currency]
            core_number_currency_available = core_number_currency_balance.available
        elif self.exchange == 'binance':
            base_currency_balance = self.account.get_asset_balance(asset=self.base_currency) or 0.0
            if base_currency_balance == 0.0:
                base_currency_available = 0.0
            else:
                base_currency_available = float(base_currency_balance["free"])
            core_number_currency_balance = self.account.get_asset_balance(asset=self.core_number_currency) or 0.0
            if core_number_currency_balance == 0.0:
                core_number_currency_available = 0.0
            else:
                core_number_currency_available = float(core_number_currency_balance["free"])
        await asyncio.sleep(0.5)
        return({'base_currency_balance': base_currency_balance,
                'base_currency_available': base_currency_available,
                'core_number_currency_balance': core_number_currency_balance,
                'core_number_currency_available': core_number_currency_available})

    async def order_market_buy(self, tx_result, quantity=0.0):
        if self.exchange == 'crypto.com':
            # NOTE: We use tx_result instead of quantity here because Crypto.com has a weird behaviour: it uses ETH to denominate the transaction
            await self.account.buy_market(self.pair, tx_result)
        elif self.exchange == 'binance':
            self.account.order_market_buy(symbol=self.pair, quantity=quantity)
        await asyncio.sleep(0.5)

    async def order_market_sell(self, quantity=0.0):
        if self.exchange == 'crypto.com':
            await self.account.sell_market(self.pair, quantity)
        elif self.exchange == 'binance':
            self.account.order_market_sell(symbol=self.pair, quantity=quantity)
        await asyncio.sleep(0.5)


# This wrapper solves time-offset inconsistencies between local-PC time and Binance server time
class Binance:
    def __init__(self, public_key='', secret_key='', sync=False):
        self.time_offset = 0
        self.b = Client(public_key, secret_key)

        if sync:
            self.time_offset = self._get_time_offset()

    def _get_time_offset(self):
        res = self.b.get_server_time()
        return res['serverTime'] - int(time.time() * 1000)

    def synced(self, fn_name, **args):
        args['timestamp'] = int(time.time() - self.time_offset)
python
#!/usr/bin/env python

import sys, os
sys.path.append(os.path.realpath(".."))
sys.path.append(os.path.realpath("../ElectronicComponents"))
sys.path.append(os.path.realpath("../ElectronicModel"))

import RPi.GPIO as GPIO  ## Import GPIO library
import time              ## Import 'time' library. Allows us to use 'sleep'

from ElectronicComponents import *
from ElectronicModel import Chase

# port for stop button
STOP_BUTTON = 21

def init_electronic():
    GPIO.setmode(GPIO.BCM)
    # Input reset (stop)
    # in the StopButton constructor
    #GPIO.setup(STOP_BUTTON, GPIO.IN)  ## stop button
    # init seven digits
    # in the seven_digits constructor
    # init the SN74HC595 inputs
    # in the SN74HC595 constructor

def main():
    # init electronic components
    InitGPIO.init_electronic()
    stop_button = StopButton(STOP_BUTTON)
    eight_outputs = SN74HC595({'ser': 5, 'oe': 6, 'rclk': 13, 'srclk': 19, 'srclr': 26})
    chase = Chase()

    eight_outputs.allow_output(True)
    while not stop_button.stop_state:
        ti = chase.ticks()
        print(ti)
        eight_outputs.write_output(ti)
        time.sleep(0.2)

    eight_outputs.write_output(128)
    time.sleep(2)

    # clean the GPIO
    InitGPIO.clean()

def main3():
    # init electronic components
    init_electronic()
    eight_outputs = SN74HC595({'ser': 5, 'oe': 6, 'rclk': 12, 'srclk': 19, 'srclr': 26})
    seven_digits_1 = SevenDigits((19, 26, 22, 27, 18, 13, 6, 17))
    seven_digits_2 = SevenDigits(output_ports=None, use_direct_gpio=False, component_interface=eight_outputs)
    chase = Chase()
    dummyChase = True
    objectOutput = None

    eight_outputs.allow_output(True)
    i = 0

    # step 1
    seven_digits_1.set_light_on("dc")
    seven_digits_2.set_light_off("dc")
    while not GPIO.input(STOP_BUTTON):
        output_mod1 = str(hex(i % 16))[-1]
        output_mod2 = str(hex((i // 16) % 16))[-1]  # integer division so hex() gets an int under Python 3
        #seven_digits_1.write_output( output_mod1 )
        if dummyChase:
            eight_outputs.write_output(chase.ticks())
        else:
            seven_digits_2.write_output(output_mod2)
        i += 1
        time.sleep(0.2)
    time.sleep(2)

    # step 2
    seven_digits_1.set_light_off("dc")
    seven_digits_2.set_light_on("dc")
    seven_digits_1.write_output("2")
    while not GPIO.input(STOP_BUTTON):
        for j in range(97, 103):
            seven_digits_2.write_output(chr(j))
            time.sleep(0.2)
    time.sleep(2)

    # step 3
    seven_digits_1.set_light_on("dc")
    seven_digits_2.set_light_on("dc")
    seven_digits_1.write_output("3")
    while not GPIO.input(STOP_BUTTON):
        for j in range(0, 8):
            print(j)
            seven_digits_2.write_output(pow(2, j))
            time.sleep(0.2)
    time.sleep(2)

    # step 4
    seven_digits_1.set_light_off("dc")
    seven_digits_2.set_light_off("dc")
    seven_digits_1.write_output("4")
    while not GPIO.input(STOP_BUTTON):
        for j in range(1, 7):
            f = 1 << j
            print(f)
            seven_digits_2.write_output(f)
            time.sleep(0.2)
    time.sleep(5)

    # clean the GPIO
    GPIO.cleanup()

def main2():
    # init electronic components
    init_electronic()
    eight_outputs = SN74HC595((23, 24, 25, 12, 20))
    seven_digits = SevenDigits((19, 26, 22, 27, 18, 13, 6, 17))
    seven_digits_2 = SevenDigits(output_ports=None, use_direct_gpio=False, component_interface=eight_outputs)

    eight_outputs.allow_output(True)
    seven_digits.write_output("-")
    for j in range(0, 10):
        print(j)
        seven_digits_2.write_output(str(j))
        time.sleep(2)

    GPIO.cleanup()

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        GPIO.cleanup()
python
""" ********** I2C Device ********** :Author: Michael Murton """ # Copyright (c) 2019-2021 MQTTany contributors # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. __all__ = ["getDeviceClass", "updateConfOptions"] import typing as t from collections import OrderedDict from common import update_dict from . import mcp230xx from .base import I2CDevice def getDeviceClass(device: str) -> t.Union[t.Type[I2CDevice], None]: """ Returns an I2CDevice subclass to handle ``device`` or ``None`` if one is not available. """ dev_classes: t.Dict[str, t.Type[I2CDevice]] = {} dev_classes.update(mcp230xx.SUPPORTED_DEVICES) return dev_classes.get(device, None) def updateConfOptions( conf_options: t.MutableMapping[str, t.Dict[t.Any, t.Any]] ) -> "OrderedDict[str, t.Dict[t.Any, t.Any]]": """ Returns a copy of ``conf_options`` updated with options from each device. """ conf_options = update_dict(conf_options, mcp230xx.CONF_OPTIONS) return t.cast("OrderedDict[str, t.Dict[t.Any, t.Any]]", conf_options)
python
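A quick usage sketch for the lookup helper above (not from the module itself): the device string 'mcp23017' is only an assumed key; the real keys come from mcp230xx.SUPPORTED_DEVICES.

# 'mcp23017' is an assumed device key for illustration only.
device_class = getDeviceClass("mcp23017")
if device_class is None:
    print("no handler available for this device")
else:
    print("handler class:", device_class.__name__)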
from rest_framework import serializers

from .models import Hero, FAQ, Help, Privacy


class HeroSerializer(serializers.ModelSerializer):
    image_url = serializers.SerializerMethodField()

    class Meta:
        model = Hero
        fields = [
            "id",
            "title",
            "description",
            "image_url",
            "activity_url",
            "explore_ideas_url",
            "tinkering_resource_url",
        ]

    def get_image_url(self, instance):
        return instance.image.name


class PrivacySerializer(serializers.ModelSerializer):
    class Meta:
        model = Privacy
        fields = [
            "privacy_policy",
            "terms_of_use",
            "edited_on",
        ]


class HelpSerializer(serializers.ModelSerializer):
    class Meta:
        model = Help
        fields = [
            "about",
        ]


class FAQListSerializer(serializers.ModelSerializer):
    class Meta:
        model = FAQ
        fields = [
            "question",
            "answer",
        ]
python
#!/usr/bin/python import os import matplotlib.pyplot as mplot import itertools from experiments import PATH_RESULTS, RESULT_SEP PATH_PLOTS = 'plots' PLOTS_EXTENSION = '.eps' PLOT_COLORS = itertools.cycle('bgrcmyk') # PLOT_STYLES = itertools.cycle('ov^<>1234sp*hH+xDd|_') PLOT_STYLES = itertools.cycle('op^s+xd|<D1H_>2*45vh') """ Set to 1 if you want to count only words that belong exclusively to ONE family. Set to 0 if you want to count words that simply belong to the family. """ exclusive = 0 """ Dear user, I really need to warn you before you make use of the following code. This code is not really part of the Multiword Project, it was written to conduct several (exhaustive and random) studies. Please, do not use this code on your own! You should better write your own code based on the other modules (certain.py, multiword.py, etc.). The code in this file is very specific to what we needed, I can not ensure that you will be fine with that... """ MACRO = {'fpp' : r'$\mathcal{F}_\mathregular{rep.3}$', 'fpu' : r'$\mathcal{F}_\mathregular{p.unb.}$', 'fa' : r'$\mathcal{F}_\mathregular{anch.}$', 'fu' : r'$\mathcal{F}_\mathregular{unr.}$'} def load_results_from_files(filenames): """ Aggregate the results that are in the files whose names or in given sequence of filenames. filenames -- a list of filenames """ def load_results_from_file(filename): f = open(os.path.join(PATH_RESULTS, filename)) results = [] for line in f: word, size, nplus, diff = line.split(RESULT_SEP) results.append((word, int(size))) return results results = [] for filename in filenames: results += load_results_from_file(filename) return results def prepare_results(results): """ Prepare a given set of results and return a dict structure that contains, for each size of words, a dict structure that contains, for each number of states, a list of words that have this size and this number of states. """ words = dict() for word, size in results: length = len(word) number_of_states = words.setdefault(length, dict()) list_of_words = number_of_states.setdefault(size, []) if word not in list_of_words: list_of_words.append(word) return words def plot_length(chart, prepared_results, length, label = ''): """ Plot onto chart the given results on one chart with: x-axis = number of states of the DFA y-axis = number of DFA having this number of states. Only the data of results that concern given word length are considered. chart -- A matplotlib.pyplot object. prepared_results -- A dict structure returned by prepare_results. length -- The words length to consider. label -- The label to use. Default is length. """ x_values = prepared_results[length].keys() x_values.sort() y_values = [] for x_value in x_values: number = len(prepared_results[length][x_value]) # VERY IMPORTANT, PLEASE READ!!! # When we first ran experiments, the number of DFA we computed for each # size n of word and each size s of alphabet was NOT s**n, but (s**n)/2. # We considered that, for instance, "aab" is equal to "bba" (there is # just an isomorphism). The "2" in the following line of code is there # to display the right number of DFA... Please note that the code # actually present in experiments.py generates exactly (s**n). Thus, # if you need to plot something using this function, be careful!! 
y_values.append(2 * number) label_to_use = label if label != '' else str(length) # print 'Drawing for length %d : \n%s\n%s' % (length, '\t'.join([str(x) for x in x_values]), '\t'.join([str(x) for x in y_values])) chart.plot(x_values, y_values, '%s-%s' % (PLOT_COLORS.next(), PLOT_STYLES.next()), label = label_to_use) if __name__ == '__main__': choice = raw_input('0) mdfa for |w|=14\n' + '1) mdfa_ab_2_16\n' + '2) mdfa_rand_ab_17_28\n' + '3) mdfa relative to |sigma|\n' + '4) Size of the families for ab\n' + '5) Size of the families for abc\n' + '6) Size of the families relative to |sigma|\n') choice = int(choice) if choice == 0: mplot.xlabel('Number of states') mplot.ylabel('Number of words') #mplot.title('Number of DFA\'s for each number of states') in_filenames = ['mdfa_ab_2_16.txt'] results = prepare_results(load_results_from_files(in_filenames)) out_filename = 'mdfa_ab_14' + PLOTS_EXTENSION results = prepare_results(load_results_from_files(in_filenames)) plot_length(mplot, results, 14, '14') mplot.savefig(os.path.join(PATH_PLOTS, out_filename)) mplot.show() elif choice == 1 or choice == 2: mplot.xlabel('Number of states') mplot.ylabel('Number of words') #mplot.title('Number of DFA\'s for each number of states') mplot.yscale('log') if choice == 1: in_filenames = ['mdfa_ab_2_16.txt'] else: in_filenames = ['mdfa_rand_ab_17_28.txt'] out_filename = in_filenames[0][:-4] + PLOTS_EXTENSION results = prepare_results(load_results_from_files(in_filenames)) for size in results.keys(): plot_length(mplot, results, size) if choice == 1: mplot.legend(loc = 2) else: mplot.legend(loc = 1) mplot.savefig(os.path.join(PATH_PLOTS, out_filename)) mplot.show() elif choice == 3: alphabets = ['ab', 'abc', 'abcd', 'abcde'] words_length = 8 mplot.xlim(words_length, words_length + words_length / 2 + 1) mplot.xlabel('Number of states') mplot.ylabel('Number of words') #mplot.title('Different alphabet sizes, |w| = %d.' % words_length) mplot.yscale('log') for alphabet in alphabets: print 'Considering %s...' % alphabet results = load_results_from_files(['mdfa_%s_%d_%d.txt' % (alphabet, words_length, words_length)]) print 'Preparing results...' 
results = prepare_results(results) print 'Plotting...\n' plot_length(mplot, results, words_length, 'Size %d' % len(alphabet)) mplot.legend(loc = 1) mplot.savefig(os.path.join(PATH_PLOTS, ('mdfa_alphabets_%d'+PLOTS_EXTENSION) % words_length)) mplot.show() elif choice == 4 or choice == 5: if choice == 4: sizes = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) results = [] results.append((2, [0, 0], [2, 0], [0, 0], [2, 0], 0)) results.append((4, [0, 0], [4, 2], [2, 0], [2, 0], 0)) results.append((8, [2, 0], [6, 0], [6, 0], [4, 0], 0)) results.append((16, [2, 0], [10, 6], [4, 0], [4, 0], 4)) results.append((32, [2, 0], [14, 4], [14, 4], [8, 4], 8)) results.append((64, [4, 0], [28, 10], [26, 8], [8, 2], 22)) results.append((128, [4, 0], [42, 8], [56, 20], [16, 8], 52)) results.append((256, [4, 0], [84, 26], [100, 36], [24, 12], 114)) results.append((512, [10, 0], [154, 44], [194, 76], [40, 20], 244)) results.append((1024, [10, 4], [300, 118], [356, 148], [72, 38], 502)) results.append((2048, [10, 4], [570, 252], [648, 292], [132, 86], 1052)) results.append((4096, [22, 4], [1150, 590], [1170, 536], [252, 158], 2156)) results.append((8192, [22, 12], [2234, 1276], [2130, 1024], [480, 320], 4444)) results.append((16384, [22, 14], [4468, 2798], [3844, 1884], [940, 640], 9080)) results.append((32768, [52, 20], [8866, 5924], [6916, 3476], [1824, 1292], 18584)) results.append((65536, [52, 32], [17706, 12566], [12498, 6320], [3660, 2600], 37820)) if choice == 5: sizes = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14) results = [] results.append((3, [0, 0], [3, 0], [0, 0], [3, 0], 0)) results.append((9, [0, 0], [9, 6], [3, 0], [3, 0], 0)) results.append((27, [3, 0], [21, 6], [18, 0], [9, 0], 0)) results.append((81, [3, 0], [57, 18], [48, 0], [15, 0], 12)) results.append((243, [3, 0], [147, 30], [150, 12], [39, 12], 48)) results.append((729, [9, 0], [441, 90], [474, 66], [93, 24], 132)) results.append((2187, [9, 0], [1245, 144], [1578, 300], [243, 54], 402)) results.append((6561, [9, 0], [3735, 378], [4950, 1062], [735, 192], 1032)) results.append((19683, [33, 0], [11055, 804], [15666, 3840], [2037, 426], 2754)) results.append((59049, [33, 12], [33111, 2214], [48720, 12738], [6291, 1182], 6900)) results.append((177147, [33, 18], [98877, 5634], [150780, 42156], [18303, 2904], 17796)) results.append((531441, [105, 12], [296697, 15564], [463590, 134778], [55689, 7914], 44268)) results.append((1594323, [105, 54], [888627, 41700], [1420818, 428226], [165219, 19500], 112200)) results.append((4782969,[105, 78],[2665881, 112806],[4338714, 1338828],[498975, 52134],279210)) mplot.xlabel('|w|') mplot.ylabel('Percentage of words') #mplot.title('Coverages of the families, alphabet has %d symbols.' 
% (choice - 2)) prim = [x[1][exclusive] * 100.0 / x[0] for x in results] unb = [x[2][exclusive] * 100.0 / x[0] for x in results] anc = [x[3][exclusive] * 100.0 / x[0] for x in results] ove = [x[4][exclusive] * 100.0 / x[0] for x in results] other = [x[5] * 100.0 / x[0] for x in results] # mplot.plot(sizes, words, '%s-%s' % (PLOT_COLORS.next(), PLOT_STYLES.next()), label = '# words') mplot.plot(sizes, prim, '%s-%s' % (PLOT_COLORS.next(), PLOT_STYLES.next()), label = MACRO['fpp']) mplot.plot(sizes, unb, '%s-%s' % (PLOT_COLORS.next(), PLOT_STYLES.next()), label = MACRO['fpu']) mplot.plot(sizes, anc, '%s-%s' % (PLOT_COLORS.next(), PLOT_STYLES.next()), label = MACRO['fa']) mplot.plot(sizes, ove, '%s-%s' % (PLOT_COLORS.next(), PLOT_STYLES.next()), label = MACRO['fu']) mplot.plot(sizes, other, '%s-%s' % (PLOT_COLORS.next(), PLOT_STYLES.next()), label = 'others') mplot.ylim(0, 100) if choice == 4: mplot.xlim(0, 17) mplot.legend(loc = 1) mplot.savefig(os.path.join(PATH_PLOTS, 'families_ab_1_16'+PLOTS_EXTENSION)) elif choice == 5: mplot.xlim(1, 15) mplot.legend(loc = 2) mplot.savefig(os.path.join(PATH_PLOTS, 'families_abc_1_14'+PLOTS_EXTENSION)) mplot.show() elif choice == 6: mplot.xlabel('$|\\Sigma|$') mplot.ylabel('Percentage of words') #mplot.title('Coverages of the families, |w| = 8.') sizes = (2, 3, 4, 5, 6) results = [] results.append((256, [4, 0], [84, 26], [100, 36], [24, 12], 114)) results.append((6561, [9, 0], [3735, 378], [4950, 1062], [735, 192], 1032)) results.append((65536, [16, 0], [45328, 2460], [56640, 7176], [7864, 1248], 5172)) results.append((390625, [25, 0], [297525, 11060], [354580, 28740], [45285, 5880], 19080)) results.append((1679616, [36, 0], [1354356, 40950], [1557540, 85500], [181776, 23100], 57990)) prim = [x[1][exclusive] * 100.0 / x[0] for x in results] unb = [x[2][exclusive] * 100.0 / x[0] for x in results] anc = [x[3][exclusive] * 100.0 / x[0] for x in results] ove = [x[4][exclusive] * 100.0 / x[0] for x in results] other = [x[5] * 100.0 / x[0] for x in results] mplot.xlim(sizes[0] - 1, sizes[-1] + 1) mplot.ylim(0, 100) # mplot.plot(sizes, words, '%s-%s' % (PLOT_COLORS.next(), PLOT_STYLES.next()), label = '# words') mplot.plot(sizes, prim, '%s-%s' % (PLOT_COLORS.next(), PLOT_STYLES.next()), label = MACRO['fpp']) mplot.plot(sizes, unb, '%s-%s' % (PLOT_COLORS.next(), PLOT_STYLES.next()), label = MACRO['fpu']) mplot.plot(sizes, anc, '%s-%s' % (PLOT_COLORS.next(), PLOT_STYLES.next()), label = MACRO['fa']) mplot.plot(sizes, ove, '%s-%s' % (PLOT_COLORS.next(), PLOT_STYLES.next()), label = MACRO['fu']) mplot.plot(sizes, other, '%s-%s' % (PLOT_COLORS.next(), PLOT_STYLES.next()), label = 'others') mplot.legend(loc = 2) mplot.savefig(os.path.join(PATH_PLOTS, 'families_alphabets_8'+PLOTS_EXTENSION)) mplot.show()
python
import datetime, os, sys import logging, functools import inspect import timeit from .ext_time import time_elapsed from .decorators import apply_decorator_to_all_functions_in_module def apply_logging_to_all_functions_in_module(module): """ To be used after creating a logger with dero.logging.create_logger(), and after importing a module. On subsequent calls to any functions from that module, they will be logged using the log_with decorator. NOTE: Be careful not to use this on any module containing a function to be called many times. For such modules, it is better to use the log_with decorator directly excluding those functions. Usage: import module import dero logger = dero.logging.create_logger() dero.logging.apply_logging_to_all_functions_in_module(module) module.whatever_function() #logs correctly """ name = _get_all_prior_frames() name += '.' + module.__name__ module.logger = logging.getLogger(name) module.log = log_with(module.logger) apply_decorator_to_all_functions_in_module(module, module.log) def create_logger(name='main'): """ Creates a logger in the __main__ namespace. Sets three handlers, two to file and one to stdout. All output goes to the .debug file, info and higher goes to the .log file, and error and higher goes to stdout. Pass a name to name log files. Usage: Imagine a project with three files, main.py, bar.py, and baz.py. We want to use the create_logger() function in the main namespace (file being run), and get_logger() in the imported files. Normal logs: Then log entries may be created with logger.debug(), logger.info(), logger.warning(), logger.error(), and logger.critical(). Exceptions: Log caught exceptions with logger.exception('Custom message'), this will include the traceback Entering and exiting functions: Use @dero.logging.log_with(logger) decorator, logs when entering and exiting function as well as passed args and kwargs and return values. Logs enter and exit at the info level and parameters and return values at the debug level. 
Example usage: main.py: import dero logger = dero.logging.create_logger() logger.info('Starting main') bar.barf() bar.py: import dero import baz logger = dero.logging.get_logger() def barf(): logger.info('some info about barf') baz.baz() baz.py: import dero logger = dero.logging.get_logger() def baz(): logger.info('some info about baz') Running main.py will output: 2016-08-08 15:09:17,109 - __main__ - INFO - Starting main 2016-08-08 15:09:17,111 - __main__.bar - INFO - some info about barf 2016-08-08 15:09:17,111 - __main__.bar.baz - INFO - some info about baz """ #Clear Jupyter notebook logger (this is code that only needs to be run in jupyter notebook) logger = logging.getLogger() logger.handlers = [] #Create logger logger = logging.getLogger('__main__') logger.setLevel(logging.DEBUG) handlers = [] #container for handlers #Make log dir if not os.path.exists('Logs'): os.makedirs('Logs') #Create debug logfile which logs everything creation_time = str(datetime.datetime.now().replace(microsecond=0)).replace(':','.') debug_handler = logging.FileHandler(r'Logs\{} {}.debug'.format(creation_time, name)) debug_handler.setLevel(logging.DEBUG) handlers.append(debug_handler) #Create standard logfile which logs process (info and up) info_handler = logging.FileHandler(r'Logs\{} {}.log'.format(creation_time, name)) info_handler.setLevel(logging.INFO) handlers.append(info_handler) #Now log errors to standard output error_handler = logging.StreamHandler(sys.stdout) error_handler.setLevel(logging.ERROR) handlers.append(error_handler) formatter = logging.Formatter('%(asctime)ls - %(name)s - %(levelname)s - %(message)s') for handler in handlers: handler.setFormatter(formatter) logger.addHandler(handler) return logger def get_logger(): """ To be used in an imported file. See create_logger() for usage. """ name = _get_all_prior_frames() return logging.getLogger(name) def _get_all_prior_frames(): """ Gets the calling stack formatted as a string seperated by periods, e.g.: __main__.bar.baz """ frame = inspect.currentframe() out = [] #container for output while True: frame = frame.f_back name = _filter_frame(frame) if frame is not None: if name is not False: #if False, is a name we don't need to record, should just continue out = [name] + out if name == '__main__': #once we get to __main__, we're done (ignore IPython stuff) return '.'.join(out) else: #if frame is none, we're done (no more frames) return '.'.join(out) def _filter_frame(frame): """ Checks if this frame is something meaningful and takes the appropriate action Returns the name if valid name, returns False if invalid name, returns None if frame is None """ try: name = frame.f_globals['__name__'] except AttributeError: #frame is None return None if name in ('importlib._bootstrap','importlib._bootstrap_external', __name__): return False return name def get_func_signature(func): code_list = inspect.getsourcelines(func)[0] code_str = ' '.join([c.strip() for c in code_list]) return code_str[code_str.find('def') + 4:code_str.find(':')] class log_with(object): '''Logging decorator that allows you to log with a specific logger. By default, logs entering and exiting function as well as arguments passed at the info level. 
Usage: import logging import dero logging.basicConfig() log = logging.getLogger('__name__') #can use custom name but using module name comes with benefits log.setLevel(logging.DEBUG) @dero.logging.log_with(log) def test_func(a, b, c=5): return a + b ''' # Customize these messages ENTRY_MESSAGE = 'Entering {}' args_message = 'Passed Args: \n{}, Kwargs: {}' result_message = '{} Result: \n{}' time_message = '{} took {}' EXIT_MESSAGE = 'Exiting {}' def __init__(self, logger=None, timer=True): self.logger = logger self.timer = timer def __call__(self, func): '''Returns a wrapper that wraps func. The wrapper will log the entry and exit points of the function with logging.INFO level. ''' # set logger if it was not set earlier if not self.logger: logging.basicConfig() self.logger = logging.getLogger(func.__module__) @functools.wraps(func) def wrapper(*args, **kwds): if self.timer: start_time = timeit.default_timer() self.logger.info(self.ENTRY_MESSAGE.format(get_func_signature(func))) # logging level .info(). Set to .debug() if you want to self.logger.debug(self.args_message.format(args, kwds)) f_result = func(*args, **kwds) self.logger.debug(self.result_message.format(func.__name__, f_result)) time_elapsed_str = time_elapsed(timeit.default_timer() - start_time) self.logger.debug(self.time_message.format(func.__name__, time_elapsed_str)) self.logger.info(self.EXIT_MESSAGE.format(func.__name__)) # logging level .info(). Set to .debug() if you want to return f_result return wrapper class Logger: def __init__(self, log_dir): self.log_dir = log_dir self.log_list = [] self.create_log_file() def log(self, message, error=False, neverprint=False): if error: message = 'ERROR: ' + message if message != '\n': time = datetime.datetime.now().replace(microsecond=0) message = str(time) + ': ' + message if self.debug and not neverprint: sys.stdout.write(message + '\n') sys.stdout.flush() #forces output now try: with open(self.log_path, 'a') as f: [f.write(item) for item in self.log_list] #log anything saved in memory that couldn't be written before f.write(message) f.write('\n') self.log_list = [] except PermissionError: #if someone happened to write to the file at the same time self.log_list.append(message) #save it to log later self.log_list.append('\n') def create_log_file(self): name = 'log_' + str(datetime.datetime.now().replace(microsecond=0)).replace(':','.') + '.txt' if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) self.log_path = os.path.join(self.log_dir, name) if not os.path.exists(self.log_path): with open(self.log_path, 'w') as f: f.write('\n')
python
#!/usr/bin/env python
"""Demonstrates configurable logging output"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import logging


def main():
    """Main function

    Set arguments, configure logging, run test"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-l', '--loglevel',
        metavar='LEVEL',
        type=str.lower,
        choices=['critical', 'error', 'warning', 'info', 'debug', 'notset'],
        default='notset',
        help="Highest level of log message to display",
    )
    args = parser.parse_args()

    loglevel = getattr(logging, args.loglevel.upper(), None)
    if not isinstance(loglevel, int):
        raise ValueError('Invalid log level: {}'.format(loglevel))
    logging.basicConfig(
        format='%(levelname)s:%(message)s',
        level=loglevel
    )

    test()


def test():
    """Function to test each log level"""
    logging.critical('This is a CRITICAL message')
    logging.error('This is an ERROR message')
    logging.warning('This is a WARNING message')
    logging.info('This is an INFO message')
    logging.debug('This is a DEBUG message')


if __name__ == '__main__':
    main()
python
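As a usage note for the demo above (the filename log_demo.py is assumed): running `python log_demo.py --loglevel warning` should print only the CRITICAL, ERROR, and WARNING lines, while `--loglevel debug` prints all five test messages.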
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-09 12:32
from __future__ import unicode_literals

from django.db import migrations
import mptt
import mptt.managers


def _add_mptt_manager(cls):
    manager = mptt.managers.TreeManager()
    manager.model = cls
    mptt.register(cls, parent_attr='super_event')
    manager.contribute_to_class(cls, 'objects')


def external_image_url_to_image_url(apps, schema_editor):
    Event = apps.get_model("events", "Event")
    Image = apps.get_model("events", "Image")
    _add_mptt_manager(Event)
    for event in Event.objects.filter(external_image_url__isnull=False):
        url = event.external_image_url
        image_object = Image.objects.create(url=url)
        event.image = image_object
        event.external_image_url = None
        event.save()


def image_url_to_external_image_url(apps, schema_editor):
    Event = apps.get_model("events", "Event")
    Image = apps.get_model("events", "Image")
    _add_mptt_manager(Event)
    for event in Event.objects.filter(image__url__isnull=False).filter(image__image__exact=''):
        url = event.image.url
        event.external_image_url = url
        event.save()


class Migration(migrations.Migration):

    dependencies = [
        ('events', '0017_auto_20160208_1729'),
    ]

    operations = [
        migrations.RunPython(external_image_url_to_image_url, image_url_to_external_image_url),
    ]
python
import json

with open("./package.json", "r") as f:
    data = json.loads(f.read())

with open("./package.py", "w") as fw:
    fw.write(
        "version = '{0}';stable = {1}".format(
            data["version"], data["stable"]))
python
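To make the round trip above concrete: with a hypothetical ./package.json containing {"version": "1.2.3", "stable": true}, the snippet would write a one-line ./package.py. A small self-contained sketch:

import json

# Hypothetical package.json contents, inlined for illustration.
data = json.loads('{"version": "1.2.3", "stable": true}')
line = "version = '{0}';stable = {1}".format(data["version"], data["stable"])
print(line)  # version = '1.2.3';stable = True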
from flask import Flask, render_template, request, redirect, url_for

from index import Index

app = Flask(__name__)


@app.route("/", methods=["GET", "POST"])
def form():
    return render_template("form.html")


@app.route("/search_result", methods=["GET", "POST"])
def search_result():
    if request.method == "POST":
        search_key = request.form["key"]
        idx = Index()
        # TODO : split search_list
        search_list = idx.search(search_key)
        return render_template(
            "search_result.html",
            results=search_list,
            search_len=len(search_list)
        )


@app.route("/doc/<path>")
def doc(path):
    try:
        return render_template(f"/doc/{path}")
    except Exception as e:
        return str(e)


if __name__ == "__main__":
    app.run()
python
# -*- coding: utf-8 -*-
from django.urls import *

from .views import SuccessResponseView

urlpatterns = [
    path('preview/<int:basket_id>/', SuccessResponseView.as_view(preview=True),
         name='pagseguro-success-response'),
    path('checkout/payment-details/', SuccessResponseView.as_view(preview=True),
         name='pagseguro-success-response'),
    path('checkout/preview/', SuccessResponseView.as_view(preview=True),
         name='pagseguro-success-response'),
    path('retorno/pagseguro/', include('pagseguro.urls')),
]
python
from .base_setup import Base from rest_framework import status from django.urls import reverse from django.core import mail from authors.apps.authentication.models import User from authors.apps.profiles.models import Profile from authors.apps.core.cron import EmailNotificationCron class ArticleDeleteUpdateTests(Base): """Test suite for favouriting articles.""" def setUp(self): """Setup data for the tests.""" super().setUp() self.res = self.client.post( self.article_url, self.article_data, format="json", **self.headers_one) def tearDown(self): """Teardown for the tests.""" super().tearDown() def test_successfull_notification(self): """ Tests that a user successfully receiving notifications. """ notification = self.client.get( reverse('notifications:my_notifications'), **self.headers_two) self.assertEqual(notification.status_code, status.HTTP_200_OK) def test_successfully_get_a_notification(self): """ Tests that a user can get a notification. """ EmailNotificationCron().do() self.assertEqual(len(mail.outbox), 3) notification = self.client.get( reverse('notifications:my_notifications'), **self.headers_two) pk = [*notification.data][0] response = self.client.get( reverse('notifications:notification', kwargs={'pk': pk}), **self.headers_two) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_successfully_delete_notification(self): """ Tests that a user can delete a notification. """ notification = self.client.get( reverse('notifications:my_notifications'), **self.headers_two) pk = [*notification.data][0] delete = self.client.delete( reverse('notifications:notification', kwargs={'pk': pk}), **self.headers_two) self.assertEqual(delete.status_code, status.HTTP_200_OK) def test_unsuccessfully_delete_notification(self): """ Tests that a user cannot delete a notification they do not own. """ notification = self.client.get( reverse('notifications:my_notifications'), **self.headers_two) pk = [*notification.data][0] delete = self.client.delete( reverse('notifications:notification', kwargs={'pk': pk}), **self.headers_one) self.assertEqual(delete.status_code, status.HTTP_403_FORBIDDEN) def test_unsuccessfully_mark_read_notification(self): """ Tests that a user cannot mark read a notification they do not own. """ notification = self.client.get( reverse('notifications:my_notifications'), **self.headers_two) pk = [*notification.data][0] delete = self.client.put( reverse('notifications:notification', kwargs={'pk': pk}), **self.headers_one) self.assertEqual(delete.status_code, status.HTTP_403_FORBIDDEN) def test_successfully_mark_read_notification(self): """ Tests that a user successfully marks as read. """ notification = self.client.get( reverse('notifications:my_notifications'), **self.headers_two) pk = [*notification.data][0] delete = self.client.put( reverse('notifications:notification', kwargs={'pk': pk}), **self.headers_two) self.assertEqual(delete.status_code, status.HTTP_200_OK) def test_unsuccessfully_mark_read_notification(self): """ Tests that a user cannot mark as read a notification they do not own. """ notification = self.client.get( reverse('notifications:my_notifications'), **self.headers_two) pk = [*notification.data][0] delete = self.client.put( reverse('notifications:notification', kwargs={'pk': pk}), **self.headers_one) self.assertEqual(delete.status_code, status.HTTP_403_FORBIDDEN) def test_successfully_mark_all_notification_as_read(self): """ Tests that a user successfully marks all as read. 
""" notification = self.client.put( reverse('notifications:my_notifications'), **self.headers_two) self.assertEqual(notification.status_code, status.HTTP_200_OK) response = self.client.get( reverse('notifications:my_notifications'), **self.headers_two) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_unsuccessfully_mark_non_existing_notification(self): """ Tests that a user unssuccessful marks as read non existing notification. """ response = self.client.put( reverse('notifications:notification', kwargs={'pk': 500}), **self.headers_two) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_unsuccessfully_delete_non_existing_notification(self): """ Tests that a user unsuccessfully deletes non-existing notification. """ response = self.client.delete( reverse('notifications:notification', kwargs={'pk': 500}), **self.headers_two) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_unsuccessfully_get_non_existing_notification(self): """ Tests that a user unsuccessfully gets non-existing notification. """ response = self.client.get( reverse('notifications:notification', kwargs={'pk': 500}), **self.headers_two) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_successfully_activate_app_notification(self): """ Tests that a user successfully activating notifications. """ response = self.client.post( reverse('notifications:switch_app_notifications'), **self.headers_two) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_successfully_deactivate_app_notification(self): """ Tests that a user successfully deactivating notifications. """ self.client.post( reverse('notifications:switch_app_notifications'), **self.headers_one) response = self.client.post( reverse('notifications:switch_app_notifications'), **self.headers_one) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_successfully_activate_email_notification(self): """ Tests that a user successfully activating notifications. """ response = self.client.post( reverse('notifications:switch_email_notifications'), **self.headers_two) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_successfully_deactivate_email_notification(self): """ Tests that a user successfully deactivating notifications. """ self.client.post( reverse('notifications:switch_email_notifications'), **self.headers_one) response = self.client.post( reverse('notifications:switch_email_notifications'), **self.headers_one) self.assertEqual(response.status_code, status.HTTP_200_OK)
python
################################################################################# # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # # # Licensed under the Apache License, Version 2.0 (the "License"). # # You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. # ################################################################################# import pkg_resources from unittest import TestCase from shapely.geometry import Point from deepracer_track_geometry.track_geometry import TrackGeometry from deepracer_track_geometry.constants import TrackDirection, TrackRegion, NdistMode, FiniteDifference class TrackGeometryTest(TestCase): def setUp(self) -> None: self.track_name = "monaco" self.track = TrackGeometry(self.track_name) def test_get_track_name(self) -> None: self.assertEqual(self.track.track_name, self.track_name) def test_get_track_length(self) -> None: self.assertEqual(self.track.length, self.track.track_center_line.length) def test_set_finish_line_wrap(self) -> None: self.track.finish_line = -0.3 self.assertEqual(self.track.finish_line, 0.7) def test_set_finish_line_positive(self) -> None: # positive self.track.finish_line = 0.3 self.assertEqual(self.track.finish_line, 0.3) def test_set_direction_invalid(self) -> None: with self.assertRaises(ValueError): # Invalid finish line value self.track.direction = "reverse" def test_set_direction_cw(self) -> None: self.track.direction = TrackDirection.CLOCKWISE.value self.assertEqual(self.track.direction, TrackDirection.CLOCKWISE) def test_set_direction_ccw(self) -> None: self.track.direction = TrackDirection.COUNTER_CLOCKWISE.value self.assertEqual(self.track.direction, TrackDirection.COUNTER_CLOCKWISE) def test_is_on_track_invalid_dimension(self) -> None: with self.assertRaises(ValueError) as ex: # Invalid coordinate value self.assertFalse(self.track.is_on_track(coordinates=[-0.60])) self.assertEqual("need at least 2 dimension coordinates.", str(ex.exception)) with self.assertRaises(ValueError) as ex: # Invalid coordinate value self.assertFalse(self.track.is_on_track(coordinates=[-0.60, 0.93, 0.1, 0.1])) self.assertEqual("max dimension of coordinates is 3.", str(ex.exception)) with self.assertRaises(ValueError) as ex: # Invalid coordinate value self.assertFalse(self.track.get_region_on_track(coordinates=[-0.60])) self.assertEqual("need at least 2 dimension coordinates.", str(ex.exception)) with self.assertRaises(ValueError) as ex: # Invalid coordinate value self.assertFalse(self.track.get_region_on_track(coordinates=[-0.60, 0.93, 0.1, 0.1])) self.assertEqual("max dimension of coordinates is 3.", str(ex.exception)) def test_is_on_track_inner_offtrack(self) -> None: coords = [-0.60, 0.93] # Inner Offtrack self.assertFalse(self.track.is_on_track(coordinates=coords)) self.assertEqual(self.track.get_region_on_track(coordinates=coords), TrackRegion.INNER_OFFTRACK) coords = [-0.60, 0.93, 3] # Inner Offtrack, z coord doesn't matter self.assertFalse(self.track.is_on_track(coordinates=coords)) self.assertEqual(self.track.get_region_on_track(coordinates=coords), TrackRegion.INNER_OFFTRACK) def 
test_is_on_track_inner_border(self) -> None: coords = [-6.38, 0.93] # Inner border self.assertFalse(self.track.is_on_track(coordinates=coords)) self.assertEqual(self.track.get_region_on_track(coordinates=coords), TrackRegion.INNER_OFFTRACK) coords = [-6.38, 0.93, 1] # Inner border, z coord doesn't matter self.assertFalse(self.track.is_on_track(coordinates=coords)) self.assertEqual(self.track.get_region_on_track(coordinates=coords), TrackRegion.INNER_OFFTRACK) def test_is_on_track_center_line(self) -> None: coords = [-7.014, 1.28] # Center Line self.assertTrue(self.track.is_on_track(coordinates=coords)) self.assertEqual(self.track.get_region_on_track(coordinates=coords), TrackRegion.INNER_LANE) coords = [-7.014, 1.28, 1] # Center Line, z coord doesn't matter self.assertTrue(self.track.is_on_track(coordinates=coords)) self.assertEqual(self.track.get_region_on_track(coordinates=coords), TrackRegion.INNER_LANE) def test_is_on_track_outer_lane(self) -> None: coords = [-7.2, 1.28] # Outer Lane self.assertTrue(self.track.is_on_track(coordinates=coords)) self.assertEqual(self.track.get_region_on_track(coordinates=coords), TrackRegion.OUTER_LANE) coords = [-7.2, 1.28, 3] # Outer lane, z coord doesn't matter self.assertTrue(self.track.is_on_track(coordinates=coords)) self.assertEqual(self.track.get_region_on_track(coordinates=coords), TrackRegion.OUTER_LANE) def test_is_on_track_outer_offtrack_shapely_point(self) -> None: coords = [-8.2, 1.28] # Outer Offtrack self.assertFalse(self.track.is_on_track(coordinates=Point(coords))) self.assertEqual(self.track.get_region_on_track(coordinates=Point(coords)), TrackRegion.OUTER_OFFTRACK) coords = [-8.2, 1.28, 3] # Outer Offtrack, z coord doesn't matter self.assertFalse(self.track.is_on_track(coordinates=Point(coords))) self.assertEqual(self.track.get_region_on_track(coordinates=Point(coords)), TrackRegion.OUTER_OFFTRACK) def test_is_on_track_inner_offtrack_shapely_point(self) -> None: coords = [-0.60, 0.93] # Inner Offtrack self.assertFalse(self.track.is_on_track(coordinates=Point(coords))) self.assertEqual(self.track.get_region_on_track(coordinates=Point(coords)), TrackRegion.INNER_OFFTRACK) coords = [-0.60, 0.93, 3] # Inner Offtrack, z coord doesn't matter self.assertFalse(self.track.is_on_track(coordinates=Point(coords))) self.assertEqual(self.track.get_region_on_track(coordinates=Point(coords)), TrackRegion.INNER_OFFTRACK) def test_is_on_track_inner_border_shapely_point(self) -> None: coords = [-6.38, 0.93] # Inner border self.assertFalse(self.track.is_on_track(coordinates=Point(coords))) self.assertEqual(self.track.get_region_on_track(coordinates=Point(coords)), TrackRegion.INNER_OFFTRACK) coords = [-6.38, 0.93, 1] # Inner border, z coord doesn't matter self.assertFalse(self.track.is_on_track(coordinates=Point(coords))) self.assertEqual(self.track.get_region_on_track(coordinates=Point(coords)), TrackRegion.INNER_OFFTRACK) def test_is_on_track_center_line_shapely_point(self) -> None: coords = [-7.014, 1.28] # Center Line self.assertTrue(self.track.is_on_track(coordinates=Point(coords))) self.assertEqual(self.track.get_region_on_track(coordinates=Point(coords)), TrackRegion.INNER_LANE) coords = [-7.014, 1.28, 1] # Center Line, z coord doesn't matter self.assertTrue(self.track.is_on_track(coordinates=Point(coords))) self.assertEqual(self.track.get_region_on_track(coordinates=Point(coords)), TrackRegion.INNER_LANE) def test_is_on_track_outer_lane_shapely_point(self) -> None: coords = [-7.2, 1.28] # Outer Lane 
self.assertTrue(self.track.is_on_track(coordinates=Point(coords))) self.assertEqual(self.track.get_region_on_track(coordinates=Point(coords)), TrackRegion.OUTER_LANE) coords = [-7.2, 1.28, 3] # Outer lane, z coord doesn't matter self.assertTrue(self.track.is_on_track(coordinates=Point(coords))) self.assertEqual(self.track.get_region_on_track(coordinates=Point(coords)), TrackRegion.OUTER_LANE) def test_is_on_track_outer_offtrack(self) -> None: coords = [-8.2, 1.28] # Outer Offtrack self.assertFalse(self.track.is_on_track(coordinates=coords)) self.assertEqual(self.track.get_region_on_track(coordinates=coords), TrackRegion.OUTER_OFFTRACK) coords = [-8.2, 1.28, 3] # Outer Offtrack, z coord doesn't matter self.assertFalse(self.track.is_on_track(coordinates=coords)) self.assertEqual(self.track.get_region_on_track(coordinates=coords), TrackRegion.OUTER_OFFTRACK) def _test_ndist(self, ndist_mode) -> None: test_ndist = 0.1 coords = self.track.get_point_from_ndist(test_ndist, ndist_mode=ndist_mode) ndist = self.track.get_ndist_from_point(coords, ndist_mode=ndist_mode) self.assertAlmostEqual(ndist, test_ndist) test_ndist = 0.3 coords = self.track.get_point_from_ndist(test_ndist, ndist_mode=ndist_mode) ndist = self.track.get_ndist_from_point(coords, ndist_mode=ndist_mode) self.assertAlmostEqual(ndist, test_ndist) test_ndist = 0.5 coords = self.track.get_point_from_ndist(test_ndist, ndist_mode=ndist_mode) ndist = self.track.get_ndist_from_point(coords, ndist_mode=ndist_mode) self.assertAlmostEqual(ndist, test_ndist) test_ndist = 0.8 coords = self.track.get_point_from_ndist(test_ndist, ndist_mode=ndist_mode) ndist = self.track.get_ndist_from_point(coords, ndist_mode=ndist_mode) self.assertAlmostEqual(ndist, test_ndist) test_ndist = 0.0 coords = self.track.get_point_from_ndist(test_ndist, ndist_mode=ndist_mode) ndist = self.track.get_ndist_from_point(coords, ndist_mode=ndist_mode) self.assertAlmostEqual(ndist, test_ndist) test_ndist = 1.0 coords = self.track.get_point_from_ndist(test_ndist, ndist_mode=ndist_mode) ndist = self.track.get_ndist_from_point(coords, ndist_mode=ndist_mode) self.assertAlmostEqual(ndist, 0.0) def _test_ndist_shapely_point(self, ndist_mode) -> None: test_ndist = 0.1 coords = self.track.get_point_from_ndist(test_ndist, ndist_mode=ndist_mode) ndist = self.track.get_ndist_from_point(Point(coords), ndist_mode=ndist_mode) self.assertAlmostEqual(ndist, test_ndist) test_ndist = 0.3 coords = self.track.get_point_from_ndist(test_ndist, ndist_mode=ndist_mode) ndist = self.track.get_ndist_from_point(Point(coords), ndist_mode=ndist_mode) self.assertAlmostEqual(ndist, test_ndist) test_ndist = 0.5 coords = self.track.get_point_from_ndist(test_ndist, ndist_mode=ndist_mode) ndist = self.track.get_ndist_from_point(Point(coords), ndist_mode=ndist_mode) self.assertAlmostEqual(ndist, test_ndist) test_ndist = 0.8 coords = self.track.get_point_from_ndist(test_ndist, ndist_mode=ndist_mode) ndist = self.track.get_ndist_from_point(Point(coords), ndist_mode=ndist_mode) self.assertAlmostEqual(ndist, test_ndist) test_ndist = 0.0 coords = self.track.get_point_from_ndist(test_ndist, ndist_mode=ndist_mode) ndist = self.track.get_ndist_from_point(Point(coords), ndist_mode=ndist_mode) self.assertAlmostEqual(ndist, test_ndist) test_ndist = 1.0 coords = self.track.get_point_from_ndist(test_ndist, ndist_mode=ndist_mode) ndist = self.track.get_ndist_from_point(Point(coords), ndist_mode=ndist_mode) self.assertAlmostEqual(ndist, 0.0) def test_ndist_finish_line_0_0_to_finish_line_ccw(self) -> None: self.track.direction 
= TrackDirection.COUNTER_CLOCKWISE.value self.track.finish_line = 0.0 # Change finish line self.assertEqual(self.track.finish_line, 0.0) self._test_ndist(ndist_mode=NdistMode.TO_FINISH_LINE) self._test_ndist_shapely_point(ndist_mode=NdistMode.TO_FINISH_LINE) def test_ndist_finish_line_0_0_from_finish_line_ccw(self) -> None: self.track.direction = TrackDirection.COUNTER_CLOCKWISE.value self.track.finish_line = 0.0 # Change finish line self.assertEqual(self.track.finish_line, 0.0) self._test_ndist(ndist_mode=NdistMode.FROM_FINISH_LINE) self._test_ndist_shapely_point(ndist_mode=NdistMode.TO_FINISH_LINE) def test_ndist_finish_line_0_3_to_finish_line_ccw(self) -> None: self.track.direction = TrackDirection.COUNTER_CLOCKWISE.value self.track.finish_line = 0.3 # Change finish line self.assertEqual(self.track.finish_line, 0.3) self._test_ndist(ndist_mode=NdistMode.TO_FINISH_LINE) self._test_ndist_shapely_point(ndist_mode=NdistMode.TO_FINISH_LINE) def test_ndist_finish_line_0_3_from_finish_line_ccw(self) -> None: self.track.direction = TrackDirection.COUNTER_CLOCKWISE.value self.track.finish_line = 0.3 # Change finish line self.assertEqual(self.track.finish_line, 0.3) self._test_ndist(ndist_mode=NdistMode.FROM_FINISH_LINE) self._test_ndist_shapely_point(ndist_mode=NdistMode.TO_FINISH_LINE) def test_ndist_finish_line_neg_0_3_to_finish_line_ccw(self) -> None: self.track.direction = TrackDirection.COUNTER_CLOCKWISE.value self.track.finish_line = -0.3 # Change finish line self.assertEqual(self.track.finish_line, 0.7) self._test_ndist(ndist_mode=NdistMode.TO_FINISH_LINE) self._test_ndist_shapely_point(ndist_mode=NdistMode.TO_FINISH_LINE) def test_ndist_finish_line_neg_0_3_from_finish_line_ccw(self) -> None: self.track.direction = TrackDirection.COUNTER_CLOCKWISE.value self.track.finish_line = -0.3 # Change finish line self.assertEqual(self.track.finish_line, 0.7) self._test_ndist(ndist_mode=NdistMode.FROM_FINISH_LINE) self._test_ndist_shapely_point(ndist_mode=NdistMode.TO_FINISH_LINE) def test_get_closest_waypoint_indices_0_1_cw(self) -> None: self.track.direction = TrackDirection.CLOCKWISE.value test_ndist = 0.1 prev_idx, next_idx = self.track.get_closest_waypoint_indices(test_ndist, ndist_mode=NdistMode.TO_FINISH_LINE) self.assertEqual(prev_idx, 214) self.assertEqual(next_idx, 215) test_ndist = 0.9 prev_idx, next_idx = self.track.get_closest_waypoint_indices(test_ndist, ndist_mode=NdistMode.FROM_FINISH_LINE) self.assertEqual(prev_idx, 214) self.assertEqual(next_idx, 215) def test_get_closest_waypoint_indices_0_1_ccw(self) -> None: self.track.direction = TrackDirection.COUNTER_CLOCKWISE.value test_ndist = 0.1 prev_idx, next_idx = self.track.get_closest_waypoint_indices(test_ndist, ndist_mode=NdistMode.TO_FINISH_LINE) self.assertEqual(prev_idx, 211) self.assertEqual(next_idx, 212) test_ndist = 0.9 prev_idx, next_idx = self.track.get_closest_waypoint_indices(test_ndist, ndist_mode=NdistMode.FROM_FINISH_LINE) self.assertEqual(prev_idx, 211) self.assertEqual(next_idx, 212) def test_get_closest_waypoint_indices_0_5_cw(self) -> None: self.track.direction = TrackDirection.CLOCKWISE.value test_ndist = 0.5 prev_idx, next_idx = self.track.get_closest_waypoint_indices(test_ndist, ndist_mode=NdistMode.TO_FINISH_LINE) self.assertEqual(prev_idx, 116) self.assertEqual(next_idx, 117) prev_idx, next_idx = self.track.get_closest_waypoint_indices(test_ndist, ndist_mode=NdistMode.FROM_FINISH_LINE) self.assertEqual(prev_idx, 116) self.assertEqual(next_idx, 117) def test_get_closest_waypoint_indices_0_5_ccw(self) -> None: 
self.track.direction = TrackDirection.COUNTER_CLOCKWISE.value test_ndist = 0.5 prev_idx, next_idx = self.track.get_closest_waypoint_indices(test_ndist, ndist_mode=NdistMode.TO_FINISH_LINE) self.assertEqual(prev_idx, 117) self.assertEqual(next_idx, 118) prev_idx, next_idx = self.track.get_closest_waypoint_indices(test_ndist, ndist_mode=NdistMode.FROM_FINISH_LINE) self.assertEqual(prev_idx, 117) self.assertEqual(next_idx, 118) def test_get_closest_waypoints_0_1_cw(self) -> None: self.track.direction = TrackDirection.CLOCKWISE.value test_ndist = 0.1 prev_coords, next_coords = self.track.get_closest_waypoints(test_ndist, ndist_mode=NdistMode.TO_FINISH_LINE) self.assertAlmostEqual(prev_coords[0], -8.01600242) self.assertAlmostEqual(prev_coords[1], -5.12338257) self.assertAlmostEqual(next_coords[0], -8.15831709) self.assertAlmostEqual(next_coords[1], -4.93767357) test_ndist = 0.9 prev_coords, next_coords = self.track.get_closest_waypoints(test_ndist, ndist_mode=NdistMode.FROM_FINISH_LINE) self.assertAlmostEqual(prev_coords[0], -8.01600242) self.assertAlmostEqual(prev_coords[1], -5.12338257) self.assertAlmostEqual(next_coords[0], -8.15831709) self.assertAlmostEqual(next_coords[1], -4.93767357) def test_get_closest_waypoints_0_1_ccw(self) -> None: self.track.direction = TrackDirection.COUNTER_CLOCKWISE.value test_ndist = 0.1 prev_coords, next_coords = self.track.get_closest_waypoints(test_ndist, ndist_mode=NdistMode.TO_FINISH_LINE) self.assertAlmostEqual(prev_coords[0], -2.43641901) self.assertAlmostEqual(prev_coords[1], 2.26828957) self.assertAlmostEqual(next_coords[0], -2.75742698) self.assertAlmostEqual(next_coords[1], 2.34699249) test_ndist = 0.9 prev_coords, next_coords = self.track.get_closest_waypoints(test_ndist, ndist_mode=NdistMode.FROM_FINISH_LINE) self.assertAlmostEqual(prev_coords[0], -2.43641901) self.assertAlmostEqual(prev_coords[1], 2.26828957) self.assertAlmostEqual(next_coords[0], -2.75742698) self.assertAlmostEqual(next_coords[1], 2.34699249) def test_get_closest_waypoints_0_5_cw(self) -> None: self.track.direction = TrackDirection.CLOCKWISE.value test_ndist = 0.5 prev_coords, next_coords = self.track.get_closest_waypoints(test_ndist, ndist_mode=NdistMode.TO_FINISH_LINE) self.assertAlmostEqual(prev_coords[0], 8.9961977) self.assertAlmostEqual(prev_coords[1], 0.3554957) self.assertAlmostEqual(next_coords[0], 8.7407155) self.assertAlmostEqual(next_coords[1], 0.1251201) prev_coords, next_coords = self.track.get_closest_waypoints(test_ndist, ndist_mode=NdistMode.FROM_FINISH_LINE) self.assertAlmostEqual(prev_coords[0], 8.9961977) self.assertAlmostEqual(prev_coords[1], 0.3554957) self.assertAlmostEqual(next_coords[0], 8.7407155) self.assertAlmostEqual(next_coords[1], 0.1251201) def test_get_closest_waypoints_0_5_ccw(self) -> None: self.track.direction = TrackDirection.COUNTER_CLOCKWISE.value test_ndist = 0.5 prev_coords, next_coords = self.track.get_closest_waypoints(test_ndist, ndist_mode=NdistMode.TO_FINISH_LINE) self.assertAlmostEqual(prev_coords[0], 8.7407155) self.assertAlmostEqual(prev_coords[1], 0.1251201) self.assertAlmostEqual(next_coords[0], 8.9961977) self.assertAlmostEqual(next_coords[1], 0.3554957) prev_coords, next_coords = self.track.get_closest_waypoints(test_ndist, ndist_mode=NdistMode.FROM_FINISH_LINE) self.assertAlmostEqual(prev_coords[0], 8.7407155) self.assertAlmostEqual(prev_coords[1], 0.1251201) self.assertAlmostEqual(next_coords[0], 8.9961977) self.assertAlmostEqual(next_coords[1], 0.3554957) def test_get_orientation_central_difference(self) -> None: 
self.track.direction = TrackDirection.COUNTER_CLOCKWISE.value test_ndist = 0.1 orientation = self.track.get_orientation(test_ndist, ndist_mode=NdistMode.TO_FINISH_LINE, finite_difference=FiniteDifference.CENTRAL_DIFFERENCE) self.assertEqual(len(orientation), 4) self.assertEqual(orientation[0], 0.0) self.assertEqual(orientation[1], 0.0) self.assertAlmostEqual(orientation[2], 0.9927828) self.assertAlmostEqual(orientation[3], 0.1199265) self.track.direction = TrackDirection.CLOCKWISE.value test_ndist = 0.1 orientation = self.track.get_orientation(test_ndist, ndist_mode=NdistMode.TO_FINISH_LINE, finite_difference=FiniteDifference.CENTRAL_DIFFERENCE) self.assertEqual(len(orientation), 4) self.assertEqual(orientation[0], 0.0) self.assertEqual(orientation[1], 0.0) self.assertAlmostEqual(orientation[2], 0.8967341) self.assertAlmostEqual(orientation[3], 0.4425698) def test_get_orientation_forward_difference(self) -> None: self.track.direction = TrackDirection.COUNTER_CLOCKWISE.value test_ndist = 0.1 orientation = self.track.get_orientation(test_ndist, ndist_mode=NdistMode.TO_FINISH_LINE, finite_difference=FiniteDifference.FORWARD_DIFFERENCE) self.assertEqual(len(orientation), 4) self.assertEqual(orientation[0], 0.0) self.assertEqual(orientation[1], 0.0) self.assertAlmostEqual(orientation[2], 0.9927828) self.assertAlmostEqual(orientation[3], 0.1199265) self.track.direction = TrackDirection.CLOCKWISE.value test_ndist = 0.1 orientation = self.track.get_orientation(test_ndist, ndist_mode=NdistMode.TO_FINISH_LINE, finite_difference=FiniteDifference.FORWARD_DIFFERENCE) self.assertEqual(len(orientation), 4) self.assertEqual(orientation[0], 0.0) self.assertEqual(orientation[1], 0.0) self.assertAlmostEqual(orientation[2], 0.8967341) self.assertAlmostEqual(orientation[3], 0.4425698)
python
######################################## # QUESTION ######################################## # This time no story, no theory. The examples below show you how to write function accum: # Examples: # accum("abcd") -> "A-Bb-Ccc-Dddd" # accum("RqaEzty") -> "R-Qq-Aaa-Eeee-Zzzzz-Tttttt-Yyyyyyy" # accum("cwAt") -> "C-Ww-Aaa-Tttt" ################################### # SOLUTION ################################### def accum(s): x = list(s) i = [] for k in list(range(0,len(x))): i.append(s[k]*(k+1)) j = '' for k in list(range(0,len(i))): j += i[k].title() + "-" n = len(j) j = j[0:(n-1)] return j # your code print(accum("adafdaff"))
python
# # BSD 3-Clause License # # Copyright (c) 2017 xxxx # All rights reserved. # Copyright 2021 Huawei Technologies Co., Ltd # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ============================================================================ #from __future__ import print_function from PIL import Image from os.path import join import os from .vision import VisionDataset from .utils import download_and_extract_archive, check_integrity, list_dir, list_files class Omniglot(VisionDataset): """`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset. Args: root (string): Root directory of dataset where directory ``omniglot-py`` exists. background (bool, optional): If True, creates dataset from the "background" set, otherwise creates from the "evaluation" set. This terminology is defined by the authors. transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.RandomCrop`` target_transform (callable, optional): A function/transform that takes in the target and transforms it. download (bool, optional): If true, downloads the dataset zip files from the internet and puts it in root directory. If the zip files are already downloaded, they are not downloaded again. """ folder = 'omniglot-py' download_url_prefix = 'https://github.com/brendenlake/omniglot/raw/master/python' zips_md5 = { 'images_background': '68d2efa1b9178cc56df9314c21c6e718', 'images_evaluation': '6b91aef0f799c5bb55b94e3f2daec811' } def __init__(self, root, background=True, transform=None, target_transform=None, download=False): super(Omniglot, self).__init__(join(root, self.folder), transform=transform, target_transform=target_transform) self.background = background if download: self.download() if not self._check_integrity(): raise RuntimeError('Dataset not found or corrupted.' 
+ ' You can use download=True to download it') self.target_folder = join(self.root, self._get_target_folder()) self._alphabets = list_dir(self.target_folder) self._characters = sum([[join(a, c) for c in list_dir(join(self.target_folder, a))] for a in self._alphabets], []) self._character_images = [[(image, idx) for image in list_files(join(self.target_folder, character), '.png')] for idx, character in enumerate(self._characters)] self._flat_character_images = sum(self._character_images, []) def __len__(self): return len(self._flat_character_images) def __getitem__(self, index): """ Args: index (int): Index Returns: tuple: (image, target) where target is index of the target character class. """ image_name, character_class = self._flat_character_images[index] image_path = join(self.target_folder, self._characters[character_class], image_name) image = Image.open(image_path, mode='r').convert('L') if self.transform: image = self.transform(image) if self.target_transform: character_class = self.target_transform(character_class) return image, character_class def _check_integrity(self): zip_filename = self._get_target_folder() if not check_integrity(join(self.root, zip_filename + '.zip'), self.zips_md5[zip_filename]): return False return True def download(self): if self._check_integrity(): print('Files already downloaded and verified') return filename = self._get_target_folder() zip_filename = filename + '.zip' url = self.download_url_prefix + '/' + zip_filename download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename]) def _get_target_folder(self): return 'images_background' if self.background else 'images_evaluation'
python
import unittest import sklearn.grid_search from spark_sklearn.grid_search import GridSearchCV from spark_sklearn.random_search import RandomizedSearchCV from spark_sklearn.test_utils import fixtureReuseSparkSession # Overwrite the sklearn GridSearch in this suite so that we can run the same tests with the same # parameters. @fixtureReuseSparkSession class AllTests(unittest.TestCase): # After testing, make sure to revert sklearn to normal (see _add_to_module()) @classmethod def tearDownClass(cls): super(AllTests, cls).tearDownClass() # Restore sklearn module to the original state after done testing this fixture. sklearn.grid_search.GridSearchCV = sklearn.grid_search.GridSearchCV_original del sklearn.grid_search.GridSearchCV_original sklearn.grid_search.RandomizedSearchCV = sklearn.grid_search.RandomizedSearchCV_original del sklearn.grid_search.RandomizedSearchCV_original class SPGridSearchWrapper(GridSearchCV): def __init__(self, estimator, param_grid, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): super(SPGridSearchWrapper, self).__init__(AllTests.spark.sparkContext, estimator, param_grid, scoring, fit_params, n_jobs, iid, refit, cv, verbose, pre_dispatch, error_score) class SPRandomizedSearchWrapper(RandomizedSearchCV): def __init__(self, estimator, param_distributions, n_iter=10, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', random_state=None, error_score='raise'): super(SPRandomizedSearchWrapper, self).__init__(AllTests.spark.sparkContext, estimator, param_distributions, n_iter, scoring, fit_params, n_jobs, iid, refit, cv, verbose, pre_dispatch, random_state, error_score) def _create_method(method): def do_test_expected(*_): method() return do_test_expected def _add_to_module(): # NOTE: This doesn't actually run scikit-learn tests against SPGridSearchWrapper # for scikit-learn >= 0.18, since the scikit-learn tests (in sklearn.model_selection.tests) use # sklearn.model_selection.GridSearchCV (not sklearn.grid_search.GridSearchCV) # TODO: Get scikit-learn tests to pass with spark-sklearn GridSearch implementation SKGridSearchCV = sklearn.grid_search.GridSearchCV sklearn.grid_search.GridSearchCV = SPGridSearchWrapper sklearn.grid_search.GridSearchCV_original = SKGridSearchCV SKRandomizedSearchCV = sklearn.grid_search.RandomizedSearchCV sklearn.grid_search.RandomizedSearchCV = SPRandomizedSearchWrapper sklearn.grid_search.RandomizedSearchCV_original = SKRandomizedSearchCV from sklearn.model_selection.tests import test_search all_methods = [(mname, method) for (mname, method) in test_search.__dict__.items() if mname.startswith("test_")] for name, method in all_methods: method_for_test = _create_method(method) method_for_test.__name__ = name setattr(AllTests, method.__name__, method_for_test) _add_to_module()
python
#!flask/bin/python # # Copyright 2019 XEBIALABS # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # from flask import Flask from flask import request from flask import make_response from functools import wraps import os, io, json app = Flask(__name__) def getFile( fileName, status="200" ): filePath = "/remedy-stub/responses/%s" % fileName if not os.path.isfile(filePath): raise AuthError({"code": "response_file_not_found", "description": "Unable to load response file"}, 500) f = io.open(filePath, "r", encoding="utf-8") resp = make_response( (f.read(), status) ) resp.headers['Content-Type'] = 'application/json; charset=utf-8' return resp def requires_auth(f): """ Determines if the access token is valid """ @wraps(f) def decorated(*args, **kwargs): token = get_token_auth_header() if token != "DUMMY_TOKEN": raise AuthError({"code": "invalid_header", "description": "Unable to find appropriate key"}, 400) return f(*args, **kwargs) return decorated @app.route('/') def index(): return "Hello, World!" 
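# Hedged addition: the route handlers in this stub raise AuthError, but the class
# is never defined or imported in this file. The definition and error handler
# below are a minimal sketch following the common Flask pattern, assumed to match
# the original intent (an error payload dict plus an HTTP status code).
class AuthError(Exception):
    def __init__(self, error, status_code):
        super(AuthError, self).__init__(error)
        self.error = error
        self.status_code = status_code

@app.errorhandler(AuthError)
def handle_auth_error(ex):
    # Serialize the error payload as JSON with the requested status code.
    resp = make_response((json.dumps(ex.error), ex.status_code))
    resp.headers['Content-Type'] = 'application/json; charset=utf-8'
    return resp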
@app.route('/api/arsys/v1/entry/<formName>/<entryId>', methods=['GET']) @requires_auth def getEntry(formName, entryId): return getFile("ticket_000000000000103.json") @app.route('/api/arsys/v1/entry/<formName>', methods=['GET']) @requires_auth def getEntries(formName): return getFile("tickets.json") @app.route('/api/arsys/v1/entry/<formName>', methods=['POST']) @requires_auth def createEntry(formName): fields = request.get_json() app.logger.info("createEntry = %s" % json.dumps(fields)) resp = make_response(("", 201)) resp.headers['Location'] = '/api/arsys/v1/entry/000000000000103' return resp @app.route('/api/arsys/v1/entry/<formName>/<entryId>', methods=['PUT']) @requires_auth def updateEntry(formName, entryId): fields = request.get_json() app.logger.info("updateEntry = %s" % json.dumps(fields)) resp = getFile("ticket_000000000000103.json", 204) resp.headers['Location'] = '/api/arsys/v1/entry/000000000000103' return resp @app.route('/api/jwt/login', methods=['POST']) def login(): username = request.form.get("username") password = request.form.get("password") app.logger.info("URI = /api/jwt/login") app.logger.info("content: user=%s" % username) app.logger.info("content: password=%s" % password) if username == "xlr@xebialabs.com" and password == "admin": return "DUMMY_TOKEN" else: raise AuthError({"code": "credentials_invalid", "description": "Credentials are invalid"}, 403) def get_token_auth_header(): """ Obtains the access token from the Authorization Header """ auth = request.headers.get("Authorization", None) if not auth: raise AuthError({"code": "authorization_header_missing", "description": "Authorization header is expected"}, 401) parts = auth.split() if parts[0] != "AR-JWT": raise AuthError({"code": "invalid_header", "description": "Authorization header must start with AR-JWT"}, 401) token = parts[1] return token if __name__ == '__main__': app.run(debug=True)
python
#Chocolate Distribution #this chocolate function will return the minimum required difference def chocolate(l, no_of_packets, no_of_students) : if no_of_packets < no_of_students : return -1 if no_of_packets == 0 or no_of_students == 0 : return 0 l.sort(); p = len(l) p = p - 1 #last index of list last = 0 first = 0 i = 0 diff = 0 required_diff = l[p] + 1 #assigning maximum no.+1 i = 0 while ( i + no_of_students - 1) < no_of_packets : diff = l[i + no_of_students - 1] - l[i] if diff < required_diff : required_diff = diff last = i + no_of_students - 1 first = i i = i + 1 return required_diff no_of_students = int(input("enter the number of students")) no_of_packets = int(input("Enter the number of packets")) #print("enter the numbet contained by each packet repectively") b = 0 list_of_chocolate = [] for i in range( no_of_packets ) : b = int(input("enter the no.")) list_of_chocolate.append(b) print("the minimum difference in chocolate distributed between two people is : " ,end=" ") print(chocolate(list_of_chocolate,no_of_packets,no_of_students)) #input: #enter the number of students3 #Enter the number of packets7 #enter the no.7 #enter the no.3 #enter the no.2 #enter the no.4 #enter the no.9 #enter the no.12 #enter the no.56 #the minimum difference in chocolate distributed between two people is : 2
python
import models import logging from google.appengine.api import memcache # Memcache functions. def hitlist_cache(key,couple_key,update=False): # Try to get list on Eatery entity keys from memcache hitlist = memcache.get(key) if not hitlist or update: # Query all Eatery entities whose ancestor is the user's Couple hitlist_query = models.Eatery.all(keys_only=True).ancestor(couple_key) hitlist = list(hitlist_query) memcache.set(key,hitlist) return hitlist def cache_entity(key,query_key,parent_key,entity_query_function,keys_only=False,update=False): obj = memcache.get(key) if not obj or update: logging.error('User query for ' + key) # entity query function must return the actual object! obj = entity_query_function(query_key,parent_key,keys_only) memcache.set(key,obj) return obj def geocoded_hitlist_cache(key,couple_key,update=False): """Get a list of eatery entities that have been geocoded""" geocoded_hitlist = memcache.get(key) if not geocoded_hitlist or update: geocoded_hitlist = [] hitlist_keys = hitlist_cache("Hitlist|" + str(couple_key.key().id()),couple_key,False) for e_key in hitlist_keys: # Get the eatery entity from memcache and check if it has been geocoded. e = cache_entity('Eatery|' + str(e_key.id()),e_key.id(),couple_key,models.Eatery.by_id) if e.Latitude and e.Longitude: logging.error('BOOM:' + str(e.RestaurantName)) geocoded_hitlist.append(e) memcache.set(key,geocoded_hitlist) return geocoded_hitlist
python
import torch.nn as nn class METValueMLPConverter(nn.Module): def __init__(self, global_average_pooling=True): super().__init__() self.met_regressor = nn.Sequential( nn.Linear(1280, 100), nn.ReLU(), nn.Linear(100, 1), nn.ReLU() ) self.global_average_pooling = global_average_pooling def forward(self, feature): if self.global_average_pooling: feature = feature.mean(dim=-1).mean(dim=-1) return self.met_regressor(feature)
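# Hedged usage sketch (not part of the original module): run a dummy
# feature map of shape (batch, 1280, H, W) through the converter. The channel
# count 1280 matches the regressor's input layer; the spatial size 7x7 is an
# arbitrary illustrative choice.
if __name__ == "__main__":
    import torch

    model = METValueMLPConverter()
    dummy_features = torch.randn(4, 1280, 7, 7)   # (batch, channels, H, W)
    met_values = model(dummy_features)
    print(met_values.shape)  # torch.Size([4, 1]) -- one MET estimate per sample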
python
from audioop import avg import matplotlib.pyplot as plt import matplotlib import numpy as np import sys import re import csv from itertools import groupby import glob from statistics import mean """ This script plots vertical frequency bars for one bandit experiment. Give -c as to load the experiment data from .csv files """ NUM_BARS = 2 BOUNDS = (0,35) DIFFERENCE = False PROPORTIONAL = False #ideally when generalized, I want two bars to be placed in the 2/6 and 4/6 slots of a plot arms = [(5, 1.0), (4, 1.0), (3, 1.0), (2, 1.0), (1, 1.0)] #[(1, 1.0), (2, 1.0), (3, 1.0)] def arms_rewards_fromCSV(filepath): configs = [] rewards = [] with open(filepath, newline='') as csvfile: utildimser_reader = csv.reader(csvfile) next(utildimser_reader) for row in utildimser_reader: try: #print(row) configs.append((int(float(row[3])), round(float(row[2]), 2))) rewards.append(float(row[1])) except Exception as e: print(e) print("Exception in file " + filepath) print("Row is " + str(row)) return configs, rewards def truncate(utility): bounds = (175,230) lower_bound, upper_bound = bounds old_range = upper_bound - lower_bound if(utility > upper_bound): upper_bound = utility elif(utility < lower_bound): lower_bound = utility new_range = upper_bound - lower_bound result = float((utility - lower_bound)/new_range) return result files = None arm_choices = [] gaps = [] avg_utils = [] folder = sys.argv[1] if(folder[-1] != "/"): folder+= "/" files = glob.glob(folder + "*.csv") for j, file in enumerate(files): arm, rew = arms_rewards_fromCSV(file) bandit_rewards = [] current_arm = None for i, a in enumerate(arm): if(a[1] < 1): #print("skipped a cleaning window") continue else: bandit_rewards.append(rew[i]) current_arm = a print("avg normalized is ") avg_util = mean([truncate(rew) for rew in bandit_rewards]) print(avg_util) avg_utils.append((avg_util,current_arm)) best_arm = max(avg_utils, key= lambda k: k[0]) for avg_util in avg_utils: if avg_util[0] != best_arm[0]: gaps.append((best_arm[0] - avg_util[0], avg_util[1])) print("-----------------") print("best arm " + str(best_arm)) for i, gap in enumerate(gaps): print("gap " + str(i)) print(str(gap)) print("---") print("-----------------")
python
from abc import ABC, abstractmethod

# Abstract base class for data autocompletion
class Autocompleter(ABC):
    def __init__(self):
        super().__init__()

    # Returns autocompletions, where
    # con - the connection
    # tokens (list) - list of tokens (lexemes)
    # content (str) - file contents
    # line (int) - line number
    # position (int) - position within the line
    # chatId (str) - chat ID
    # branchId (str) - branch ID
    @abstractmethod
    def getAutocompletions(self, con, tokens, content, line, position, chatId, branchId):
        pass
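# Hedged illustration (not part of the original interface): a minimal concrete
# autocompleter. The class name and the way `tokens` is interpreted are
# assumptions made purely for demonstration; it ignores the connection and
# chat/branch context and suggests previously seen tokens that start with the
# token currently being typed.
class PrefixAutocompleter(Autocompleter):
    def getAutocompletions(self, con, tokens, content, line, position, chatId, branchId):
        if not tokens:
            return []
        prefix = tokens[-1]  # treat the last token as the partial word under the cursor
        return sorted({t for t in tokens[:-1] if t.startswith(prefix)})

# Example (illustrative values only):
#   PrefixAutocompleter().getAutocompletions(None, ["send", "sendMessage", "sen"],
#                                            "", 0, 3, "chat-1", "branch-1")
#   -> ["send", "sendMessage"]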
python
import pandas as exporter
import glob

def convert(src, dest):
    read_file = exporter.read_excel(src)
    read_file.to_csv(dest, index=None, header=True)

# convert all files in directory
# @param srcDir (string) - source dir path
# @param srcExt (string) - source file extension
# @param destDir (string) - destination path
def convertAll(srcDir, srcExt, destDir):
    for item in glob.glob(srcDir + "/*." + str(srcExt)):
        parts = item.split("/")              # avoid shadowing the built-in name `list`
        filename = parts[len(parts) - 1]
        # call the converter defined above (the original referenced an undefined
        # `exportToCsv` helper)
        convert(item, destDir + "/" + filename.replace("." + srcExt, ".csv"))
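# Hedged usage sketch -- the paths below are placeholders, not from the original
# module: convert one workbook, then every .xlsx file found in a folder.
if __name__ == "__main__":
    convert("input/report.xlsx", "output/report.csv")
    convertAll("input", "xlsx", "output")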
python
from app.data.database import DB from app.data.skill_components import SkillComponent from app.data.components import Type from app.engine import equations class StatChange(SkillComponent): nid = 'stat_change' desc = "Gives stat bonuses" tag = 'combat' expose = (Type.Dict, Type.Stat) value = [] def stat_change(self, unit): return {stat[0]: stat[1] for stat in self.value} def tile_def(self): total_value = 0 for stat_nid, stat_value in self.value: if stat_nid == 'DEF': total_value += stat_value return total_value class StatMultiplier(SkillComponent): nid = 'stat_multiplier' desc = "Gives stat bonuses" tag = 'combat' expose = (Type.FloatDict, Type.Stat) value = [] def stat_change(self, unit): return {stat[0]: int((stat[1]-1)*unit.stats[stat[0]]) for stat in self.value} class GrowthChange(SkillComponent): nid = 'growth_change' desc = "Gives growth rate % bonuses" tag = 'combat' expose = (Type.Dict, Type.Stat) value = [] def growth_change(self, unit): return {stat[0]: stat[1] for stat in self.value} class EquationGrowthChange(SkillComponent): nid = 'equation_growth_change' desc = "Gives growth rate % bonuses equal to chosen equation" tag = 'combat' expose = Type.Equation def growth_change(self, unit): value = equations.parser.get(self.value, unit) return {stat_nid: value for stat_nid in DB.stats.keys()} class Damage(SkillComponent): nid = 'damage' desc = "Gives +X damage" tag = 'combat' expose = Type.Int value = 3 def modify_damage(self, unit, item): return self.value class EvalDamage(SkillComponent): nid = 'eval_damage' desc = "Gives +X damage solved using evaluate" tag = 'combat' expose = Type.String def modify_damage(self, unit, item): from app.engine import evaluate try: return int(evaluate.evaluate(self.value, unit, item=item)) except: print("Couldn't evaluate %s conditional" % self.value) return 0 class Resist(SkillComponent): nid = 'resist' desc = "Gives +X damage resist" tag = 'combat' expose = Type.Int value = 2 def modify_resist(self, unit, item_to_avoid): return self.value class Hit(SkillComponent): nid = 'hit' desc = "Gives +X accuracy" tag = 'combat' expose = Type.Int value = 15 def modify_accuracy(self, unit, item): return self.value class Avoid(SkillComponent): nid = 'avoid' desc = "Gives +X avoid" tag = 'combat' expose = Type.Int value = 20 def modify_avoid(self, unit, item_to_avoid): return self.value def tile_avoid(self): return self.value class Crit(SkillComponent): nid = 'crit' desc = "Gives +X crit" tag = 'combat' expose = Type.Int value = 30 def modify_crit_accuracy(self, unit, item): return self.value class CritAvoid(SkillComponent): nid = 'crit_avoid' desc = "Gives +X crit avoid" tag = 'combat' expose = Type.Int value = 10 def modify_crit_avoid(self, unit, item_to_avoid): return self.value class AttackSpeed(SkillComponent): nid = 'attack_speed' desc = "Gives +X attack speed" tag = 'combat' expose = Type.Int value = 4 def modify_attack_speed(self, unit, item): return self.value class DefenseSpeed(SkillComponent): nid = 'defense_speed' desc = "Gives +X defense speed" tag = 'combat' expose = Type.Int value = 4 def modify_defense_speed(self, unit, item_to_avoid): return self.value class DamageMultiplier(SkillComponent): nid = 'damage_multiplier' desc = "Multiplies damage given by a fraction" tag = 'combat' expose = Type.Float value = 0.5 def damage_multiplier(self, unit, item, target, mode): return self.value class ResistMultiplier(SkillComponent): nid = 'resist_multiplier' desc = "Multiplies damage taken by a fraction" tag = 'combat' expose = Type.Float value = 0.5 def 
resist_multiplier(self, unit, item, target, mode): return self.value
python
__all__ = ('Server', ) from ..traps import Future, skip_ready_cycle class Server: """ Server returned by ``EventThread.create_server``. Attributes ---------- active_count : `int` The amount of active connections bound to the server. backlog : `int` The maximum number of queued connections passed to `listen()` (defaults to 100). close_waiters : `None`, `list` of ``Future`` Futures, which are waiting for the server to close. If the server is already closed, set as `None`. loop : ``EventThread`` The event loop to what the server is bound to. protocol_factory : `callable` Factory function for creating a protocols. serving : `bool` Whether the server is serving. sockets : `None`, `list` of `socket.socket` The sockets served by the server. If the server is closed, then i set as `None`. ssl_context : `None`, `ssl.SSLContext` If ssl is enabled for the connections, then set as `ssl.SSLContext`. """ __slots__ = ( 'active_count', 'backlog', 'close_waiters', 'loop', 'protocol_factory', 'serving', 'sockets', 'ssl_context' ) def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog): """ Creates a new server with the given parameters. Parameters ---------- loop : ``EventThread`` The event loop to what the server will be bound to. sockets : `list` of `socket.socket` The sockets to serve by the server. protocol_factory : `callable` Factory function for creating a protocols. ssl_context : `None`, `ssl.SSLContext` To enable ssl for the connections, give it as `ssl.SSLContext`. backlog : `int` The maximum number of queued connections passed to `listen()` (defaults to 100). """ self.loop = loop self.sockets = sockets self.active_count = 0 self.close_waiters = [] self.protocol_factory = protocol_factory self.backlog = backlog self.ssl_context = ssl_context self.serving = False def __repr__(self): """Returns the server's representation.""" repr_parts = ['<', self.__class__.__name__] if self.serving: repr_parts.append(' serving') repr_parts.append(' sockets=') repr_parts.append(repr(self.sockets)) repr_parts.append(', protocol_factory=') repr_parts.append(repr(self.protocol_factory)) repr_parts.append('>') return ''.join(repr_parts) def _attach(self): """ Adds `1` to the server active counter. """ self.active_count += 1 def _detach(self): """ Removes `1` from the server's active counter. If there no more active sockets of the server, then closes it. """ active_count = self.active_count - 1 self.active_count = active_count if active_count: return if (self.sockets is None): self._wake_up_close_waiters() def _wake_up_close_waiters(self): """ Wakes up the server's close waiters. """ close_waiters = self.close_waiters if close_waiters is None: return self.close_waiters = None for close_waiter in close_waiters: close_waiter.set_result(None) def close(self): """ Closes the server by stopping serving it's sockets and waking up it's close waiters. """ sockets = self.sockets if sockets is None: return self.sockets = None loop = self.loop for socket in sockets: loop._stop_serving(socket) self.serving = False if self.active_count == 0: self._wake_up_close_waiters() async def start(self): """ Starts the server by starting serving it's sockets. This method is a coroutine. 
""" if self.serving: return self.serving = True protocol_factory = self.protocol_factory ssl_context = self.ssl_context backlog = self.backlog loop = self.loop for socket in self.sockets: socket.listen(backlog) loop._start_serving(protocol_factory, socket, ssl_context, self, backlog) # Skip one event loop cycle, so all the callbacks added up ^ will run before returning. await skip_ready_cycle() async def wait_closed(self): """ Blocks the task, till the sever is closes. This method is a coroutine. """ if self.sockets is None: return close_waiters = self.close_waiters if close_waiters is None: return close_waiter = Future(self.loop) close_waiters.append(close_waiter) await close_waiter
python
import os, sys

inFilePath = sys.argv[1]

# splitext returns (root, extension); only the extension is of interest here
root, ext = os.path.splitext(inFilePath)

# use the print() function so the script runs under both Python 2 and 3
print(ext)
python
# -*- coding: utf-8 -*- """The operating system file system implementation.""" import os import platform import pysmdev from dfvfs.lib import definitions from dfvfs.lib import errors from dfvfs.lib import py2to3 from dfvfs.path import os_path_spec from dfvfs.vfs import file_system from dfvfs.vfs import os_file_entry class OSFileSystem(file_system.FileSystem): """Class that implements an operating system file system object.""" if platform.system() == u'Windows': PATH_SEPARATOR = u'\\' else: PATH_SEPARATOR = u'/' TYPE_INDICATOR = definitions.TYPE_INDICATOR_OS def _Close(self): """Closes the file system object. Raises: IOError: if the close failed. """ return def _Open(self, path_spec=None, mode='rb'): """Opens the file system object defined by path specification. Args: path_spec: optional path specification (instance of path.PathSpec). The default is None. mode: optional file access mode. The default is 'rb' read-only binary. Raises: AccessError: if the access to open the file was denied. IOError: if the file system object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid. """ if path_spec.HasParent(): raise errors.PathSpecError( u'Unsupported path specification with parent.') def FileEntryExistsByPathSpec(self, path_spec): """Determines if a file entry for a path specification exists. Args: path_spec: a path specification (instance of path.PathSpec). Returns: Boolean indicating if the file entry exists. """ location = getattr(path_spec, u'location', None) if location is None: return False is_device = False if platform.system() == u'Windows': # Windows does not support running os.path.exists on device files # so we use libsmdev to do the check. try: is_device = pysmdev.check_device(location) except IOError as exception: # Since pysmdev will raise IOError when it has no access to the device # we check if the exception message contains ' access denied ' and # return true. # Note that exception.message no longer works in Python 3. exception_string = str(exception) if not isinstance(exception_string, py2to3.UNICODE_TYPE): exception_string = py2to3.UNICODE_TYPE( exception_string, errors=u'replace') if u' access denied ' in exception_string: is_device = True if not is_device and not os.path.exists(location): return False return True def GetFileEntryByPathSpec(self, path_spec): """Retrieves a file entry for a path specification. Args: path_spec: a path specification (instance of path.PathSpec). Returns: A file entry (instance of vfs.FileEntry) or None. """ if not self.FileEntryExistsByPathSpec(path_spec): return return os_file_entry.OSFileEntry(self._resolver_context, self, path_spec) def GetRootFileEntry(self): """Retrieves the root file entry. Returns: A file entry (instance of vfs.FileEntry) or None. """ if platform.system() == u'Windows': # Return the root with the drive letter of the volume the current # working directory is on. location = os.getcwd() location, _, _ = location.partition(u'\\') location = u'{0:s}\\'.format(location) else: location = u'/' if not os.path.exists(location): return path_spec = os_path_spec.OSPathSpec(location=location) return self.GetFileEntryByPathSpec(path_spec) def JoinPath(self, path_segments): """Joins the path segments into a path. Args: path_segments: a list of path segments. Returns: A string containing the joined path segments prefixed with the path separator. """ # For paths on Windows we need to make sure to handle the first path # segment correctly. 
first_path_segment = None if path_segments and platform.system() == u'Windows': # Check if the first path segment contains a "special" path definition. first_path_segment = path_segments[0] first_path_segment_length = len(first_path_segment) first_path_segment_prefix = None # In case the path start with: \\.\C:\ if (first_path_segment_length >= 7 and first_path_segment.startswith(u'\\\\.\\') and first_path_segment[5:7] == u':\\'): first_path_segment_prefix = first_path_segment[4:6] first_path_segment = first_path_segment[7:] # In case the path start with: \\.\ or \\?\ elif (first_path_segment_length >= 4 and first_path_segment[:4] in [u'\\\\.\\', u'\\\\?\\']): first_path_segment_prefix = first_path_segment[:4] first_path_segment = first_path_segment[4:] # In case the path start with: C: elif first_path_segment_length >= 2 and first_path_segment[1] == u':': first_path_segment_prefix = first_path_segment[:2] first_path_segment = first_path_segment[2:] # In case the path start with: \\server\share (UNC). elif first_path_segment.startswith(u'\\\\'): prefix, _, remainder = first_path_segment[2:].partition( self.PATH_SEPARATOR) first_path_segment_prefix = u'\\\\{0:s}'.format(prefix) first_path_segment = u'\\{0:s}'.format(remainder) if first_path_segment_prefix: first_path_segment, _, remainder = first_path_segment.partition( self.PATH_SEPARATOR) if not remainder: _ = path_segments.pop(0) else: path_segments[0] = remainder first_path_segment = u''.join([ first_path_segment_prefix, first_path_segment]) else: first_path_segment = None # We are not using os.path.join() here since it will not remove all # variations of successive path separators. # Split all the path segments based on the path (segment) separator. path_segments = [ segment.split(self.PATH_SEPARATOR) for segment in path_segments] # Flatten the sublists into one list. path_segments = [ element for sublist in path_segments for element in sublist] # Remove empty path segments. path_segments = filter(None, path_segments) if first_path_segment is None: path = u'{0:s}{1:s}'.format( self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments)) else: path = first_path_segment if path_segments: path = u'{0:s}{1:s}{2:s}'.format( path, self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments)) return path
python
#!/usr/bin/env python3
'''
usage: avr-objdump -zS firmware.elf | python avr-cycles.py
usage: avr-objdump -zS firmware.elf | python avr-cycles.py --mmcu=<mmcu>

@author: raoul rubien
07/2016
'''

import sys
import csv
import json

scriptPath = sys.path[0]
config = json.load(open(scriptPath + "/avr-cycles.conf"))
tableFolder = sys.path[0] + "/" + config["instructionTablesFolder"] + "/"
table = config["instructionTable"]

# overwrite default value if specified per cli: --mmcu=newMcu
if len(sys.argv) == 2:
    mmcu = sys.argv[1]
    print("1 mmcu: %s" % mmcu)
    if "-mmcu=" in mmcu:
        # accept both "-mmcu=<mcu>" and "--mmcu=<mcu>" by taking everything after "="
        mmcu = mmcu.split("=", 1)[1]
        print("mmcu: %s" % mmcu)
        table = config[mmcu]

# read lookup table
reader = csv.reader(open(tableFolder + table, "r"))
dictionary = {}
for k, v in reader:
    if k in dictionary:
        dictionary[k] = dictionary[k] + "|" + v
    else:
        dictionary[k] = v

# translate stdin
for line in sys.stdin:
    for k in dictionary.keys():
        line = line.replace("\t" + str.lower(k) + "\t", "\t[[%s -> %s]]\t" % (k, dictionary[k]))
    sys.stdout.write(line)
python
import sys sys.path.append('..') import os, time import cudf, cupy, time, rmm import dask as dask, dask_cudf from dask.distributed import Client, wait, progress from dask_cuda import LocalCUDACluster import subprocess import core.config as conf workers = ', '.join([str(i) for i in range(conf.n_workers)]) os.environ["CUDA_VISIBLE_DEVICES"] = workers cluster = LocalCUDACluster() client = Client(cluster)
python
__author__ = 'andre' import sys def main(): n = int(raw_input()) sys.stdout.write("\t") for i in range(27): sys.stdout.write(str(i+1) + "\t") for i in range(27): sys.stdout.write("\n" + str(i+1)+"\t") for j in range(27): if (i+1+(j+1)**2)%n==0: sys.stdout.write("X.......") else: sys.stdout.write("........") print "" if __name__ == "__main__": main()
python
import gym from gym import spaces import numpy as np from gym.utils import seeding class BallInBoxEnv(gym.Env): """Custom Environment that follows gym interface""" metadata = {'render.modes': ['human']} def __init__(self): self.vmax = 1 self.r = 1 self.xmin = -10 self.xmax = 10 self.ymin = -10 self.ymax = 10 # x, y high = np.array([10, 10]) self.action_space = spaces.Box(low=-self.vmax, high=self.vmax, shape=(2,), dtype=np.float32) self.observation_space = spaces.Box(low=-high, high=high, dtype=np.float32) self.seed() self.viewer = None self.history = [] self.t = 0 self.num_collisions = 0 def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def step(self, u): self.t += 1 u = np.clip(u, -self.vmax, self.vmax) old_state = self._get_obs() oldx = self.x oldy = self.y collide = False self.x += u[0] self.y += u[1] clip_x = np.clip(self.x, self.xmin + self.r, self.xmax - self.r) clip_y = np.clip(self.y, self.ymin + self.r, self.ymax - self.r) if clip_x != self.x or clip_y != self.y: collide = True self.x = clip_x self.y = clip_y obs = self._get_obs() step_reward = 0 self.num_collisions += collide done = False return obs, step_reward, done, {'collisions': self.num_collisions} def reset(self): self.t = 0 self.num_collisions = 0 self.x = self.np_random.uniform(low=self.xmin + self.r, high=self.xmax - self.r) self.y = self.np_random.uniform(low=self.ymin + self.r, high=self.ymax - self.r) obs = self._get_obs() return obs def get_obs(self): return self._get_obs() def _get_obs(self): return np.array([self.x, self.y]) def set_state(self, state): self.x = state[0] self.y = state[1] def render(self, mode='human'): if self.viewer is None: from gym.envs.classic_control import rendering self.viewer = rendering.Viewer(64, 64) self.viewer.set_bounds(-10, 10, -10, 10) agent = rendering.make_circle(radius=self.r) agent.set_color(0.3, 0.45, 0.85) self.agent_trans = rendering.Transform() agent.add_attr(self.agent_trans) self.viewer.add_geom(agent) self.agent_trans.set_translation(self.x, self.y) return self.viewer.render(return_rgb_array = mode=='rgb_array') def close(self): if self.viewer: self.viewer.close() self.viewer = None
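# Hedged usage sketch (not part of the original file): drive the ball with random
# velocity commands for a short episode and report how often it hit a wall.
if __name__ == "__main__":
    env = BallInBoxEnv()
    obs = env.reset()
    info = {}
    for _ in range(100):
        action = env.action_space.sample()     # random (vx, vy) within [-vmax, vmax]
        obs, reward, done, info = env.step(action)
    print("final position:", obs, "collisions:", info.get("collisions", 0))
    env.close()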
python
from kafka import KafkaConsumer consumer = KafkaConsumer(bootstrap_servers='localhost:9092', enable_auto_commit=False, metadata_max_age_ms=5000, group_id='test-consumer-group') consumer.subscribe(pattern='mytopic.*') try: for msg in consumer: print(msg.value.decode('utf-8')) print(msg.key.decode('utf-8')) # need to commit after processing as auto_commit is False consumer.commit() except Exception as e: print(e) finally: consumer.close()
python
#!/usr/bin/env python3.8 import sys,os,getopt from atdfPeripherals import extractPeripherals from atdfModules import extractModules from atdfInterrupts import extractInterrupts def normalizeOffsets(peripherals,modules): #Normalize Peripheral and Module offsets for attiny and atmega. Newer Chips like ATMega4808 & friends are already properly configured for peripheral in peripherals: if peripherals[peripheral]['offset'] == 0: moduleName = peripherals[peripheral]['name-in-module'] if "Default" in modules[moduleName].keys(): baseAddress=list(modules[moduleName]['Default'].keys())[0] peripherals[peripheral]['offset'] = baseAddress if baseAddress > 0: tmpDict={} for offset in modules[moduleName]['Default'].keys(): tmpDict[offset-baseAddress]=modules[moduleName]['Default'][offset] modules[moduleName]['Default']=tmpDict def fixupPeripheral(peripherals,modules,peripheral): if peripheral in peripherals.keys(): moduleName=peripherals[peripheral]["name-in-module"] if moduleName==peripheral: cleanModuleName=moduleName[:-1] moduleIndex=moduleName[-1] peripherals[peripheral]["name-in-module"]=cleanModuleName if moduleIndex == "0": modules.update({cleanModuleName: modules[moduleName]}) for offset in modules[cleanModuleName]["Default"]: modules[cleanModuleName]["Default"][offset]["name"] = modules[cleanModuleName]["Default"][offset]["name"].replace(moduleIndex,"") modules.pop(moduleName) def unifyModules(peripherals,modules): firstPortFound=False for peripheral in peripherals: if peripheral.startswith("PORT") and (len(peripheral) == 5): moduleName=peripherals[peripheral]["name-in-module"] if (len(modules[moduleName]["Default"]) == 3) and (firstPortFound == False): peripherals[peripheral]["name-in-module"] = "PORT" modules.update({"PORT":modules[moduleName]}) modules["PORT"]["Default"][0]["name"] = modules["PORT"]["Default"][0]["name"][:-1] modules["PORT"]["Default"][0]["caption"] = modules["PORT"]["Default"][0]["caption"].replace(" "+moduleName[-1]+" "," ") modules["PORT"]["Default"][1]["name"] = modules["PORT"]["Default"][1]["name"][:-1] modules["PORT"]["Default"][1]["caption"] = modules["PORT"]["Default"][1]["caption"].replace(" "+moduleName[-1]+" "," ") modules["PORT"]["Default"][2]["name"] = modules["PORT"]["Default"][2]["name"][:-1] modules["PORT"]["Default"][2]["caption"] = modules["PORT"]["Default"][2]["caption"].replace(" "+moduleName[-1]+" "," ") del modules[moduleName] firstPortFound=True elif (len(modules[moduleName]["Default"]) == 3) and (firstPortFound==True): peripherals[peripheral]["name-in-module"] = "PORT" del modules[moduleName] fixupPeripheral(peripherals,modules,"ADC0") fixupPeripheral(peripherals,modules,"ADC1") if "ADC" in peripherals.keys(): peripherals.update({"ADC0":peripherals["ADC"]}) peripherals.pop("ADC") fixupPeripheral(peripherals,modules,"SPI0") fixupPeripheral(peripherals,modules,"SPI1") if "SPI" in peripherals.keys(): peripherals.update({"SPI0":peripherals["SPI"]}) peripherals.pop("SPI") fixupPeripheral(peripherals,modules,"TWI0") fixupPeripheral(peripherals,modules,"TWI1") if "TWI" in peripherals.keys(): peripherals.update({"TWI0": peripherals["TWI"]}) peripherals.pop("TWI") fixupPeripheral(peripherals,modules,"USART0") fixupPeripheral(peripherals,modules,"USART1") fixupPeripheral(peripherals,modules,"USART2") if "USART" in peripherals.keys(): peripherals.update({"USART0": peripherals["USART"]}) peripherals.pop("USART") def unifyInterrupts(interrupts): if 0 in interrupts: interrupts.pop(0) for interrupt in interrupts: if interrupts[interrupt][interrupt]["name"] == "ADC": 
interrupts[interrupt][interrupt]["name"] = "ADC0" if interrupts[interrupt][interrupt]["name"] == "SPI": interrupts[interrupt][interrupt]["name"] = "SPI0" if interrupts[interrupt][interrupt]["name"].startswith("SPI_"): interrupts[interrupt][interrupt]["name"] = interrupts[interrupt][interrupt]["name"].replace("SPI_","SPI0_") if interrupts[interrupt][interrupt]["name"] == "TWI": interrupts[interrupt][interrupt]["name"] = "TWI0" if interrupts[interrupt][interrupt]["name"].startswith("TWI_"): interrupts[interrupt][interrupt]["name"] = interrupts[interrupt][interrupt]["name"].replace("TWI_","TWI0_") if interrupts[interrupt][interrupt]["name"] == "USART": interrupts[interrupt][interrupt]["name"] = "USART0" if interrupts[interrupt][interrupt]["name"].startswith("USART_"): interrupts[interrupt][interrupt]["name"] = interrupts[interrupt][interrupt]["name"].replace("USART_","USART0_") def main(argv): mplabXDir = "/Applications/Microchip" mplabXDir="atdf/" chip="" try: opts, args = getopt.getopt(argv,"hc:",["chip="]) except getopt.GetoptError: print('atdfToPas.py -c <Chip>') sys.exit(2) for opt, arg in opts: if opt == '-h': print('atdfToPas.py -c <Chip>') sys.exit() elif opt in ("-c", "--chip"): chip = arg if chip == "": print('atdfToPas.py -c <Chip>') sys.exit() peripherals=extractPeripherals(mplabXDir,chip) modules=extractModules(mplabXDir,chip) interrupts=extractInterrupts(mplabXDir,chip) normalizeOffsets(peripherals,modules) unifyModules(peripherals,modules) unifyInterrupts(interrupts) print("") if __name__ == "__main__": main(sys.argv[1:])
python
#!/usr/bin/env python import sys from embedimg import version from embedimg import entry def embedimg(): sys.exit(entry.cli_start(version.version)) if __name__ == "__main__": embedimg()
python
from asyncio import sleep from datetime import datetime, timedelta from io import BytesIO from os import remove from os.path import isfile from typing import Optional from PIL import Image, ImageFont, ImageDraw, ImageOps from discord import Member, Embed, File from discord.ext.commands import Cog, command, cooldown, BucketType from discord.ext.menus import ListPageSource, MenuPages from requests import get from ..db import db class HelpMenu(ListPageSource): def __init__(self, ctx, data): self.ctx = ctx super().__init__(data, per_page=10) async def write_page(self, menu, offset, fields=[]): len_data = len(self.entries) embed = Embed(title="XP Leaderboard", colour=self.ctx.author.colour) embed.set_thumbnail(url=self.ctx.guild.icon_url) embed.set_footer(text=f"{offset:,} - {min(len_data, offset + self.per_page - 1):,} of {len_data:,} members.") for name, value in fields: embed.add_field(name=name, value=value, inline=False) return embed async def format_page(self, menu, entries): offset = (menu.current_page * self.per_page) + 1 fields = [] table = ("\n".join( f"{idx + offset}. {self.ctx.bot.guild.get_member(entry[0]).display_name} (XP: {entry[1]} | Level: {entry[2]})" for idx, entry in enumerate(entries))) fields.append(("Ranks", table)) return await self.write_page(menu, offset, fields) class Exp(Cog): def __init__(self, bot): self.bot = bot async def process_xp(self, msg): xp, lvl, xplock = db.record("SELECT XP, LEVEL, XPLOCK FROM exp WHERE UserID = ?", msg.author.id) if msg.content == "+level": pass elif msg.content == "+rank": pass else: await self.add_xp(msg, xp, lvl) async def add_xp(self, message, xp, lvl): xp_to_add = 1 new_lvl = int(xp // 25) db.execute("UPDATE exp SET XP = XP + ?, Level = ?, XPLock = ? WHERE UserID = ?", xp_to_add, new_lvl, (datetime.utcnow() + timedelta(seconds=60)).isoformat(), message.author.id) if new_lvl > lvl: await message.channel.send(f"Congrats {message.author.mention} - you reached level {new_lvl:,}!") @command(name="level") @cooldown(1, 2, BucketType.user) async def display_level(self, ctx, target: Optional[Member]): target = target or ctx.author xp, lvl = db.record("SELECT XP, Level FROM exp WHERE UserID = ?", target.id) or (None, None) if lvl is not None: embed = Embed(title='EXPERIENCE', timestamp=datetime.utcnow()) embed.add_field(name=f'{target.display_name}\'s Level', value=f'Level: {lvl:,} \n Xp: {xp:,}') await ctx.send(embed=embed) else: await ctx.send("That member is not tracked by the experience system.") @command(name="rank") @cooldown(1, 2, BucketType.user) async def display_rank(self, ctx, target: Optional[Member]): target = target or ctx.author ids = db.column("SELECT UserID FROM exp ORDER BY XP DESC") try: await ctx.send(f"{target.display_name} is rank {ids.index(target.id) + 1} of {len(ids)}.") except ValueError: await ctx.send("That member is not tracked by the experience system.") @command(name="leaderboard", aliases=["lb"]) @cooldown(1, 2, BucketType.user) async def display_leaderboard(self, ctx): records = db.records("SELECT UserID, XP, Level FROM exp ORDER BY XP DESC") menu = MenuPages(source=HelpMenu(ctx, records), clear_reactions_after=True, timeout=60.0) await menu.start(ctx) @Cog.listener() async def on_ready(self): if not self.bot.ready: self.bot.cogs_ready.ready_up("exp") @Cog.listener() async def on_message(self, msg): if not msg.author.bot: if not str(msg.channel.type) == "private": await self.process_xp(msg) def setup(bot): bot.add_cog(Exp(bot))
python
_item_fullname_='openmm.AmberPrmtopFile' def is_openmm_AmberPrmtopFile(item): item_fullname = item.__class__.__module__+'.'+item.__class__.__name__ return _item_fullname_==item_fullname
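# Hedged illustration (not part of the original helper): the check is purely
# name-based, so it can be exercised without openmm installed by faking the
# module-qualified class name. `_FakePrmtop` is a stand-in used only for this demo.
if __name__ == "__main__":
    class _FakePrmtop:
        pass
    _FakePrmtop.__module__ = 'openmm'
    _FakePrmtop.__name__ = 'AmberPrmtopFile'

    print(is_openmm_AmberPrmtopFile(_FakePrmtop()))  # True  -> "openmm.AmberPrmtopFile"
    print(is_openmm_AmberPrmtopFile(object()))       # False -> "builtins.object"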
python
# Copyright (C) 2021 GreenWaves Technologies, SAS # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. def find_seq(trans): last = None for idx, dim in enumerate(trans): if last is not None and dim == last + 1: return idx - 1 last = dim return None def remove_sequences(shape, trans): seq_idx = find_seq(trans) while seq_idx is not None: seq_start = trans[seq_idx] shape = shape[:seq_start] + [shape[seq_start]*shape[seq_start+1]] + shape[seq_start+2:] trans = [idx if idx < seq_start else idx - 1 for idx in trans if idx != seq_start] seq_idx = find_seq(trans) return shape, trans def remove_unit_dims(shape, trans): unit_idx = next((idx for idx, dim in enumerate(shape) if dim == 1), None) while unit_idx is not None: shape = shape[0:unit_idx] + shape[unit_idx+1:] trans = [idx if idx < unit_idx else idx - 1 for idx in trans if idx != unit_idx] unit_idx = next((idx for idx, dim in enumerate(shape) if dim == 1), None) return shape, trans def real_transpose(shape, trans): shape, trans = remove_unit_dims(list(shape), list(trans)) shape, trans = remove_sequences(shape, trans) return tuple(shape), tuple(trans)
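# Hedged illustration (not part of the original file): a transpose of an
# NCHW-style shape with a leading unit batch dimension. The unit dim is dropped
# and the two axes that remain adjacent after the transpose (the original 8 and
# 16) are fused, so the "real" transpose reduces to a plain 2-D swap.
if __name__ == "__main__":
    print(real_transpose((1, 8, 16, 32), (0, 3, 1, 2)))  # ((128, 32), (1, 0))

    # A transpose that never actually reorders data collapses to a flat copy.
    print(real_transpose((1, 4, 8), (0, 1, 2)))          # ((32,), (0,))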
python
from matplotlib import pyplot as plt from matplotlib import text import numpy as np import matplotlib as mpl from matplotlib.font_manager import FontProperties #labels7 = ['neutral', 'angry', 'surprise', 'disgust', 'fear', 'happy', 'sad'] #labels6 = ['angry', 'surprise', 'disgust', 'fear', 'happy', 'sad'] #labels7v2 = ['angry', 'surprise', 'disgust', 'fear', 'happy', 'sad', 'contempt'] zhCN=True labels7 = ['平静', '愤怒', '惊讶', '厌恶', '害怕', '快乐', '悲伤'] labels6 = ['愤怒', '惊讶', '厌恶', '害怕', '快乐', '悲伤'] labels7v2 = ['愤怒', '惊讶', '厌恶', '害怕', '快乐', '悲伤', '轻视'] def checkDit(value, df): temp=str(value) l=len(temp) if l<(df+3): return temp else: format='%0.'+str(df)+'f' nt=format%value if len(temp.split('.')[0])>1: return nt[0:(3+df)] else: return nt[0:(2+df)] ##change the cmap for Gray or Color display. def plot_confusion_matrix(cm, tag, labels, title=None, cmap = plt.cm.binary, details=False, df=2, colorbar=True): fsw=len(labels)*1.7 fsh=len(labels)*1.45 fsize=int(len(labels)/3+20) if zhCN: #font={'family':'Simhei','weight':'bold','size':str(fsize)} #plt.rc(['font',font]) #font=FontProperties(family='Simhei',size=fsize) mpl.rcParams['font.sans-serif']=['SimHei'] #用来正常显示中文标签 mpl.rcParams['axes.unicode_minus']=False #用来正常显示负号 print('Font size: %d'%fsize) plt.figure(figsize=(fsw, fsh)) ind_array = np.arange(len(labels)) x, y = np.meshgrid(ind_array, ind_array) format="%0."+str(df)+"f" thresshold=1.0/10**(df) print(thresshold) #format="%0.1f" for x_val, y_val in zip(x.flatten(), y.flatten()): c = cm[y_val][x_val] if x_val==y_val: plt.text(x_val, y_val, checkDit(c,df), color='white', fontsize=fsize, va='center', ha='center') #if c <thresshold or c==100: # plt.text(x_val, y_val, '%d'%(int(c)), color='white', fontsize=fsize, va='center', ha='center') ##elif c==100: ## plt.text(x_val, y_val, format %(c,), color='red', fontsize=fsize, va='center', ha='center') #else: # plt.text(x_val, y_val, format %(c), color='white', fontsize=fsize, va='center', ha='center') else: plt.text(x_val, y_val, checkDit(c,df), color='black', fontsize=fsize, va='center', ha='center') #if c < thresshold or c==100: # plt.text(x_val, y_val, '%d'%(int(c)), color='black', fontsize=fsize, va='center', ha='center') ##elif c>0: ## plt.text(x_val, y_val, format %(c), color='blue', fontsize=fsize, va='center', ha='center') #else: # plt.text(x_val, y_val, format %(c), color='black', fontsize=fsize, va='center', ha='center') tick_marks = np.array(range(len(labels)))+1.0 plt.gca().set_xticks(tick_marks, minor = True) plt.gca().set_yticks(tick_marks, minor = True) plt.gca().xaxis.set_ticks_position('none') plt.gca().yaxis.set_ticks_position('none') #plt.grid(True, which='minor', linestyle='-') plt.gcf().subplots_adjust(bottom=0.18) plt.gcf().subplots_adjust(left=0.17) plt.imshow(cm, interpolation='nearest', cmap=cmap) if title is not None: plt.title(title) if colorbar: cbar=plt.colorbar() if zhCN: cbar.set_label('准确率 (%)', size=fsize) else: cbar.set_label('Accuracy (%)', size=fsize) for t in cbar.ax.get_yticklabels(): t.set_fontsize(fsize) xlocations = np.array(range(len(labels))) if zhCN: plt.xticks(xlocations, labels, size=fsize) else: plt.xticks(xlocations, labels, size=fsize, rotation=60) #plt.xticks(xlocations, labels, rotation=90) plt.yticks(xlocations, labels, size=fsize) if details: plt.ylabel('GroundTruth') plt.xlabel('Predict') plt.savefig(tag+'.jpg') plt.close() if __name__=='__main__': #cm=[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.3384615384615385, 0.05333333333333334, 0.0, 0.011904761904761904, 0.0, 0.013333333333333334, 
0.02702702702702703], [0.19487179487179487, 0.0, 0.6912751677852349, 0.0, 0.0, 0.05333333333333334, 0.0], [0.3487179487179487, 0.013333333333333334, 0.0, 0.07142857142857142, 0.0, 0.02666666666666667, 0.06756756756756757], [0.29743589743589743, 0.0, 0.22818791946308725, 0.011904761904761904, 0.0, 0.013333333333333334, 0.0], [0.046153846153846156, 0.0, 0.006711409395973154, 0.0, 0.0, 0.9333333333333333, 0.0], [0.3435897435897436, 0.013333333333333334, 0.0, 0.011904761904761904, 0.0, 0.0, 0.06756756756756757]] #plot_confusion_matrix(cm, 'MStest', labels7) #cm=[[0.8071065989847716, 0.02666666666666667, 0.16, 0.047619047619047616, 0.03125, 0.0196078431372549, 0.02702702702702703], [0.27411167512690354, 0.08, 0.02666666666666667, 0.047619047619047616, 0.0, 0.013071895424836602, 0.06756756756756757], [0.07614213197969544, 0.0, 0.8133333333333334, 0.05952380952380952, 0.0, 0.0392156862745098, 0.02702702702702703], [0.27411167512690354, 0.02666666666666667, 0.02, 0.08333333333333333, 0.0, 0.032679738562091505, 0.17567567567567569], [0.116751269035533, 0.02666666666666667, 0.4066666666666667, 0.05952380952380952, 0.03125, 0.013071895424836602, 0.0], [0.06091370558375635, 0.013333333333333334, 0.03333333333333333, 0.047619047619047616, 0.020833333333333332, 0.8366013071895425, 0.013513513513513514], [0.233502538071066, 0.09333333333333334, 0.013333333333333334, 0.03571428571428571, 0.041666666666666664, 0.0196078431372549, 0.12162162162162163]] #plot_confusion_matrix(cm, 'FACE++') Test1=[[88.5714285714286,0,4.28571428571429,5,0.714285714285714,1.42857142857143], [0,95.7142857142857,0,2.85714285714286,0,1.42857142857143], [2.14285714285714,0,92.1428571428572,1.42857142857143,0,4.28571428571429], [1.42857142857143,5.71428571428571,2.14285714285714,85.0000000000000,2.14285714285714,3.57142857142857], [0.714285714285714,0,0,0.714285714285714,98.5714285714286,0], [2.14285714285714,0,5,4.28571428571429,0,88.5714285714286]] plot_confusion_matrix(Test1, 'Test1', labels6) Test2=[[97.7777777777778,0,0.740740740740741,0,0,0,1.48148148148148], [0,98.7755102040816,0,0,0,0,1.22448979591837], [0.568181818181818,0,99.4318181818182,0,0,0,0], [0,0,0,100,0,0,0], [0,0,0,0,100,0,0], [1.19047619047619,0,0,0,0,98.8095238095238,0], [0,0,0,0,0,0,100]] plot_confusion_matrix(Test2, 'Test2', labels7v2)
python
""" ===================================================== Exporting a fitted Earth models as a sympy expression ===================================================== A simple example returning a sympy expression describing the fit of a sine function computed by Earth. """ import numpy from pyearth import Earth from pyearth import export # Create some fake data numpy.random.seed(2) m = 1000 n = 10 X = 10 * numpy.random.uniform(size=(m, n)) - 40 y = 100 * \ (numpy.sin((X[:, 6])) - 4.0) + \ 10 * numpy.random.normal(size=m) # Fit an Earth model model = Earth(max_degree=2, minspan_alpha=.5, verbose=False) model.fit(X, y) print(model.summary()) #return sympy expression print('Resulting sympy expression:') print(export.export_sympy(model))
python
from time import sleep from pysphere import VITask, FaultTypes from pysphere.vi_virtual_machine import VIVirtualMachine from pysphere.resources.vi_exception import VIException, VIApiException from pysphere.vi_mor import VIMor from pysphere.vi_task import VITask import ssl import pypacksrc import re, subprocess def vs_connect(host, user, password, unverify=True): if unverify: try: ssl._create_default_https_context = ssl._create_unverified_context except: pass con = VIServer() con.connect(host, user,password,'/var/log/pysphere.log') return con def find_vm(vCenterserver, user, password, name): con = vs_connect(vCenterserver, user, password, unverify=True) try: vm = con.get_vm_by_name(name) return vm except VIException: return None def get_RP_by_name(host, user, password, name): con = vs_connect(host, user, password, unverify=True) rps = con.get_resource_pools() for mor, path in rps.iteritems(): if re.match('.*%s' % name,path): return mor return None def run_post_script(name,ip, post_script): retcode = subprocess.call([post_script,name,ip]) if retcode < 0: resp = 'ERROR: %s %s %s : Returned a non-zero result' % (post_script,name,ip) return resp def get_vm_ip_addresses(vCenterserver, username, password,vm_name, ipv6=False, maxwait=120): vm_obj = find_vm(vCenterserver, username, password, vm_name) net_info = None waitcount = 0 while net_info is None: if waitcount > maxwait: break net_info = vm_obj.get_property('net',False) waitcount += 5 sleep(5) if net_info: return net_info return None def get_NIC_address_per_connected_net(vCenterserver, username, password,vm_name, net_name, ipv6=False, maxwait=120): vm_obj = find_vm(vCenterserver, username, password, vm_name) net_info = None waitcount = 0 while net_info is None: if waitcount > maxwait: break net_info = vm_obj.get_property('net',False) waitcount += 5 sleep(5) if net_info: for i in range(len(net_info)): for ip in net_info[i]['ip_addresses']: if ipv6 and re.match('\d{1,4}\:.*',ip) and not re.match('fe83\:.*',ip): if(net_info[i]['network']==net_name): return ip elif not ipv6 and re.match('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}',ip) and ip != '127.0.0.1': if(net_info[i]['network']==net_name): return ip return None def get_dvSwitchs_by_DCname(vCenterserver, username, password, datacentername): con = vs_connect(vCenterserver, username, password) dcmor = [k for k,v in con.get_datacenters().items() if v==datacentername][0] dcprops = VIProperty(con, dcmor) nfmor = dcprops.networkFolder._obj dvswitch_mors = con._retrieve_properties_traversal(property_names=['name'],from_node=nfmor, obj_type = 'DistributedVirtualSwitch') respdict={} for dvswitch_mor in dvswitch_mors: respdict[dvswitch_mor.PropSet[0]._val] = dvswitch_mor.Obj return respdict def get_dvSwitchuuid_by_dvsname_and_DC(vCenterserver, username, password, datacentername, dvSname): con = vs_connect(vCenterserver, username, password) dcmor = [k for k,v in con.get_datacenters().items() if v==datacentername][0] dcprops = VIProperty(con, dcmor) nfmor = dcprops.networkFolder._obj dvswitch_mors = con._retrieve_properties_traversal(property_names=['name',"uuid"],from_node=nfmor, obj_type = 'DistributedVirtualSwitch') for dvswitch_mor in dvswitch_mors: if dvswitch_mor.PropSet[0]._val == dvSname: return dvswitch_mor.PropSet[1]._val return "Failure, dvswitch not found" def get_portgroupname_by_ref(vCenterserver, username, password,datacentername, pgRef): con = vs_connect(vCenterserver, username, password) dcmor = [k for k,v in con.get_datacenters().items() if v==datacentername][0] dcprops = VIProperty(con, dcmor) 
nfmor = dcprops.networkFolder._obj portgroup_mors = con._retrieve_properties_traversal(property_names=['name','key'],from_node=nfmor, obj_type = 'DistributedVirtualPortgroup') for portgroup_mor in portgroup_mors: ref=portgroup_mor.get_element_propSet()[0].get_element_val() if ref==pgRef: return portgroup_mor.get_element_propSet()[1].get_element_val() return None def get_portgroupref_by_name(vCenterserver, username, password,datacentername, PGname): con = vs_connect(vCenterserver, username, password) dcmor = [k for k,v in con.get_datacenters().items() if v==datacentername][0] dcprops = VIProperty(con, dcmor) nfmor = dcprops.networkFolder._obj portgroup_mors = con._retrieve_properties_traversal(property_names=['name','key'],from_node=nfmor, obj_type = 'DistributedVirtualPortgroup') for portgroup_mor in portgroup_mors: name = portgroup_mor.get_element_propSet()[1].get_element_val() if name==PGname: return portgroup_mor.get_element_propSet()[0].get_element_val() return None def get_portgroup_by_dvSwitchname(vCenterserver, username, password, datacentername, dvSwitchname): con = vs_connect(vCenterserver, username, password) dcmor = [k for k,v in con.get_datacenters().items() if v==datacentername][0] dcprops = VIProperty(con, dcmor) nfmor = dcprops.networkFolder._obj portgroup_mors = con._retrieve_properties_traversal(property_names=['name','portgroup'],from_node=nfmor, obj_type = 'VmwareDistributedVirtualSwitch') RespDic={} for portgroup_mor in portgroup_mors: if (portgroup_mor.get_element_propSet()[0].get_element_val()==dvSwitchname): pgRefs = portgroup_mor.get_element_propSet()[1].get_element_val().ManagedObjectReference for pgRef in pgRefs: portgroup_mors = con._retrieve_properties_traversal(property_names=['name','key'],from_node=nfmor, obj_type = 'DistributedVirtualPortgroup') for portgroup_mor in portgroup_mors: ref=portgroup_mor.get_element_propSet()[0].get_element_val() if ref==pgRef: name = portgroup_mor.get_element_propSet()[1].get_element_val() RespDic[name]=pgRef return RespDic from pysphere import MORTypes from pysphere import VIServer, VIProperty from pysphere.resources import VimService_services as VI def create_portgroup_in_host(vCenterserver, username, password, host, pgname, vswitchname, vlan_id): resp = "succeeded" con = None try: con = vs_connect(vCenterserver, username, password) hostmor = [k for k, v in con.get_hosts().items() if v == host][0] prop = VIProperty(con, hostmor) network_system = prop.configManager.networkSystem._obj request = VI.AddPortGroupRequestMsg() _this = request.new__this(network_system) _this.set_attribute_type(network_system.get_attribute_type()) request.set_element__this(_this) portgrp = request.new_portgrp() portgrp.set_element_name(pgname) portgrp.set_element_vlanId(int(vlan_id)) portgrp.set_element_vswitchName(vswitchname) portgrp.set_element_policy(portgrp.new_policy()) request.set_element_portgrp(portgrp) con._proxy.AddPortGroup(request) except Exception, error: resp = str_remove_specialchars(error) if con: con.disconnect() return resp def get_standardvS_by_DCname(vCenterserver, username, password, datacentername): con = vs_connect(vCenterserver, username, password) dcmor = [k for k,v in con.get_datacenters().items() if v==datacentername][0] dcprops = VIProperty(con, dcmor) nfmor = dcprops.networkFolder._obj dvswitch_mors = con._retrieve_properties_traversal(property_names=['name'],from_node=nfmor, obj_type = 'Network') respdict={} for dvswitch_mor in dvswitch_mors: var=dvswitch_mor.get_element_obj().lower() if 'network' in var : 
respdict[dvswitch_mor.PropSet[0]._val] = dvswitch_mor.Obj return respdict def vs_find_datacenter_by_name(vCenterserver, user, password, name): response = "failure datcenter not found" if name.isspace() or not(name) or (name=="None"): return "None" con = None try: con = vs_connect(vCenterserver, user, password) rps = con.get_datacenters() for mor, path in rps.iteritems(): if re.match('.*%s' % name, mor): response = str(path) break except Exception, error: response = str_remove_specialchars( error ) if con: con.disconnect() return response def str_remove_specialchars( s ): resp = None if hasattr(s, 'status') and hasattr(s, 'message'): resp = "provider.status: " + str(s.status) + " provider.message: failure "+ str(s.message) else: resp = "failure " + str(s) response = resp response = response.replace(pypacksrc.dcvt_delimiter," ") return response def add_nic_vm_and_connect_to_net(vCenterserver, username, password, datacentername, vm, dvswitch_uuid, portgroupKey, network_name="VM Network", nic_type="vmxnet3", network_type="standard"): ### add a NIC # The network Name must be set as the device name to create the NIC. # Different network card types are: "VirtualE1000", "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet", "VirtualNmxnet2", "VirtualVmxnet3" net_device = None con = vs_connect(vCenterserver, username, password) vm_obj = con.get_vm_by_name(vm,datacenter=datacentername) if not vm_obj: raise Exception("VM %s not found" % vm) #Find nic device for dev in vm_obj.properties.config.hardware.device: if dev._type in ["VirtualE1000", "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet","VirtualNmxnet2", "VirtualVmxnet3"]: net_device = dev._obj break request = VI.ReconfigVM_TaskRequestMsg() _this = request.new__this(vm_obj._mor) _this.set_attribute_type(vm_obj._mor.get_attribute_type()) request.set_element__this(_this) spec = request.new_spec() dev_change = spec.new_deviceChange() dev_change.set_element_device(net_device) #dev_change.set_element_operation("edit") if network_name: dev_change.set_element_operation("add") if nic_type == "e1000": nic_ctlr = VI.ns0.VirtualE1000_Def("nic_ctlr").pyclass() elif nic_type == "e1000e": nic_ctlr = VI.ns0.VirtualE1000e_Def("nic_ctlr").pyclass() elif nic_type == "pcnet32": nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass() elif nic_type == "vmxnet": nic_ctlr = VI.ns0.VirtualVmxnet_Def("nic_ctlr").pyclass() elif nic_type == "vmxnet2": nic_ctlr = VI.ns0.VirtualVmxnet2_Def("nic_ctlr").pyclass() elif nic_type == "vmxnet3": nic_ctlr = VI.ns0.VirtualVmxnet3_Def("nic_ctlr").pyclass() if network_type == "standard": # Standard switch nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass() nic_backing.set_element_deviceName(network_name) elif network_type == "dvs": nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def("nic_backing_port").pyclass() nic_backing_port.set_element_switchUuid(dvswitch_uuid) nic_backing_port.set_element_portgroupKey(portgroupKey) # http://www.vmware.com/support/developer/vc-sdk/visdk400pubs/ReferenceGuide/vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo.html nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def("nic_backing").pyclass() nic_backing.set_element_port(nic_backing_port) # How they do it in powershell # http://www.lucd.info/2010/03/04/dvswitch-scripting-part-8-get-and-set-network-adapters/ # How they do it in ruby # https://github.com/fog/fog/pull/1431/files nic_ctlr.set_element_addressType("generated") 
        nic_ctlr.set_element_backing(nic_backing)
        nic_ctlr.set_element_key(4)
        dev_change.set_element_device(nic_ctlr)
        spec.set_element_deviceChange([dev_change])
        request.set_element_spec(spec)
        ret = con._proxy.ReconfigVM_Task(request)._returnval
        #Wait for the task to finish
        task = VITask(ret, con)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            return "VM successfully reconfigured"
        elif status == task.STATE_ERROR:
            return "failure reconfiguring vm: " + str(task.get_error_message())
    else:
        return "failure reconfiguring vm: network_name is mandatory"

def disconnect_nic_from_network(vCenterserver, username, password, datacentername, vmname, dvswitch_uuid, portgroupKey, network_name="VM Network", nic_type="vmxnet3", network_type="standard"):
    con = vs_connect(vCenterserver, username, password)
    vm_obj = con.get_vm_by_name(vmname, datacenter=datacentername)
    #Disconnect 3rd adapter if it is connected to network "VM Network"
    #network_name = "VM Network"
    device_name = "Network adapter 3"
    #Find Virtual Nic device
    net_device = None
    for dev in vm_obj.properties.config.hardware.device:
        if (dev._type in ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32", "VirtualVmxnet", "VirtualNmxnet2", "VirtualVmxnet3"]
                and dev.deviceInfo.summary == network_name
                and dev.deviceInfo.label == device_name):
            net_device = dev._obj
            break
    if not net_device:
        con.disconnect()
        raise Exception("The vm seems to lack a Virtual Nic")
    #Disconnect the device
    net_device.Connectable.Connected = False
    #Invoke ReconfigVM_Task
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()
    dev_change = spec.new_deviceChange()
    dev_change.set_element_device(net_device)
    dev_change.set_element_operation("edit")
    spec.set_element_deviceChange([dev_change])
    request.set_element_spec(spec)
    ret = con._proxy.ReconfigVM_Task(request)._returnval
    #Wait for the task to finish
    task = VITask(ret, con)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "VM successfully reconfigured"
    elif status == task.STATE_ERROR:
        print "Error reconfiguring vm:", task.get_error_message()
    con.disconnect()

def get_vm_nics(vCenterserver, username, password, datacentername, vm_name):
    """To retrieve NIC status the VM should be powered on."""
    con = vs_connect(vCenterserver, username, password)
    net_device = None
    vm_obj = con.get_vm_by_name(vm_name, datacenter=datacentername)
    if not vm_obj:
        raise Exception("VM %s not found" % vm_name)
    respdict = {}
    sVSName = None
    dvs = None
    #Find nic device
    for dev in vm_obj.properties.config.hardware.device:
        if (dev._type in ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32", "VirtualVmxnet", "VirtualNmxnet2", "VirtualVmxnet3"]
                and hasattr(dev, "backing") and hasattr(dev.backing, "deviceName")):
            label = dev.deviceInfo.label
            sVSName = str(dev.backing.deviceName)
            net_device = dev._obj
            status = net_device.Connectable.Connected
            respdict[label] = [sVSName, status]
        if (dev._type in ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32", "VirtualVmxnet", "VirtualNmxnet2", "VirtualVmxnet3"]
                and hasattr(dev, "backing") and hasattr(dev.backing, "port")):
            label = dev.deviceInfo.label
            #label=unicode(label1, "utf-8")
            pgRef = str(dev.backing.port.portgroupKey)
            PGname = get_portgroupname_by_ref(vCenterserver, username, password, datacentername, pgRef)
            net_device = dev._obj
            status = net_device.Connectable.Connected
            respdict[label] = [PGname, status]
        if (dev._type in
["VirtualE1000", "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet","VirtualNmxnet2", "VirtualVmxnet3"] and not hasattr(dev.backing, "deviceName") and not hasattr(dev.backing, "port") ): label = dev.deviceInfo.label respdict[label]=["No connexion","no status"] return respdict def remove_nic_vm(vCenterserver, username, password, datacentername, vm_name, networklabel): con = vs_connect(vCenterserver, username, password) net_device = None vm_obj = con.get_vm_by_name(vm_name,datacenter=datacentername) if not vm_obj: raise Exception("VM %s not found" % vm_name) #Find nic device for dev in vm_obj.properties.config.hardware.device: if (dev._type in ["VirtualE1000", "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet","VirtualNmxnet2", "VirtualVmxnet3"] and hasattr(dev, "backing") and dev.deviceInfo.label == networklabel): net_device = dev._obj break if not net_device: raise Exception("The vm_name seems to lack a Virtual Nic") request = VI.ReconfigVM_TaskRequestMsg() _this = request.new__this(vm_obj._mor) _this.set_attribute_type(vm_obj._mor.get_attribute_type()) request.set_element__this(_this) spec = request.new_spec() dev_change = spec.new_deviceChange() dev_change.set_element_operation("remove") dev_change.set_element_device(net_device) # Submit the device change spec.set_element_deviceChange([dev_change]) request.set_element_spec(spec) ret = con._proxy.ReconfigVM_Task(request)._returnval # Wait for the task to finish task = VITask(ret, con) status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) if status == task.STATE_SUCCESS: return "VM successfully reconfigured" elif status == task.STATE_ERROR: return "failure reconfiguring vm_name: " + str(vm_obj, task.get_error_message()) else: return " failure VM not found" def connect_publicNIC_to_publicNet(vCenterserver, username, password, datacentername, vm_name, network_name, netlabel): ''' Switch existing NIC to a different network con: VIServer object datacentername: datacenter name vm_name: VIVirtualMachine name network_name: network name ''' con = vs_connect(vCenterserver, username, password) net_device = None vm_obj = con.get_vm_by_name(vm_name,datacenter=datacentername) if not vm_obj: raise Exception("VM %s not found" % vm_name) #Find nic device for dev in vm_obj.properties.config.hardware.device: if (dev._type in ["VirtualE1000", "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet","VirtualNmxnet2", "VirtualVmxnet3"] and hasattr(dev, "deviceInfo") and (dev.deviceInfo.label == netlabel)): net_device = dev._obj if not net_device: raise Exception("The vm_name seems to lack a Virtual Nic") if hasattr(net_device.Backing,"DeviceName"): net_device.Connectable.Connected = True net_device.Backing.set_element_deviceName(network_name) if hasattr(net_device.Backing,"Port"): #TODO convert device baching net_device.Connectable.Connected = True request = VI.ReconfigVM_TaskRequestMsg() _this = request.new__this(vm_obj._mor) _this.set_attribute_type(vm_obj._mor.get_attribute_type()) request.set_element__this(_this) spec = request.new_spec() dev_change = spec.new_deviceChange() dev_change.set_element_device(net_device) dev_change.set_element_operation("edit") spec.set_element_deviceChange([dev_change]) request.set_element_spec(spec) ret = con._proxy.ReconfigVM_Task(request)._returnval #Wait for the task to finish task = VITask(ret, con) status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) if status == task.STATE_SUCCESS: return "VM successfully reconfigured" elif status == task.STATE_ERROR: return "failure reconfiguring vm_name: " + 
str(task.get_error_message()) def disconnect_publicNIC_from_publicNet(vCenterserver, username, password, datacentername, vm_name, netlabel): ''' Switch existing NIC to a different network con: VIServer object datacentername: datacenter name vm_name: VIVirtualMachine name ''' con = vs_connect(vCenterserver, username, password) net_device = None vm_obj = con.get_vm_by_name(vm_name,datacenter=datacentername) if not vm_obj: raise Exception("VM %s not found" % vm_name) #Find nic device for dev in vm_obj.properties.config.hardware.device: if (dev._type in ["VirtualE1000", "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet","VirtualNmxnet2", "VirtualVmxnet3"] and hasattr(dev, "deviceInfo") and (dev.deviceInfo.label == netlabel)): net_device = dev._obj if not net_device: raise Exception("The vm_name seems to lack a Virtual Nic") if hasattr(net_device.Backing,"DeviceName"): net_device.Connectable.Connected = False if hasattr(net_device.Backing,"Port"): net_device.Connectable.Connected = False #TODO convert device baching request = VI.ReconfigVM_TaskRequestMsg() _this = request.new__this(vm_obj._mor) _this.set_attribute_type(vm_obj._mor.get_attribute_type()) request.set_element__this(_this) spec = request.new_spec() dev_change = spec.new_deviceChange() dev_change.set_element_device(net_device) dev_change.set_element_operation("edit") spec.set_element_deviceChange([dev_change]) request.set_element_spec(spec) ret = con._proxy.ReconfigVM_Task(request)._returnval #Wait for the task to finish task = VITask(ret, con) status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) if status == task.STATE_SUCCESS: return "VM successfully reconfigured" elif status == task.STATE_ERROR: return "failure reconfiguring vm_name: " + str(task.get_error_message()) def add_new_nic(server, datacentername, vm, network_name): ''' add new NIC to a VM server: VIServer object datacentername: datacenter name vm: VIVirtualMachine name network_name: network name ''' net_device = None vm_obj = server.get_vm_by_name(vm,datacenter=datacentername) if not vm_obj: raise Exception("VM not found") request = VI.ReconfigVM_TaskRequestMsg() _this = request.new__this(vm_obj._mor) _this.set_attribute_type(vm_obj._mor.get_attribute_type()) request.set_element__this(_this) spec = request.new_spec() #add a nic. 
dev_change = spec.new_deviceChange() dev_change.set_element_operation("add") nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass() nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass() nic_backing.set_element_deviceName(network_name) nic_ctlr.set_element_addressType("generated") nic_ctlr.set_element_backing(nic_backing) nic_ctlr.set_element_key(4) dev_change.set_element_device(nic_ctlr) spec.set_element_deviceChange([dev_change]) request.set_element_spec(spec) ret = server._proxy.ReconfigVM_Task(request)._returnval #Wait for the task to finish task = VITask(ret, server) status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) if status == task.STATE_SUCCESS: return "VM successfully reconfigured" elif status == task.STATE_ERROR: return "failure reconfiguring vm: " + str(task.get_error_message()) def get_network_interfaces(vm_obj): vif_types = ["VirtualEthernetCard", "VirtualE1000", "VirtualE1000e", "VirtualPCNet32", "VirtualVmxnet"] vifs = [] for device in vm_obj.properties.config.hardware.device: if device._type in vif_types: vifs.append(device) return vifs def change_dvs_net(server, datacentername, vm, pg_map): """ Reconfigure dVS portgroups according to the mappings in the pg_map dict server: VIServer object datacentername: datacenter name vm_obj: VIVirtualMachine object pg_map: dict must contain the source portgroup as key and the destination portgroup as value """ vm_obj = server.get_vm_by_name(vm,datacenter=datacentername) if not vm_obj: raise Exception("VM %s not found" % vm) #Find virtual NIC devices if vm_obj: net_device = [] for dev in vm_obj.properties.config.hardware.device: if dev._type in ["VirtualE1000", "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet","VirtualNmxnet2", "VirtualVmxnet3"]: net_device.append(dev) # Throw an exception if there is no NIC found if len(net_device) == 0: raise Exception("The vm seems to lack a Virtual Nic") # Use pg_map to set the new Portgroups for dev in net_device: old_portgroup = dev.Backing.Port.PortgroupKey if pg_map.has_key(old_portgroup): dev.backing.port._obj.set_element_portgroupKey(pg_map[old_portgroup]) dev.backing.port._obj.set_element_portKey('') # Invoke ReconfigVM_Task request = VI.ReconfigVM_TaskRequestMsg() _this = request.new__this(vm_obj._mor) _this.set_attribute_type(vm_obj._mor.get_attribute_type()) request.set_element__this(_this) # Build a list of device change spec objects devs_changed = [] for dev in net_device: spec = request.new_spec() dev_change = spec.new_deviceChange() dev_change.set_element_device(dev._obj) dev_change.set_element_operation("edit") devs_changed.append(dev_change) # Submit the device change list spec.set_element_deviceChange(devs_changed) request.set_element_spec(spec) ret = server._proxy.ReconfigVM_Task(request)._returnval # Wait for the task to finish task = VITask(ret, server) status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) if status == task.STATE_SUCCESS: return "VM successfully reconfigured" elif status == task.STATE_ERROR: return "failure reconfiguring vm: " + str(task.get_error_message()) else: return " failure VM not found" def poweron_vm(vCenterserver, username, password,datacentername,vm_name): con = vs_connect(vCenterserver, username, password) vm_obj = con.get_vm_by_name(vm_name,datacenter=datacentername) vmstatus=vm_obj.get_status() if (vmstatus=='POWERED OFF'): vm_obj.power_on() return "VM successfully powered on" return "VM on uncorrect status: "+ vmstatus def poweroff_vm(vCenterserver, username, 
password,datacentername,vm_name): con = vs_connect(vCenterserver, username, password) vm_obj = con.get_vm_by_name(vm_name,datacenter=datacentername) vmstatus=vm_obj.get_status() if (vmstatus=='POWERED ON'): vm_obj.power_off() return "VM successfully powerer off" return "VM on uncorrect status: "+ vmstatus def delete_vm(vCenterserver, username, password,datacentername,vm_name): con = vs_connect(vCenterserver, username, password) vm_obj = con.get_vm_by_name(vm_name,datacenter=datacentername) vmstatus=vm_obj.get_status() if (vmstatus=='POWERED OFF'): vm_obj.destroy() return "VM successfully deleted" return "VM on uncorrect status: "+ vmstatus def reboot_vm(vCenterserver, username, password,datacentername,vm_name): con = vs_connect(vCenterserver, username, password) vm_obj = con.get_vm_by_name(vm_name,datacenter=datacentername) vmstatus=vm_obj.get_status() if (vmstatus=='POWERED ON'): vm_obj.reboot_guest() return "VM successfully rebooted" return "VM on uncorrect status: "+ vmstatus def list_available_template(vCenterserver, username, password): resp=[] con = vs_connect(vCenterserver, username, password) template_list = con.get_registered_vms(advanced_filters={'config.template':True}) for t in template_list: vm = con.get_vm_by_path(t) prop = vm.get_properties() resp.append(prop['name']) return resp def list_snapshotname_per_vm(vCenterserver, username, password,datacentername,vm_name): con = vs_connect(vCenterserver, username, password) vm = con.get_vm_by_name(vm_name,datacenter=datacentername) resp=[] if vm: snapshots = vm.get_snapshots() for snapshot in snapshots: name= snapshot.get_name() resp.append(name) return resp def list_snapshotpath_per_vm(vCenterserver, username, password,datacentername,vm_name): con = vs_connect(vCenterserver, username, password) vm = con.get_vm_by_name(vm_name,datacenter=datacentername) resp=[] if vm: snapshots = vm.get_snapshots() for snapshot in snapshots: path= snapshot.get_path() resp.append(path) return resp def createsnapshot_per_vm(vCenterserver, username, password,datacentername,vm_name,snapshotname): con = vs_connect(vCenterserver, username, password) vm = con.get_vm_by_name(vm_name, datacenter=datacentername) if vm: r = vm.create_snapshot(name=snapshotname) snapshots = list_snapshotname_per_vm(vCenterserver, username, password,datacentername,vm_name) if(snapshotname in snapshots): return "snapshot creation succeeded" return "Failure" def delete_snapshot_per_snapshotpath(vCenterserver, username, password, datacentername, vm_name, path): con = vs_connect(vCenterserver, username, password) vm = con.get_vm_by_name(vm_name, datacenter = datacentername) if vm: r = vm.delete_snapshot_by_path(path = path) paths = list_snapshotpath_per_vm(vCenterserver, username, password, datacentername, vm_name) if(not(path in paths)): return "snapshot deletion succeeded" return "Failure" def delete_snapshot_per_snapshotname(vCenterserver, username, password, datacentername, vm_name, name): con = vs_connect(vCenterserver, username, password) vm = con.get_vm_by_name(vm_name, datacenter = datacentername) if vm: r = vm.delete_named_snapshot(name = name) names = list_snapshotname_per_vm(vCenterserver, username, password, datacentername, vm_name) if(not(name in names)): return "snapshot deletion succeeded" return "Failure" #revert_to_named_snapshot def revert_to_snapshot_per_snapshotname(vCenterserver, username, password, datacentername, vm_name, snapshotname): con = vs_connect(vCenterserver, username, password) vm = con.get_vm_by_name(vm_name, datacenter = datacentername) if vm: 
try: r = vm.revert_to_named_snapshot(name = snapshotname) return r except VIException: return "failure" return "failure" def customizeNICS_settingIP_hostname_password(vCenterserver, username, password, vm_mor, NIC1,NIC2,hostname,adminpass ,os_type): """ :param vCenterserver: :param username: :param password: :param vm_mor: :param NIC1: :param NIC2: :param os_type: :param hostname: :param adminpass: :return: """ con = vs_connect(vCenterserver, username, password, unverify=True) request = VI.CustomizeVM_TaskRequestMsg() _this = request.new__this(vm_mor) _this.set_attribute_type(vm_mor.get_attribute_type()) request.set_element__this(_this) spec = request.new_spec() if os_type=="LINUX": identity = VI.ns0.CustomizationLinuxPrep_Def("identity").pyclass() identity.set_element_domain("domain name") hostName = VI.ns0.CustomizationFixedName_Def("hostName").pyclass() hostName.set_element_name(hostname) identity.set_element_hostName(hostName) spec.set_element_identity(identity) request.set_element_spec(spec) # TODO configure root password for linux os if os_type == "WIN": # customization = spec.new_customization() # spec.set_element_customization(customization) # globalIPSettings = customization.new_globalIPSettings() # customization.set_element_globalIPSettings(globalIPSettings) identity = VI.ns0.CustomizationSysprep_Def("identity").pyclass() spec.set_element_identity(identity) guiUnattended = identity.new_guiUnattended() guiUnattended.set_element_autoLogon(True) guiUnattended.set_element_autoLogonCount(1) if adminpass: passw = guiUnattended.new_password() guiUnattended.set_element_password(passw) passw.set_element_value(adminpass) passw.set_element_plainText(True) # http://msdn.microsoft.com/en-us/library/ms912391(v=winembedded.11).aspx guiUnattended.set_element_timeZone(85) # GMT Standard Time identity.set_element_guiUnattended(guiUnattended) userData = identity.new_userData() userData.set_element_fullName("PySphere") userData.set_element_orgName("PySphere") userData.set_element_productId("") computerName = VI.ns0.CustomizationFixedName_Def(hostname).pyclass() computerName.set_element_name(hostname.replace("_", "")) userData.set_element_computerName( computerName ) identity.set_element_userData(userData) identification = identity.new_identification() # TODO JOIN DOAMIN # identification.set_element_domainAdmin("MyDomainAdminUser") # domainAdminPassword = identification.new_domainAdminPassword() # domainAdminPassword.set_element_plainText(True) # domainAdminPassword.set_element_value("MyDomainAdminPassword") # identification.set_element_domainAdminPassword(domainAdminPassword) # identification.set_element_joinDomain("MyDomain") identity.set_element_identification(identification) globalIPSettings = spec.new_globalIPSettings() spec.set_element_globalIPSettings(globalIPSettings) if NIC1 and NIC2: nicSetting1 = spec.new_nicSettingMap() nicSetting2 = spec.new_nicSettingMap() spec.set_element_nicSettingMap([ getnicSetting(nicSetting1,NIC1), getnicSetting(nicSetting2,NIC2)]) elif NIC1: nicSetting1 = spec.new_nicSettingMap() spec.set_element_nicSettingMap([getnicSetting(nicSetting1, NIC1), ]) request.set_element_spec(spec) task = con._proxy.CustomizeVM_Task(request)._returnval vi_task = VITask(task, con) status = vi_task.wait_for_state([vi_task.STATE_SUCCESS, vi_task.STATE_ERROR]) return status def getnicSetting(nicSetting,NIC): adapter = nicSetting.new_adapter() if NIC['IP_SETTING'] == "FIXED": fixedip = VI.ns0.CustomizationFixedIp_Def("ipAddress").pyclass() fixedip.set_element_ipAddress(NIC['ip_address']) 
adapter.set_element_ip(fixedip) adapter.set_element_subnetMask(NIC['netmask']) if NIC['gateway']: adapter.set_element_gateway([NIC['gateway']]) if NIC['IP_SETTING']== "DHCP": dhcpip = VI.ns0.CustomizationDhcpIpGenerator_Def("ipAddress").pyclass() adapter.set_element_ip(dhcpip) nicSetting.set_element_adapter(adapter) return nicSetting
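
# --- Minimal usage sketch (not part of the original module; the vCenter address,
# --- credentials, datacenter and VM names below are hypothetical). The module
# --- targets Python 2 / pysphere, hence the print statements.
if __name__ == "__main__":
    VCENTER, USER, PASSWD = "vcenter.example.com", "administrator", "secret"
    DC, VM = "Datacenter1", "test-vm"

    # List every virtual NIC of the VM with its network and connection state.
    for label, (network, connected) in get_vm_nics(VCENTER, USER, PASSWD, DC, VM).items():
        print "%s -> %s (connected: %s)" % (label, network, connected)

    # Reconnect the first adapter to a standard-switch network.
    print connect_publicNIC_to_publicNet(VCENTER, USER, PASSWD, DC, VM,
                                         "VM Network", "Network adapter 1")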
python
import numpy as np from ss_generator import geometry def get_internal_coordinates_from_ca_list(ca_list): '''Get the list of ds, thetas and taus from a ca list.''' ds = [] thetas = [] taus = [] for i in range(len(ca_list) - 1): ds.append(np.linalg.norm(ca_list[i + 1] - ca_list[i])) for i in range(1, len(ca_list) - 1): thetas.append(geometry.angle(ca_list[i - 1] - ca_list[i], ca_list[i + 1] - ca_list[i])) for i in range(1, len(ca_list) - 2): taus.append(geometry.dihedral(ca_list[i - 1], ca_list[i], ca_list[i + 1], ca_list[i + 2])) return ds, thetas, taus def generate_segment_from_internal_coordinates(ds, thetas, taus): '''Generate a protein segment from a set of internal coordinates. Return a list of Ca coordinates. ''' # Make sure that the sizes of internal coordinates are correct if len(ds) < 3 or len(thetas) < 2 or len(taus) < 1 \ or len(ds) != len(thetas) + 1 or len(ds) != len(taus) + 2: raise Exception("Incompatible sizes of internal coordinates.") # Make the first three Ca atoms ca_list = [] ca_list.append(ds[0] * np.array([np.sin(thetas[0]),np.cos(thetas[0]), 0])) ca_list.append(np.array([0, 0, 0])) ca_list.append(np.array([0, ds[1], 0])) # Make the rest of Ca atoms for i in range(len(taus)): ca_list.append(geometry.cartesian_coord_from_internal_coord( ca_list[i], ca_list[i + 1], ca_list[i + 2], ds[i + 2], thetas[i + 1], taus[i])) return ca_list def get_peptide_bond_parameters(): '''Print peptide parameters.''' d = {'c_n_length' : 1.32869, 'n_ca_length' : 1.458, 'ca_c_length' : 1.52326, 'c_n_ca_angle' : np.radians(121.7), 'n_ca_c_angle' : np.radians(111.2), 'ca_c_n_angle' : np.radians(116.2), 'omega' : np.radians(180)} p1 = np.array([0, 0, 0]) p2 = np.array([0, 0, d['ca_c_length']]) p3 = p2 + d['c_n_length'] * np.array([ np.sin(d['ca_c_n_angle']), 0, -np.cos(d['ca_c_n_angle'])]) p4 = geometry.cartesian_coord_from_internal_coord( p1, p2, p3, d['n_ca_length'], d['n_ca_c_angle'], d['omega']) d['theta_c'] = geometry.angle(p4 - p1, p2 - p1) d['theta_n'] = geometry.angle(p1 - p4, p3 - p4) return d def get_n_for_pp_bond_forward(ca1, ca2, v_c): '''Get the coordinate of the N atom in a peptide bond. Inputs are the two ends of the peptide bond and the direction from ca1 to the position of C. ''' params = get_peptide_bond_parameters() x = geometry.normalize(ca1 - ca2) y = -geometry.normalize(v_c - np.dot(v_c, x) * x) return ca2 + params['n_ca_length'] * (np.cos(params['theta_n']) * x \ + np.sin(params['theta_n']) * y) def get_c_for_pp_bond_forward(ca1, ca2, v_n, z_sign=1): '''Get the coordinate of the C atom in a peptide bond. Inputs are the two ends of the peptide bond, the direction from ca1 to the position of the previous N and the sign of Z direction that is used to pick one solution from two. ''' params = get_peptide_bond_parameters() frame = geometry.create_frame_from_three_points(ca1 + v_n, ca1, ca2) beta = geometry.angle(v_n, ca2 - ca1) gamma = z_sign * np.arccos((np.cos(params['n_ca_c_angle']) - np.cos(params['theta_c']) * np.cos(beta)) \ / (np.sin(params['theta_c']) * np.sin(beta))) c_local = params['ca_c_length'] * np.array([np.sin(params['theta_c']) * np.cos(gamma), np.cos(params['theta_c']), np.sin(params['theta_c']) * np.sin(gamma)]) return ca1 + np.dot(np.transpose(frame), c_local) def get_o_for_peptide_bond(c, n, ca2): '''Get the coordinate of the O atom in a peptide bond.''' return geometry.cartesian_coord_from_internal_coord(ca2, n, c, 1.24, np.radians(125), 0) def thread_ca_list_forward(ca_list, initial_c_direction, z_sign=1): '''Thread backbones through a ca list. 
Return a list for residue dictionaries. ''' params = get_peptide_bond_parameters() # Make the initial residue residue_list = [{'ca' : ca_list[0], 'c' : ca_list[0] + params['ca_c_length'] * geometry.normalize(initial_c_direction)}] # Make the rest of residues for i in range(1, len(ca_list)): residue = {'ca' : ca_list[i]} v_c = residue_list[i - 1]['c'] - residue_list[i - 1]['ca'] residue['n'] = get_n_for_pp_bond_forward(ca_list[i - 1], ca_list[i], v_c) if i < len(ca_list) - 1: residue['c'] = get_c_for_pp_bond_forward(ca_list[i], ca_list[i + 1], residue['n'] - residue['ca'], z_sign=z_sign) residue['o'] = get_o_for_peptide_bond(residue_list[i - 1]['c'], residue['n'], residue['ca']) residue_list.append(residue) return residue_list
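
# Minimal usage sketch (not part of the original module): build a Ca trace from
# idealised internal coordinates and check that the same d/theta/tau values are
# recovered from it. The helix-like values below are illustrative only.
if __name__ == '__main__':
    n_res = 10
    ds = [3.8] * (n_res - 1)                   # Ca(i)-Ca(i+1) distances
    thetas = [np.radians(91.8)] * (n_res - 2)  # Ca-Ca-Ca virtual angles
    taus = [np.radians(49.5)] * (n_res - 3)    # Ca-Ca-Ca-Ca virtual torsions

    ca_list = generate_segment_from_internal_coordinates(ds, thetas, taus)
    ds2, thetas2, taus2 = get_internal_coordinates_from_ca_list(ca_list)
    print(np.allclose(ds, ds2), np.allclose(thetas, thetas2))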
python
from .base_api import BaseApi class CatalogApi(BaseApi): def _build_url(self, endpoint): catalog_endpoint = "/api/catalog_system" return self.base_url + catalog_endpoint + endpoint def get_category(self, category_id=1): endpoint = f"/pvt/category/{category_id}" return self._call_api(endpoint) def get_category_tree(self, level=3): endpoint = f"/pub/category/tree/{level}/" return self._call_api(endpoint) def get_brand(self, brand_id: int): endpoint = f"/pvt/brand/{brand_id}" return self._call_api(endpoint) def get_product_specification(self, product_id: int): endpoint = f"/pvt/products/{product_id}/specification" return self._call_api(endpoint) def get_product(self, product_id: int): endpoint = f"/pvt/products/ProductGet/{product_id}" return self._call_api(endpoint) def get_product_variations(self, product_id: int): endpoint = f"/pub/products/variations/{product_id}" return self._call_api(endpoint) def get_product_review_rate(self, product_id: int): # This one has an odd endpoint endpoint = f"/api/addon/pvt/review/GetProductRate/{product_id}" url = self.base_url + endpoint return self.get_result(url) def get_list_all_skus(self, page=1, page_size=1000): endpoint = f"/pvt/sku/stockkeepingunitids?page={page}&pagesize={page_size}" return self._call_api(endpoint) def get_sku(self, sku_id): endpoint = f"/pvt/sku/stockkeepingunitbyid/{sku_id}" return self._call_api(endpoint) def get_sales_channel(self): endpoint = f"/pvt/saleschannel/list" return self._call_api(endpoint) def get_sales_channel_by_id(self, sales_channel_id=1): endpoint = f"/pub/saleschannel/{sales_channel_id}" return self._call_api(endpoint) def get_seller_by_id(self, seller_id=1): endpoint = f"/pvt/seller/{seller_id}" return self._call_api(endpoint)
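
# Minimal usage sketch (not part of the original module): CatalogApi inherits its
# constructor, _call_api() and get_result() from BaseApi, whose signature is not
# shown here, so the instantiation below is an assumption.
if __name__ == "__main__":
    api = CatalogApi()  # hypothetical: credentials and base_url handled by BaseApi
    print(api.get_category_tree(level=2))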
python
import cv2 import numpy as np import matplotlib.pyplot as plt import os vid=cv2.VideoCapture('/Users/lazycoder/Desktop/IEEE/video.mp4') #img=cv2.imread('/Users/lazycoder/Desktop/IEEE/Screenshot 2020-11-06 at 7.50.01 PM.png') wht = 320 classFile = '/Users/lazycoder/Desktop/IEEE/coco.names.txt' classNames = [] confThreshold = 0.5 nmsThreshold = 0.3 # the more less it is, the more powerfull nms becomes with open(classFile,'rt') as f: classNames = f.read().rstrip('\n').split('\n') modelConfiguration = '/Users/lazycoder/Desktop/IEEE/YOLO/yolov3.cfg' modelWeights = '/Users/lazycoder/Desktop/IEEE/YOLO/yolov3.weights' net = cv2.dnn.readNetFromDarknet(modelConfiguration,modelWeights) net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV) net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU) def findobjects(outputs,img): hT, wT, cT = img.shape bbox = [] #will contain x,y,w &h classIds = [] confs = [] for outputs in outputs: for det in outputs: #we will call each box as a detection. scores = det[5:] #removing top 5 outputs classId = np.argmax(scores) confidence = scores[classId] if confidence > confThreshold: if classNames[classId]=="person": w,h = int(det[2]*wT) , int(det[3]*hT) #mutiplying as det[2] and so are in %. x,y = int((det[0]*wT)- w/2), int((det[1]*hT)- h/2) bbox.append([x,y,w,h]) classIds.append(classId) confs.append(float(confidence)) indices = cv2.dnn.NMSBoxes(bbox,confs,confThreshold,nmsThreshold) for i in indices: i = i[0] box = bbox[i] x,y,w,h = box[0], box[1], box[2], box[3] cv2.rectangle(img,(x,y),(x+w,y+h), (0,255,0),2) cv2.circle(img, (int(x+w/2), int(y+h/2)), 2, (0, 0, 255), 2) #locating center of each pedestrian total.append([x,y,w,h]) i=len(indices) while i>0: j=len(indices) #safe_count=0 #risk_count=0 while j>i: #print(data[i-1],data[j-1]) box1=bbox[indices[i-1][0]] x1,y1,w1,h1 = box1[0], box1[1], box1[2], box1[3] box2=bbox[indices[j-1][0]] x2,y2,w2,h2 = box2[0], box2[1], box2[2], box2[3] #distance formula: if(((int(x1+w1/2)-int(x2+w2/2))**2+(int(y1+h1/2)-int(y2+h2/2))**2)**1/2 < (x1+w1)*4): cv2.line(img,(int(x1+w1/2), int(y1+h1/2)),(int(x2+w2/2), int(y2+h2/2)),(255,0,0),1) cv2.rectangle(img,(x1,y1),(x1+w1,y1+h1), (0,0,255),2) cv2.rectangle(img,(x2,y2),(x2+w2,y2+h2), (0,0,255),2) red.append([x1,y1,w1,h1]) red.append([x2,y2,w2,h2]) j=j-1 i=i-1 def func(pct, allvalues): absolute = int(pct / 100.*np.sum(allvalues)) return "{:.1f}%\n({:d} g)".format(pct, absolute) while True: success, img = vid.read() blob = cv2.dnn.blobFromImage(img,1/255,(wht,wht),[0,0,0],1,crop=False) net.setInput(blob) layerNames = net.getLayerNames() outputNames=[layerNames[i[0]-1] for i in net.getUnconnectedOutLayers()] outputs = net.forward(outputNames) hT, wT, cT = img.shape red=[] total=[] green=[] findobjects(outputs, img) unique_data = [list(x) for x in set(tuple(x) for x in red)] RS = ['Risk Count','Safe Count'] data = [len(unique_data), len(total)-len(unique_data)] explode = (0.1, 0.3) colors = ("Red","Green") wp = { 'linewidth' : 1, 'edgecolor' : "Brown" } fig, ax = plt.subplots(figsize =(10, 7)) wedges, texts, autotexts=ax.pie(data, autopct = lambda pct: func(pct, data), explode = explode, labels = RS, shadow = True, colors = colors, startangle = 90, wedgeprops = wp, textprops = dict(color ="black")) ax.legend(wedges, RS,title ="Count",loc ="center left", bbox_to_anchor =(1, 0)) plt.setp(autotexts, size = 8, weight ="bold") ax.set_title("Social Distancing Monitor") plt.savefig('plot') pplot=cv2.imread('plot.png') cv2.putText(pplot,"Risk Count: 
{}".format(str(len(unique_data))),(20,40),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,250),2) cv2.putText(pplot,"Safe Count: {}".format(len(total)-len(unique_data)),(450,40),cv2.FONT_HERSHEY_SIMPLEX,1,(0,250,0),2) cv2.imshow('Social Distancing Monitor',pplot) cv2.imshow('Monitor',img) plt.close() #avoid memory leak os.remove('plot.png') if cv2.waitKey(1) & 0xFF ==ord('q'): break
python
# Copyright (C) 2020 FUJITSU # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from unittest import mock from tacker.common import exceptions from tacker.tests.unit import base from tacker.tests.unit import fake_request from tacker.tests.unit.vnfm.infra_drivers.kubernetes import fakes from tacker.vnfm.infra_drivers.kubernetes.k8s import translate_outputs class TestTransformer(base.TestCase): def setUp(self): super(TestTransformer, self).setUp() self.yaml_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "kubernetes_api_resource/") self.k8s_client_dict = fakes.fake_k8s_client_dict() self.transfromer = translate_outputs.Transformer( None, None, None, self.k8s_client_dict ) def test_deploy_k8s_create_false(self): kubernetes_objects = [] k8s_obj = fakes.fake_k8s_dict() kubernetes_objects.append(k8s_obj) self.assertRaises(exceptions.CreateApiFalse, self.transfromer.deploy_k8s, kubernetes_objects) @mock.patch.object(translate_outputs.Transformer, "_select_k8s_client_and_api") def test_deploy_k8s(self, mock_k8s_client_and_api): req = \ fake_request.HTTPRequest.blank( 'apis/apps/v1/namespaces/curryns/deployments') mock_k8s_client_and_api.return_value = req kubernetes_objects = [] k8s_obj = fakes.fake_k8s_dict() kubernetes_objects.append(k8s_obj) new_k8s_objs = self.transfromer.deploy_k8s(kubernetes_objects) self.assertEqual(type(new_k8s_objs), list) self.assertIsNotNone(new_k8s_objs) self.assertEqual(new_k8s_objs[0]['status'], 'Creating') def test_deployment(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['deployment.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'Deployment') self.assertEqual(k8s_objs[0].get('object').api_version, 'apps/v1') def test_api_service(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['api-service.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'APIService') self.assertEqual(k8s_objs[0].get('object').api_version, 'apiregistration.k8s.io/v1') def test_cluster_role(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['cluster-role.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'ClusterRole') self.assertEqual(k8s_objs[0].get('object').api_version, 'rbac.authorization.k8s.io/v1') def test_cluster_role_binding(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['cluster-role-binding.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'ClusterRoleBinding') self.assertEqual(k8s_objs[0].get('object').api_version, 'rbac.authorization.k8s.io/v1') def test_config_map(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( 
['config-map.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') self.assertEqual(k8s_objs[0].get('object').kind, 'ConfigMap') self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') def test_daemon_set(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['daemon-set.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'DaemonSet') self.assertEqual(k8s_objs[0].get('object').api_version, 'apps/v1') def test_horizontal_pod_autoscaler(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['horizontal-pod-autoscaler.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 'default') self.assertEqual(k8s_objs[0].get('object').kind, 'HorizontalPodAutoscaler') self.assertEqual(k8s_objs[0].get('object').api_version, 'autoscaling/v1') def test_job(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['job.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'Job') self.assertEqual(k8s_objs[0].get('object').api_version, 'batch/v1') def test_lease(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['lease.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 'default') self.assertEqual(k8s_objs[0].get('object').kind, 'Lease') self.assertEqual(k8s_objs[0].get('object').api_version, 'coordination.k8s.io/v1') def test_local_subject_access_review(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['local-subject-access-review.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 'curry-ns') self.assertEqual(k8s_objs[0].get('object').kind, 'LocalSubjectAccessReview') self.assertEqual(k8s_objs[0].get('object').api_version, 'authorization.k8s.io/v1') def test_namespace(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['namespace.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'Namespace') self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') def test_network_policy(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['network-policy.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'NetworkPolicy') self.assertEqual(k8s_objs[0].get('object').api_version, 'networking.k8s.io/v1') def test_node(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['node.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'Node') self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') def test_persistent_volume(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['persistent-volume.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'PersistentVolume') self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') def test_persistent_volume_claim(self): k8s_objs 
= self.transfromer.get_k8s_objs_from_yaml( ['persistent-volume-claim.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'PersistentVolumeClaim') self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') def test_pod(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['pod.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'Pod') self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') def test_priority_class(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['priority-class.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'PriorityClass') self.assertEqual(k8s_objs[0].get('object').api_version, 'scheduling.k8s.io/v1') def test_replica_set(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['replica-set.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'ReplicaSet') self.assertEqual(k8s_objs[0].get('object').api_version, 'apps/v1') def test_resource_quota(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['resource-quota.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') self.assertEqual(k8s_objs[0].get('object').kind, 'ResourceQuota') self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') def test_role(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['role.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 'curry-ns') self.assertEqual(k8s_objs[0].get('object').kind, 'Role') self.assertEqual(k8s_objs[0].get('object').api_version, 'rbac.authorization.k8s.io/v1') def test_role_binding(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['role-bindings.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 'curry-ns') self.assertEqual(k8s_objs[0].get('object').kind, 'RoleBinding') self.assertEqual(k8s_objs[0].get('object').api_version, 'rbac.authorization.k8s.io/v1') def test_secret(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['secret.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 'default') self.assertEqual(k8s_objs[0].get('object').kind, 'Secret') self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') def test_self_subject_access_review(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['self-subject-access-review.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'SelfSubjectAccessReview') self.assertEqual(k8s_objs[0].get('object').api_version, 'authorization.k8s.io/v1') def test_self_subject_rules_review(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['self-subject-rule-review.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'SelfSubjectRulesReview') 
self.assertEqual(k8s_objs[0].get('object').api_version, 'authorization.k8s.io/v1') def test_service(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['service.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 'default') self.assertEqual(k8s_objs[0].get('object').kind, 'Service') self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') def test_service_account(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['service-account.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 'default') self.assertEqual(k8s_objs[0].get('object').kind, 'ServiceAccount') self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') def test_stateful_set(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['stateful-set.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'StatefulSet') self.assertEqual(k8s_objs[0].get('object').api_version, 'apps/v1') def test_storage_class(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['storage-class.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'StorageClass') self.assertEqual(k8s_objs[0].get('object').api_version, 'storage.k8s.io/v1') def test_subject_access_review(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['subject-access-review.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'SubjectAccessReview') self.assertEqual(k8s_objs[0].get('object').api_version, 'authorization.k8s.io/v1') def test_token_review(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['token-review.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), '') self.assertEqual(k8s_objs[0].get('object').kind, 'TokenReview') self.assertEqual(k8s_objs[0].get('object').api_version, 'authentication.k8s.io/v1') def test_limit_range(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['limit-range.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') self.assertEqual(k8s_objs[0].get('object').kind, 'LimitRange') self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') def test_pod_template(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['pod-template.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') self.assertEqual(k8s_objs[0].get('object').kind, 'PodTemplate') self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') def test_volume_attachment(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['volume-attachment.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') self.assertEqual(k8s_objs[0].get('object').kind, 'VolumeAttachment') self.assertEqual(k8s_objs[0].get('object').api_version, 'storage.k8s.io/v1') def test_bindings(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['bindings.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 
'curryns') self.assertEqual(k8s_objs[0].get('object').kind, 'Binding') self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') def test_controller_revision(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['controller-revision.yaml'], self.yaml_path ) self.assertIsNotNone(k8s_objs[0].get('object')) self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') self.assertEqual(k8s_objs[0].get('object').kind, 'ControllerRevision') self.assertEqual(k8s_objs[0].get('object').api_version, 'apps/v1')
python
"""2020 - Day 3 Part 1: Toboggan Trajectory.""" from textwrap import dedent import pytest from src.year2020.day03a import solve @pytest.mark.parametrize( "task,expected", [ ( dedent( """ ..##....... #...#...#.. .#....#..#. ..#.#...#.# .#...##..#. ..#.##..... .#.#.#....# .#........# #.##...#... #...##....# .#..#...#.# """ ), 7, ), ], ) def test_solve(task, expected): assert solve(task) == expected
python
#!/bin/python3

import math
import os
import random
import re
import sys

# Complete the rotLeft function below.
def rotLeft(a, d):
    # a is the array of integers, d is the number of left rotations.
    newArray = [None for i in range(0, len(a))]
    for i in range(len(a) - 1, -1, -1):
        # The element at index i moves d positions to the left, wrapping around.
        newIndex = (i - d) % len(a)
        newArray[newIndex] = a[i]
    return newArray

if __name__ == '__main__':
    a = [1, 2, 3, 4, 5]
    d = 4
    result = rotLeft(a, d)
    print(result)
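
# Sketch (not part of the original solution): the same left rotation can be done
# with slicing, since rotating left by d moves the first d elements to the end.
def rotLeftSlicing(a, d):
    d = d % len(a)
    return a[d:] + a[:d]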
python
import httpx
from django.conf import settings


def hcaptcha_verified(request):
    if settings.HCAPTCHA_ENABLED:
        if request.method == "POST":
            if request.POST.get("h-captcha-response"):
                # check hCaptcha
                h_captcha_response = request.POST.get("h-captcha-response")
                data = {"secret": settings.HCAPTCHA_SECRET_KEY, "response": h_captcha_response}
                r = httpx.post(settings.HCAPTCHA_VERIFY_URL, data=data)
                result = r.json()
                if result["success"]:
                    return True
                else:
                    return False
            else:
                return False
        else:
            return False
    else:
        return True
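
# Usage sketch (not part of the original module): gating a POST on the helper
# above. The view name, template and context keys are hypothetical.
from django.shortcuts import render

def contact_view(request):
    context = {}
    if request.method == "POST":
        if hcaptcha_verified(request):
            context["message"] = "Thanks, the captcha check passed."
        else:
            context["error"] = "Captcha verification failed."
    return render(request, "contact.html", context)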
python
# -*- coding: utf-8 -*- import importlib import os import subprocess import sys import pip import pkg_resources import pytest from django.core.management import call_command from django.test import TestCase from io import StringIO from pip._internal.exceptions import InstallationError class PipCheckerTests(TestCase): def test_pipchecker_when_requirements_file_does_not_exist(self): with self.assertRaises(InstallationError): call_command('pipchecker', '-r', 'not_exist.txt') def test_pipchecker_with_not_installed_requirement(self): requirements_path = './requirements.txt' out = StringIO() f = open(requirements_path, 'wt') f.write('not-installed==1.0.0') f.close() call_command('pipchecker', '-r', requirements_path, stdout=out) value = out.getvalue() subprocess.call([sys.executable, '-m', 'pip', 'uninstall', '--yes', '-r', requirements_path]) os.remove(requirements_path) self.assertTrue(value.endswith('not installed\n')) def test_pipchecker_with_outdated_requirement(self): requirements_path = './requirements.txt' out = StringIO() f = open(requirements_path, 'wt') f.write('djangorestframework==3.0.0') f.close() subprocess.call([sys.executable, '-m', 'pip', 'install', '-r', requirements_path]) pip._vendor.pkg_resources = importlib.reload(pip._vendor.pkg_resources) call_command('pipchecker', '-r', requirements_path, stdout=out) value = out.getvalue() subprocess.call([sys.executable, '-m', 'pip', 'uninstall', '--yes', '-r', requirements_path]) os.remove(requirements_path) self.assertTrue(value.endswith('available\n')) @pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher") def test_pipchecker_with_up_to_date_requirement(self): requirements_path = './requirements.txt' out = StringIO() f = open(requirements_path, 'wt') f.write('djangorestframework') f.close() subprocess.call([sys.executable, '-m', 'pip', 'install', '-r', requirements_path]) pip._vendor.pkg_resources = importlib.reload(pip._vendor.pkg_resources) call_command('pipchecker', '-r', requirements_path, stdout=out) value = out.getvalue() subprocess.call([sys.executable, '-m', 'pip', 'uninstall', '--yes', '-r', requirements_path]) os.remove(requirements_path) self.assertEqual(value, '') def test_pipchecker_with_github_url_requirement(self): requirements_path = './requirements.txt' out = StringIO() f = open(requirements_path, 'wt') f.write('git+https://github.com/jmrivas86/django-json-widget') f.close() subprocess.call([sys.executable, '-m', 'pip', 'install', 'django-json-widget']) pip._vendor.pkg_resources = importlib.reload(pip._vendor.pkg_resources) call_command('pipchecker', '-r', requirements_path, stdout=out) value = out.getvalue() subprocess.call([sys.executable, '-m', 'pip', 'uninstall', '--yes', '-r', requirements_path]) os.remove(requirements_path) self.assertTrue(value.endswith('repo is not frozen\n'), value) def test_pipchecker_with_outdated_requirement_on_pip20_1(self): subprocess.call([sys.executable, '-m', 'pip', 'install', '-U', 'pip==20.1']) importlib.reload(pip) requirements_path = './requirements.txt' out = StringIO() f = open(requirements_path, 'wt') f.write('djangorestframework==3.0.0') f.close() subprocess.call([sys.executable, '-m', 'pip', 'install', '-r', requirements_path]) importlib.reload(pkg_resources) call_command('pipchecker', '-r', requirements_path, stdout=out) value = out.getvalue() subprocess.call([sys.executable, '-m', 'pip', 'uninstall', '--yes', '-r', requirements_path]) os.remove(requirements_path) self.assertTrue(value.endswith('available\n')) def 
test_pipchecker_with_long_up_to_date_requirements(self): requirements_path = './requirements.txt' out = StringIO() f = open(requirements_path, 'wt') f.write('appdirs') f.write('asgiref') f.write('attrs') f.write('black') f.write('certifi') f.write('chardet') f.write('click') f.write('distlib') f.write('Django') f.write('django-cors-headers') f.write('django-debug-toolbar') f.write('djangorestframework') f.write('filelock') f.write('idna') f.write('iniconfig') f.write('mypy-extensions') f.write('packaging') f.write('pathspec') f.write('Pillow') f.write('pluggy') f.write('psycopg2-binary') f.write('py') f.write('pyparsing') f.write('pytest') f.write('pytz') f.write('regex') f.write('requests') f.write('sentry-sdk') f.write('shortuuid') f.write('six') f.write('sqlparse') f.write('toml') f.write('typed-ast') f.write('typing-extensions') f.write('urllib3') f.write('whitenoise') f.write('zipp') subprocess.call([sys.executable, '-m', 'pip', 'install', '-r', requirements_path]) pip._vendor.pkg_resources = importlib.reload(pip._vendor.pkg_resources) call_command('pipchecker', '-r', requirements_path, stdout=out) value = out.getvalue() subprocess.call([sys.executable, '-m', 'pip', 'uninstall', '--yes', '-r', requirements_path]) os.remove(requirements_path) self.assertTrue(value.endswith("Retrying in 60 seconds!") or value == '')
python
#! /usr/bin/env python
from bs4 import BeautifulSoup

from modules.utils import settings


class AhgoraScrapper(object):
    __source = ""
    __scrapper = None
    __table = None

    def __init__(self, source=""):
        self.__source = source
        self.__scrapper = BeautifulSoup(self.__source)

    def appointments_table(self):
        if self.__table is None:
            self.__table = self.__scrapper.find_all("table")[1]  # It's the second table
        return self.__table

    def appointment_rows(self):
        rows = []
        for row in self.appointments_table().find_all("tr")[2:]:
            cols = row.find_all("td")
            date = cols[0].text.strip()
            appointments = cols[2].text.split(", ") if cols[2].text != "" else []
            # Pad with placeholders so every row holds MAX_APPOINTMENTS entries
            while len(appointments) < settings.MAX_APPOINTMENTS:
                appointments.append("-")
            rows.append({"date": date, "appointments": appointments})
        return rows
python
# first find percentages
# (matplotlib is assumed to be available; heart_df is assumed to be a loaded
# DataFrame with a binary `sex` column where 1 = men and 0 = women)
import matplotlib.pyplot as plt

per_men = (heart_df.sex.value_counts()[1]) / (heart_df.sex.value_counts()[0] + heart_df.sex.value_counts()[1])
per_wom = (heart_df.sex.value_counts()[0]) / (heart_df.sex.value_counts()[0] + heart_df.sex.value_counts()[1])
per_men, per_wom

labels = 'Men', 'Women'
explode = (0, 0.1)  # only "explode" the 2nd slice
sizes = [per_men, per_wom]

# First and last time I will use a pie chart!!
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
python
import tensorflow as tf from keras.models import Model from keras.layers import Input, Dense #from keras.utils import to_categorical from keras import backend as K from keras import metrics, optimizers, applications, callbacks from keras.callbacks import ModelCheckpoint from keras.callbacks import LearningRateScheduler import numpy as np from wx_hyperparam import WxHyperParameter import xgboost as xgb __author__ = 'Sungsoo Park' #set default global hyper paramerters wx_hyperparam = WxHyperParameter(learning_ratio=0.001) def cw_ann_reg_model(x_train, y_train, x_val, y_val, hyper_param=wx_hyperparam, hidden_layer_size=128): input_dim = len(x_train[0]) inputs = Input((input_dim,)) hidden = Dense(hidden_layer_size)(inputs) fc_out = Dense(1)(hidden) model = Model(input=inputs, output=fc_out) #build a optimizer sgd = optimizers.SGD(lr=hyper_param.learning_ratio, decay=hyper_param.weight_decay, momentum=hyper_param.momentum, nesterov=True) model.compile(loss='mean_squared_error', optimizer=sgd, metrics=[metrics.mse]) #call backs def step_decay(epoch): exp_num = int(epoch/10)+1 return float(hyper_param.learning_ratio/(10 ** exp_num)) best_model_path="./slp_cw_ann_weights_best"+".hdf5" save_best_model = ModelCheckpoint(best_model_path, monitor="val_loss", verbose=hyper_param.verbose, save_best_only=True, mode='min') change_lr = LearningRateScheduler(step_decay) #run train history = model.fit(x_train, y_train, validation_data=(x_val,y_val), epochs=hyper_param.epochs, batch_size=hyper_param.batch_size, shuffle=True, callbacks=[save_best_model, change_lr], verbose=hyper_param.verbose) #load best model model.load_weights(best_model_path) return model def connection_weight(x_train, y_train, x_val, y_val, n_selection=100, hidden_layer_size=128, hyper_param=wx_hyperparam, num_cls=2): input_dim = len(x_train[0]) # make model and do train model = cw_ann_reg_model(x_train, y_train, x_val, y_val, hyper_param=hyper_param, hidden_layer_size=hidden_layer_size) #load weights weights = model.get_weights() #get feature importance using connection weight algo (Olden 2004) wt_ih = weights[0]#.transpose() #input-hidden weights wt_ho = weights[1]#.transpose() #hidden-out weights dot_wt = wt_ih * wt_ho sum_wt = np.sum(dot_wt,axis=1) selected_idx = np.argsort(sum_wt)[::-1][0:n_selection] selected_weights = sum_wt[selected_idx] #get evaluation acc from best model loss, val_acc = model.evaluate(x_val, y_val) K.clear_session() return selected_idx, selected_weights, val_acc def DoFeatureSelectionConnectionWeight(train_x, train_y, val_x, val_y, test_x, test_y, f_list, hp, n_sel = 14): ITERATION = 5 feature_num = len(f_list) all_weight = np.zeros(feature_num) all_count = np.ones(feature_num) accs = [] for i in range(0, ITERATION): sel_idx, sel_weight, test_acc = connection_weight(train_x, train_y, val_x, val_y, n_selection=min(n_sel*100, feature_num), hyper_param=hp) accs.append(test_acc) for j in range(0,min(n_sel*100, feature_num)): all_weight[sel_idx[j]] += sel_weight[j] all_count[sel_idx[j]] += 1 all_weight = all_weight / all_count sort_index = np.argsort(all_weight)[::-1] sel_index = sort_index[:n_sel]#top n_sel sel_index = np.asarray(sel_index) sel_weight = all_weight[sel_index] gene_names = np.asarray(f_list) sel_genes = gene_names[sel_index] return sel_index, sel_genes, sel_weight, np.mean(accs,axis=0) def DoFeatureSelectionWX(train_x, train_y, val_x, val_y, test_x, test_y, f_list, hp, n_sel = 14, sel_option='top'): ITERATION = 10 feature_num = len(f_list) all_weight = np.zeros(feature_num) all_count = 
np.ones(feature_num) accs = [] for i in range(0, ITERATION): sel_idx, sel_weight, test_acc = WxSlp(train_x, train_y, val_x, val_y, test_x, test_y, n_selection=min(n_sel*100, feature_num), hyper_param=hp) accs.append(test_acc) for j in range(0,min(n_sel*100, feature_num)): all_weight[sel_idx[j]] += sel_weight[j] all_count[sel_idx[j]] += 1 all_weight = all_weight / all_count sort_index = np.argsort(all_weight)[::-1] if sel_option == 'top': sel_index = sort_index[:n_sel] sel_index = np.asarray(sel_index) sel_weight = all_weight[sel_index] gene_names = np.asarray(f_list) sel_genes = gene_names[sel_index] return sel_index, sel_genes, sel_weight, np.mean(accs,axis=0) # from sklearn.metrics import roc_auc_score def NaiveSLPmodel(x_train, y_train, x_val, y_val, hyper_param=wx_hyperparam): input_dim = len(x_train[0]) inputs = Input((input_dim,)) fc_out = Dense(2, kernel_initializer='zeros', bias_initializer='zeros', activation='softmax')(inputs) model = Model(input=inputs, output=fc_out) #build a optimizer sgd = optimizers.SGD(lr=hyper_param.learning_ratio, decay=hyper_param.weight_decay, momentum=hyper_param.momentum, nesterov=True) model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy']) #call backs def step_decay(epoch): exp_num = int(epoch/10)+1 return float(hyper_param.learning_ratio/(10 ** exp_num)) best_model_path="./slp_wx_weights_best"+".hdf5" save_best_model = ModelCheckpoint(best_model_path, monitor="val_loss", verbose=hyper_param.verbose, save_best_only=True, mode='min') change_lr = LearningRateScheduler(step_decay) #run history = model.fit(x_train, y_train, validation_data=(x_val,y_val), epochs=hyper_param.epochs, batch_size=hyper_param.batch_size, shuffle=True, callbacks=[save_best_model, change_lr]) #load best model model.load_weights(best_model_path) return model def WxSlp(x_train, y_train, x_val, y_val, test_x, test_y, n_selection=100, hyper_param=wx_hyperparam, num_cls=2):#suppot 2 class classification only now. sess = tf.Session() K.set_session(sess) input_dim = len(x_train[0]) # make model and do train model = NaiveSLPmodel(x_train, y_train, x_val, y_val, hyper_param=hyper_param) #load weights weights = model.get_weights() #cacul WX scores num_data = {} running_avg={} tot_avg={} Wt = weights[0].transpose() #all weights of model Wb = weights[1].transpose() #all bias of model for i in range(num_cls): tot_avg[i] = np.zeros(input_dim) # avg of input data for each output class num_data[i] = 0. for i in range(len(x_train)): c = y_train[i].argmax() x = x_train[i] tot_avg[c] = tot_avg[c] + x num_data[c] = num_data[c] + 1 for i in range(num_cls): tot_avg[i] = tot_avg[i] / num_data[i] #data input for first class wx_00 = tot_avg[0] * Wt[0]# + Wb[0]# first class input avg * first class weight + first class bias wx_01 = tot_avg[0] * Wt[1]# + Wb[1]# first class input avg * second class weight + second class bias #data input for second class wx_10 = tot_avg[1] * Wt[0]# + Wb[0]# second class input avg * first class weight + first class bias wx_11 = tot_avg[1] * Wt[1]# + Wb[1]# second class input avg * second class weight + second class bias wx_abs = np.zeros(len(wx_00)) for idx, _ in enumerate(wx_00): wx_abs[idx] = np.abs(wx_00[idx] - wx_01[idx]) + np.abs(wx_11[idx] - wx_10[idx]) selected_idx = np.argsort(wx_abs)[::-1][0:n_selection] selected_weights = wx_abs[selected_idx] #get evaluation acc from best model loss, test_acc = model.evaluate(test_x, test_y) K.clear_session() return selected_idx, selected_weights, test_acc
python
# -*- coding: utf-8 -*-
import os

DEBUG = True

# Assumes the app is located in the same directory
# where this file resides
APP_DIR = os.path.dirname(os.path.abspath(__file__))


def parent_dir(path):
    '''Return the parent of a directory.'''
    return os.path.abspath(os.path.join(path, os.pardir))


PROJECT_ROOT = parent_dir(APP_DIR)

# In order to deploy to Github pages, you must build the static files to
# the project root
FREEZER_DESTINATION = PROJECT_ROOT

# Since this is a repo page (not a Github user page),
# we need to set the BASE_URL to the correct url as per GH Pages' standards
FREEZER_BASE_URL = "http://localhost"

# IMPORTANT: If this is True, all app files will be deleted when you run the freezer
FREEZER_REMOVE_EXTRA_FILES = False

FLATPAGES_MARKDOWN_EXTENSIONS = ['codehilite']
FLATPAGES_ROOT = os.path.join(APP_DIR, '../__pages')
FLATPAGES_EXTENSION = '.md'
python
def extractBananas(item): """ Parser for 'Bananas' """ badwords = [ 'iya na kao manga chapters', ] if any([bad in item['tags'] for bad in badwords]): return None vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or 'preview' in item['title'].lower(): return None tagmap = [ ('isekai joushu chapters', 'Struggling Hard As The Lord Of A Castle In A Different World', 'translated'), ('dungeon harem wn chapters', 'The Dungeon Harem I Built With My Elf Sex Slave', 'translated'), ('erufu seidorei wn', 'The Dungeon Harem I Built With My Elf Sex Slave', 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) chp_prefixes = [ ('AARASL', 'An A-ranked Adventurer’s “Slow-living”', 'translated'), ('Isekai Taneuma', 'Isekai Taneuma', 'translated'), ('Gang of Yuusha', 'Gang of Yusha', 'translated'), ('Gang of Yusha', 'Gang of Yusha', 'translated'), ('The Revenge of the Soul Eater', 'Soul Eater of the Rebellion', 'translated'), ('Soul Eater of the Rebellion', 'Soul Eater of the Rebellion', 'translated'), ('Sparta Teikoku ', 'Sparta Teikoku Kenkoku Senki ', 'translated'), ] for prefix, series, tl_type in chp_prefixes: if item['title'].lower().startswith(prefix.lower()): return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
python
import sys
sys.path.append(".")

import numpy as np
import pytest

from text_classification import data


@pytest.mark.parametrize('texts, preprocessed_texts', [
    ('Hello', 'hello'),
    ('HELLO', 'hello'),
    ('Hello, world!', 'hello world'),
    ('Hello, world!', 'hello world')
])
def test_preprocess_texts(texts, preprocessed_texts):
    assert data.preprocess_texts(texts=[texts]) == [preprocessed_texts]


@pytest.mark.parametrize('sequences, padded_sequences', [
    ([[1, 2, 3]], [[1, 2, 3]]),
    ([[1, 2], [1, 2, 3, 4]], [[1, 2, 0, 0], [1, 2, 3, 4]])
])
def test_pad_sequences(sequences, padded_sequences):
    assert data.pad_sequences(sequences=sequences).tolist() == padded_sequences
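# Illustrative only -- not part of the test module. A minimal sketch of a
# pad_sequences helper that is consistent with the test above (zero-padding
# every sequence to the length of the longest one); the real implementation
# lives in text_classification.data and may differ.
def pad_sequences_sketch(sequences):
    max_len = max(len(seq) for seq in sequences)
    padded = np.zeros((len(sequences), max_len), dtype=int)
    for i, seq in enumerate(sequences):
        padded[i, :len(seq)] = seq
    return padded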
python
# coding: utf-8 from __future__ import print_function import platform import sys import os INTERP = platform.python_implementation() IRONPY = "ironpy" in INTERP.lower() PY2 = sys.version_info[0] == 2 if PY2: sys.dont_write_bytecode = True unicode = unicode else: unicode = str WINDOWS = False if platform.system() == "Windows": WINDOWS = [int(x) for x in platform.version().split(".")] COLORS = not WINDOWS or WINDOWS >= [10, 0, 14393] # 1607 / LTSB-2016 # [ determine runtime environment ] # # setting the following members: # env: top of the python environment # doc: help files and documentation # src: our source code directory # app: ~/.r0c || %appdata%/r0c # log: logfiles and client config class Pod(object): pass EP = Pod() def init_envpaths(): # look for our documentation in PYTHONPATH found = False for env_root in sys.path: doc_rel = "share/doc/r0c/help/" if env_root.endswith("/test/.."): return if env_root.endswith(os.sep + "site-packages"): for n in range(4): dirname = os.path.realpath(env_root + "/" + ("../" * n)) + "/" if os.path.isfile(dirname + doc_rel + "help-topics.md"): EP.env = dirname EP.doc = dirname + doc_rel EP.src = env_root + "/r0c/" found = True break if found: break if found: if WINDOWS: EP.app = os.environ["APPDATA"] + "/r0c/" else: EP.app = os.path.expanduser("~") + "/.r0c/" else: # check if we're running from source tree if os.path.isfile("./docs/help-topics.md"): EP.env = "/" EP.doc = "./docs/" EP.src = "./r0c/" EP.app = "./" else: raise RuntimeError( '\n\n could not find "help-topics.md", your r0c is broken\n' ) # frequently used paths derived from those detected above EP.log = os.path.realpath(EP.app + "/log") # ensure they're all absolute for key in "env doc src app log".split(" "): path = os.path.realpath(getattr(EP, key)) setattr(EP, key, path.rstrip("/\\") + os.sep) # what seems to be the officer problem # raise RuntimeError('\n' + '\n'.join(key + ': ' + getattr(EP, key) for key in 'env src app doc log'.split(' ')) + '\n') init_envpaths()
python
#! /usr/bin/env python # Copyright 2018-2019 Mailgun Technologies Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from gubernator import ratelimit_pb2 as pb import gubernator import argparse if __name__ == "__main__": parser = argparse.ArgumentParser(description='Gubernator CLI') parser.add_argument('--endpoint', '-e', action="store", dest="endpoint", default='127.0.0.1:9090') parser.add_argument('--timeout', '-t', action="store", dest="timeout", default=None) parser.add_argument('--namespace', '-n', action="store", dest="namespace", default="cli_ns") parser.add_argument('--key', '-k', action="store", dest="unique_key", default="cli_key") parser.add_argument('--hits', '-H', action="store", dest="hits", type=int, default=1) parser.add_argument('--duration', '-d', action="store", dest="duration", type=int, default=10000) parser.add_argument('--limit', '-l', action="store", dest="limit", type=int, default=5) opts = parser.parse_args() req = pb.Requests() rate_limit = req.requests.add() rate_limit.algorithm = pb.TOKEN_BUCKET rate_limit.duration = opts.duration rate_limit.limit = opts.limit rate_limit.namespace = opts.namespace rate_limit.unique_key = opts.unique_key rate_limit.hits = opts.hits client = gubernator.V1Client(endpoint=opts.endpoint) resp = client.GetRateLimits(req, timeout=opts.timeout) print(resp)
python
import numpy as np import h5py import scipy.io as sio import cv2 import glob from PIL import Image def calc_scannetv2(data_root,n_class): masks = [] size = (320,240) with open('./datasets/scannet/scannetv2_{}.txt'.format('train')) as f: scans = f.readlines() scans = [x.strip() for x in scans] for scan in scans: ms = glob.glob("{}/{}/label/*.png".format(data_root, scan)) masks.extend(ms) mask_numpy = [] num_images = np.zeros((n_class)) for index in range(len(masks)): mask = np.array(Image.open(masks[index])) mask = cv2.resize(mask, size, interpolation=cv2.INTER_NEAREST) num_images[np.unique(mask)] += 1 mask_numpy.append(mask) mask_numpy = np.array(mask_numpy) counts = np.array(np.unique(mask_numpy, return_counts=True)).T freqs = counts [:,1] / num_images weights = np.median(freqs) / freqs; np.savetxt('./datasets/scannet/scannetv2_weigths.txt',weights) def calc_weigths(dataset,data_root): if dataset == "scannetv2": n_class = 41 calc_scannetv2(data_root,n_class) else: print ("Dataset {} is not implemented".format(dataset)) def main(): data_root = '/usr/data/cvpr_shared/common_datasets/scannet/tasks/scannet_frames_25k' calc_weigths("scannetv2",data_root) if __name__ == '__main__': main()
python
from aoc import AOC

aoc = AOC(year=2020, day=15)

series = aoc.load().numbers_by_line()[0]

seen = {}
n = 0
for idx, x in enumerate(series[:-1]):
    seen[x] = idx

last = series[-1]
n = len(series)
while n < 30_000_000:
    if last in seen:
        next = n - 1 - seen[last]
    else:
        next = 0
    seen[last] = n - 1
    last = next
    n += 1
    if n == 2020:
        aoc.p1(last)

aoc.p2(last)
python
""" Configuration loader using 'git-config'. """ import logging from git_pw import utils LOG = logging.getLogger(__name__) # TODO(stephenfin): We should eventually download and store these # automagically DEFAULT_STATES = [ 'new', 'under-review', 'accepted', 'rejected', 'rfc', 'not-applicable', 'changes-requested', 'awaiting-upstream', 'superseded', 'deferred'] class Config(object): def __init__(self): self._git_config = {} def __getattribute__(self, name): # attempt to use any attributes first value = object.__getattribute__(self, name) if value: LOG.debug("Retrieved '{}' setting from cache".format(name)) return value # fallback to reading from git config otherwise value = utils.git_config('pw.{}'.format(name)) if value: LOG.debug("Retrieved '{}' setting from git-config".format(name)) setattr(self, name, value) return value CONF = Config()
python
import threading from json import load from time import time, sleep from classes.logger import Logger from classes.product import Product from webbot import Browser class Site(threading.Thread): def __init__(self, tid, config_filename, headless = False): threading.Thread.__init__(self) self.tid = tid self.start_time = time() self.log = Logger(tid).log self.web = Browser(showWindow=headless) with open(config_filename) as task_file: self.T = load(task_file) with open('config.json') as config_file: self.C = load(config_file) def wait(self, time): self.log('sleeping {} second(s)'.format(time)) sleep(time) def login(self): self.web.go_to('https://catalog.usmint.gov/account-login') self.web.type(self.T["email"] , into='Login') self.web.type(self.T["password"] , into='Password') self.web.click('Sign In') def get_products(self): self.log('getting some products') self.web.go_to(self.T["link"]) def add_to_cart(self): self.log('adding product to cart', 'blue') self.web.click('Add to Bag') # self.wait() def checkout(self): self.log('checking out') while not self.web.exists('Checkout', loose_match=False): self.wait(0.02) self.web.click('Checkout') self.web.click(id="shipping-method") self.web.click('Next Day') self.wait(0.1) # self.web.type(self.T["email"] , into='Login') # self.web.type(self.T["password"] , into='Password') # self.web.click('Checkout as Registered User') self.web.click(id="dwfrm_singleshipping_addressList") self.web.click(self.T["address"]) self.wait(0.2) self.web.click(id="dwfrm_billing_paymentMethods_creditCardList") self.web.click(self.T["card"]) self.web.type(self.T["cvv"] , id="dwfrm_billing_paymentMethods_creditCard_cvn") while not self.web.exists('Continue to Final Review', loose_match=False): self.wait(0.02) self.web.click('Continue to Final Review') # self.wait() def run(self): self.login() self.get_products() self.add_to_cart() self.checkout() self.wait(30) self.log('time to checkout: {} sec'.format(abs(self.start_time-time())), 'green')
python
""" Copyright 2019 Software Reliability Lab, ETH Zurich Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from PIL import Image, ImageDraw import os from shutil import copyfile from core.view import View import numpy as np import copy import pprint import os import shutil from tqdm import tqdm from core.features.handcrafted_feature_functions import compute_centered_vertically_different_views, \ compute_centered_horizontally_different_views, popular_margin_vertical, popular_margin_horizontal, \ popular_aspect_ratio, compute_intersections, inside_screen, compute_similar_alignment_horizontally, \ compute_similar_alignment_vertically, add_raw_coordinates, compute_centered_horizontally, \ compute_centered_vertically, compute_same_dimensions_score # categorize mistakes for evaluation maxNumberOfCandidates = 17 device_width = 360 # 1440 device_height = 512 # 2560 directory = "./dataset/data/dsplus/test/" target_directory = "./dataset/data/ablation_dataset/" downsample = 4 prefix = "dsplus_" def draw_views(views, device_width, device_height, target_name): image = Image.new('RGB', (int(device_width), int(device_height))) draw = ImageDraw.Draw(image) draw.rectangle(((0, 0), (device_width + 1, device_height + 1)), fill="white") for view in views: view.draw_downsampled(draw, downsample) try: image.save(target_name, "PNG") except OSError as e: print("Could not save image: ", target_name, e) def read_views(path): views = [] with open(path, "r") as ins: for line in ins: line = line.replace(" ", "").replace("\n", "") numbers = line.split(",") views.append(View(int(int(numbers[0])), int(int(numbers[1])), int(int(numbers[2])), int(int(numbers[3])))) if len(views) == 0: print(path) return views def create_directory_if_necessary(directory): if not os.path.exists(directory): os.makedirs(directory) def transfer_files(good_views, bad_views, original_views, good_filename, bad_filename, original_file_name, directory, target): create_directory_if_necessary(target) copyfile(directory + good_filename, target + good_filename) draw_views(good_views, device_width, device_height, target + good_filename.split(".txt")[0] + ".png") copyfile(directory + bad_filename, target + bad_filename) draw_views(bad_views, device_width, device_height, target + bad_filename.split(".txt")[0] + ".png") copyfile(directory + original_file_name, target + original_file_name) draw_views(original_views, device_width, device_height, target + original_file_name.split(".txt")[0] + ".png") # check that there are not more than 1 def differing_view(views, bad_views): # watch out for non rico datasets! for i, val in enumerate(views): if not views[i].equal(bad_views[i]): return i return -1 # watch out when changing the order in compute_vector to adapt the indexes... 
def naming_map(): return {"perserve_inside_screeen": [0, 0], "perserve_intersections": [1, 1], "perserve_margin_0_horizontally": [2, 2], "perserve_margin_horizontally": [3, 10], "perserve_margin_0_vertically": [11, 11], "perserve_margin_vertically": [12, 19], "perserve_aspect_ratio1-0": [20, 20], "perserve_centering_horizontally_one_view": [21, 21], "perserve_centering_horizontally_views": [22, 22], "perserve_centering_vertically_one_view": [23, 23], "perserve_centering_vertically_views": [24, 24], "perserve_similar_dimensions": [25, 25], "perserve_popular_aspect_ratios": [26, 26], } def compute_handcrafted_vector(views): vector = [] vector.append(inside_screen(views, views[0].width, views[0].height)) vector.append(compute_intersections(views)) vector.append(compute_similar_alignment_horizontally(views)) for i in [8, 14, 16, 20, 24, 30, 32, 48]: vector.append(popular_margin_horizontal(views, [i * 2])) vector.append(compute_similar_alignment_vertically(views)) for i in [8, 14, 16, 20, 24, 30, 32, 48]: vector.append(popular_margin_vertical(views, [i * 2])) vector.append(popular_aspect_ratio(views, [1.0 / 1.0])) vector.append(compute_centered_horizontally(views)) vector.append(compute_centered_horizontally_different_views(views)) vector.append(compute_centered_vertically(views)) vector.append(compute_centered_vertically_different_views(views)) vector.append(compute_same_dimensions_score(views)) vector.append( popular_aspect_ratio(views, [9.0 / 16.0, 9.0 / 16.0]) + popular_aspect_ratio(views, [3.0 / 4.0, 4.0 / 3.0])) return vector def compute_vector(views, views_original): vector = [] array1 = compute_handcrafted_vector(views) array_org1 = compute_handcrafted_vector(views_original) vector = (np.asarray(array1) - np.asarray(array_org1)).tolist() return vector mistakes = np.zeros(27) def good_file(bad_name, root_dir): # 16 candidates for i in range(0, maxNumberOfCandidates): name = bad_name.split("-")[0] + "-" + bad_name.split("-")[1] + "-" + bad_name.split("-")[2] + "-" + str( i) + "_1.txt" if os.path.isfile(os.path.join(root_dir, name)): return True, name name = bad_name.split("-")[0] + "-" + bad_name.split("-")[1] + "-" + bad_name.split("-")[2] + "-tr_1.txt" if os.path.isfile(os.path.join(root_dir, name)): return True, name print("Good file does not exist for ", bad_name) return False, "Does not exist" def original_file(filename): return filename.split("-")[0] + "-" + filename.split("-")[2] + "-original.txt" # check which features appear with each other correlations = {} for key in naming_map().keys(): correlations[key] = {} for key1 in naming_map().keys(): correlations[key][key1] = 0 yes = {'yes', 'y', 'ye', ''} no = {'no', 'n'} if os.path.isdir(target_directory): print("Folder already exists on,", target_directory) choice = input("Do you want to delete the existing folder? 
").lower() if choice in yes: print("Deleting existing folder") shutil.rmtree(target_directory) elif choice in no: print("Aborting") exit() else: sys.stdout.write("Please respond with 'yes' or 'no'") print("Creating folder on", target_directory) fileList = [s for s in os.listdir(directory) if ("_0.txt" in s)] # and (sum(1 for line in open(os.path.join(directory,s))) == i))] numberOfUniqueSamples = 0 for k, bad_filename in enumerate(tqdm(fileList)): bad_views = read_views(directory + bad_filename) good_filename = good_file(bad_filename, directory)[1] good_views = read_views(directory + good_filename) original_file_name = original_file(bad_filename) original_views = read_views(directory + original_file_name) if os.path.isfile(directory + good_filename): full = np.asarray(compute_vector(bad_views, original_views)) with_distn = np.asarray(compute_vector(good_views, original_views)) res = (full - with_distn) res = abs(res) mistakes = mistakes + res categories = [] for key, indexes in naming_map().items(): # print(key) # print(res[indexes[0]:indexes[1]+1]) # +1 since it is excluding the upper limit # non exclusive property if res[indexes[0]:indexes[1] + 1].sum() != 0: # if we want the exclusive property: -> not a single one is true there if res.sum == res[indexes[0]:indexes[1] + 1].sum(): numberOfUniqueSamples = numberOfUniqueSamples + 1 target = target_directory + "/" + prefix + key + "/" # _directory + "/" + key + "/" transfer_files(good_views, bad_views, original_views, good_filename, bad_filename, original_file_name, directory, target) categories.append(key) for category in categories: for tcategory in categories: correlations[category][tcategory] = correlations[category][tcategory] + 1 # print(fileList) np.set_printoptions(suppress=True) # print(mistakes) correlationsVerbose = copy.deepcopy(correlations) for category in correlations.keys(): print("category", category) for tcategory in correlations.keys(): percentage = -1 if float(correlations[category][category]) > 0: percentage = float(correlations[category][tcategory]) / float(correlations[category][category]) correlationsVerbose[category][tcategory] = '{}, {:.2f}%'.format(correlations[category][tcategory], percentage) pp = pprint.PrettyPrinter(depth=6) pp.pprint(correlationsVerbose) print("numberOfUniqueSamples", numberOfUniqueSamples)
python
# Copyright (c) Yiming Wang # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import torch from fairseq import metrics, options, search from fairseq.data import ConcatDataset from fairseq.tasks import FairseqTask, register_task from espresso.data import ( AsrDictionary, AsrTextDataset, ScpCachedDataset, SpeechDataset, ) logger = logging.getLogger(__name__) @register_task('speech_recognition_espresso') class SpeechRecognitionEspressoTask(FairseqTask): """ Transcribe from speech (source) to token text (target). Args: dictionary (~fairseq.data.AsrDictionary): dictionary for the output tokens word_dict (~fairseq.data.AsrDictionary): dictionary for the words (for decoding with word-based LMs) feat_in_channels (int): input feature channels .. note:: The speech recognition task is compatible with :mod:`speech-train`, :mod:`speech-recognize` and :mod:`fairseq-interactive`. The speech recognition task provides the following additional command-line arguments: .. argparse:: :ref: fairseq.tasks.speech_recognition_parser :prog: """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" # fmt: off parser.add_argument('--train-feat-files', nargs='+', help='path(s) to scp feature file(s) for training, ' 'will be iterated upon during epochs in round-robin manner') parser.add_argument('--train-text-files', nargs='+', help='path(s) to text file(s) for training, where ' 'each should matches with one in --train-feat-files, ' 'will be iterated upon during epochs in round-robin manner') parser.add_argument('--valid-feat-files', nargs='+', help='path(s) to scp feature file(s) for validation') parser.add_argument('--valid-text-files', nargs='+', help='path(s) to text file(s) for validation, where ' 'each should matches with one in --valid-feat-files') parser.add_argument('--test-feat-files', nargs='+', help='path(s) to scp feature file(s) for test') parser.add_argument('--test-text-files', nargs='*', default=None, help='path(s) to text file(s) for test. if not None, ' 'each one should matches with one in --test-feat-files') parser.add_argument('--train-subset-feat-files', nargs='+', help='path(s) to scp feature file(s) for validation') parser.add_argument('--train-subset-text-files', nargs='+', help='path(s) to text file(s) for validation, where ' 'each should matches with one in --train-subset-feat-files') parser.add_argument('--dict', default=None, type=str, help='path to the dictionary') parser.add_argument('--non-lang-syms', default=None, type=str, help='path to a file listing non-linguistic symbols, e.g., <NOISE> ' 'etc. One entry per line. To be filtered out when calculating WER/CER.') parser.add_argument('--word-dict', default=None, type=str, help='path to the word dictionary. 
Only relevant for decoding') parser.add_argument('--wer-output-filter', default=None, type=str, help='path to wer_output_filter file for WER evaluation') parser.add_argument('--left-pad-source', default='False', type=str, metavar='BOOL', help='pad the source on the left') parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left') parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of frames in the source sequence') parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence') parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset') parser.add_argument('--feat-in-channels', default=1, type=int, metavar='N', help='feature input channels') # fmt: off @classmethod def load_dictionary(cls, filename, non_lang_syms=None): """Load the dictionary from the filename Args: filename (str): the filename non_lang_syms (str): non_lang_syms filename """ return AsrDictionary.load(filename, f_non_lang_syms=non_lang_syms) @classmethod def build_dictionary(cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8): """Disable this method """ raise NotImplementedError def __init__(self, args, dictionary, word_dict=None): super().__init__(args) self.dictionary = dictionary self.word_dict = word_dict self.feat_in_channels = args.feat_in_channels torch.backends.cudnn.deterministic = True # Compansate for the removel of :func:`torch.rand()` from # :func:`fairseq.distributed_utils.distributed_init()` by fairseq, # to make previous experiments reproducible. torch.rand(1) @classmethod def setup_task(cls, args, **kwargs): """Setup the task (e.g., load dictionaries). Args: args (argparse.Namespace): parsed command-line arguments """ args.left_pad_source = options.eval_bool(args.left_pad_source) args.left_pad_target = options.eval_bool(args.left_pad_target) # load dictionaries dict_path = os.path.join(os.path.dirname(args.train_text_files[0]), 'dict.txt') \ if args.dict is None and args.train_text_files is not None else args.dict assert dict_path is not None, 'Please specify --dict' dictionary = cls.load_dictionary(dict_path, non_lang_syms=args.non_lang_syms) logger.info('dictionary: {} types'.format(len(dictionary))) if args.word_dict is not None: word_dict = cls.load_dictionary(args.word_dict) logger.info('word dictionary: {} types'.format(len(word_dict))) return cls(args, dictionary, word_dict) else: return cls(args, dictionary) def load_dataset(self, split, epoch=0, combine=False, **kwargs): """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, test) """ src_datasets = [] tgt_datasets = [] if split == 'train': feat_files = self.args.train_feat_files text_files = self.args.train_text_files assert len(feat_files) > 0 and len(feat_files) == len(text_files) feat_files = [feat_files[epoch % len(feat_files)]] text_files = [text_files[epoch % len(text_files)]] elif split == 'valid': feat_files = self.args.valid_feat_files text_files = self.args.valid_text_files elif split == 'test': feat_files = self.args.test_feat_files text_files = self.args.test_text_files # can be empty if text_files is None: text_files = [None] * len(feat_files) elif split == 'train_subset': feat_files = self.args.train_subset_feat_files text_files = self.args.train_subset_text_files else: raise ValueError('split should be one of "train", "valid", "test", "train_subset"') assert len(feat_files) > 0 and len(feat_files) == len(text_files) file_pairs = zip(feat_files, text_files) for feat, text in file_pairs: assert ScpCachedDataset.exists(feat), feat + ' does not exists' assert text is None or AsrTextDataset.exists(text), text + ' does not exists' src_datasets.append(ScpCachedDataset(feat, ordered_prefetch=True)) logger.info('{} {} examples'.format(feat, len(src_datasets[-1]))) if text is not None: tgt_datasets.append(AsrTextDataset(text, self.dictionary)) logger.info('{} {} examples'.format(text, len(tgt_datasets[-1]))) if not combine: break if len(tgt_datasets) > 0: assert len(src_datasets) == len(tgt_datasets) self.feat_dim = src_datasets[0].feat_dim if len(src_datasets) == 1: src_dataset = src_datasets[0] tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None else: for i in range(1, len(src_datasets)): assert self.feat_dim == src_datasets[i].feat_dim, \ 'feature dimension does not match across multiple scp files' sample_ratios = [1] * len(src_datasets) sample_ratios[0] = self.args.upsample_primary src_dataset = ConcatDataset(src_datasets, sample_ratios) tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios) \ if len(tgt_datasets) > 0 else None self.datasets[split] = SpeechDataset( src_dataset, src_dataset.sizes, tgt_dataset, tgt_dataset.sizes if tgt_dataset is not None else None, self.dictionary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions, ) # update the counts of <eos> and <unk> in dictionary with training data if split == 'train': self.dictionary.count[self.dictionary.eos()] = len(tgt_dataset) unk_count = 0 for i in range(len(tgt_dataset)): unk_count += (tgt_dataset[i][0] == self.dictionary.unk()).int().sum().item() self.dictionary.count[self.dictionary.unk()] = unk_count def build_generator(self, args): if args.score_reference: args.score_reference = False logger.warning( '--score-reference is not applicable to speech recognition, ignoring it.' ) from fairseq.sequence_generator import SequenceGenerator # Choose search strategy. Defaults to Beam Search. 
sampling = getattr(args, 'sampling', False) sampling_topk = getattr(args, 'sampling_topk', -1) sampling_topp = getattr(args, 'sampling_topp', -1.0) diverse_beam_groups = getattr(args, 'diverse_beam_groups', -1) diverse_beam_strength = getattr(args, 'diverse_beam_strength', 0.5), match_source_len = getattr(args, 'match_source_len', False) diversity_rate = getattr(args, 'diversity_rate', -1) if ( sum( int(cond) for cond in [ sampling, diverse_beam_groups > 0, match_source_len, diversity_rate > 0, ] ) > 1 ): raise ValueError('Provided Search parameters are mutually exclusive.') assert sampling_topk < 0 or sampling, '--sampling-topk requires --sampling' assert sampling_topp < 0 or sampling, '--sampling-topp requires --sampling' if sampling: search_strategy = search.Sampling(self.target_dictionary, sampling_topk, sampling_topp) elif diverse_beam_groups > 0: search_strategy = search.DiverseBeamSearch( self.target_dictionary, diverse_beam_groups, diverse_beam_strength) elif match_source_len: # this is useful for tagging applications where the output # length should match the input length, so we hardcode the # length constraints for simplicity search_strategy = search.LengthConstrainedBeamSearch( self.target_dictionary, min_len_a=1, min_len_b=0, max_len_a=1, max_len_b=0, ) elif diversity_rate > -1: search_strategy = search.DiverseSiblingsSearch(self.target_dictionary, diversity_rate) else: search_strategy = search.BeamSearch(self.target_dictionary) return SequenceGenerator( self.target_dictionary, beam_size=getattr(args, 'beam', 5), max_len_a=getattr(args, 'max_len_a', 0), max_len_b=getattr(args, 'max_len_b', 200), min_len=getattr(args, 'min_len', 1), normalize_scores=(not getattr(args, 'unnormalized', False)), len_penalty=getattr(args, 'lenpen', 1), unk_penalty=getattr(args, 'unkpen', 0), temperature=getattr(args, 'temperature', 1.), match_source_len=getattr(args, 'match_source_len', False), no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0), search_strategy=search_strategy, eos_factor=getattr(args, 'eos_factor', None), ) def build_dataset_for_inference(self, src_tokens, src_lengths): return SpeechDataset(src_tokens, src_lengths) def build_model(self, args): # build the greedy decoder for validation with WER from espresso.tools.simple_greedy_decoder import SimpleGreedyDecoder self.decoder_for_validation = SimpleGreedyDecoder(self.target_dictionary, for_validation=True) return super().build_model(args) def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) ( logging_output['word_error'], logging_output['word_count'], logging_output['char_error'], logging_output['char_count'], ) = self._inference_with_wer(self.decoder_for_validation, sample, model) return loss, sample_size, logging_output def inference_step(self, generator, models, sample, prefix_tokens=None, lm_weight=0.0): with torch.no_grad(): return generator.generate( models, sample, prefix_tokens=prefix_tokens, lm_weight=lm_weight, ) def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) word_error = sum(log.get('word_error', 0) for log in logging_outputs) word_count = sum(log.get('word_count', 0) for log in logging_outputs) char_error = sum(log.get('char_error', 0) for log in logging_outputs) char_count = sum(log.get('char_count', 0) for log in logging_outputs) if word_count > 0: metrics.log_scalar('wer', float(word_error) / word_count * 100, word_count, round=4) if char_count > 0: metrics.log_scalar('cer', 
float(char_error) / char_count * 100, char_count, round=4) def max_positions(self): """Return the max sentence length allowed by the task.""" return (self.args.max_source_positions, self.args.max_target_positions) @property def target_dictionary(self): """Return the target :class:`~fairseq.data.AsrDictionary`.""" return self.dictionary @property def word_dictionary(self): """Return the target :class:`~fairseq.data.AsrDictionary`.""" return self.word_dict def _inference_with_wer(self, decoder, sample, model): from espresso.tools import wer scorer = wer.Scorer(self.target_dictionary, wer_output_filter=self.args.wer_output_filter) tokens, lprobs, _ = decoder.decode([model], sample) pred = tokens[:, 1:].data.cpu() # bsz x len target = sample['target'] assert pred.size(0) == target.size(0) # compute word error stats scorer.reset() for i in range(target.size(0)): utt_id = sample['utt_id'][i] ref_tokens = sample['target_raw_text'][i] pred_tokens = self.target_dictionary.string(pred.data[i]) scorer.add_evaluation( utt_id, ref_tokens, pred_tokens, bpe_symbol=self.args.remove_bpe, ) return ( scorer.tot_word_error(), scorer.tot_word_count(), scorer.tot_char_error(), scorer.tot_char_count(), )
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import asyncore import socket import pickle import importlib import struct import ipaddress from ClusterInfo import ClusterInfo from Commands import Commands class JobManagerCommandHandler(asyncore.dispatcher): def __init__(self, svr_sock, job_manager): asyncore.dispatcher.__init__(self, sock=svr_sock) self.jm = job_manager def handle_read(self): data = self.recv(8192) if data: message = pickle.loads(data) command = message['cmd'] job_name = message['job_name'] if command == 'submit': job_file = message['job_file'] self.jm.add_job(job_file, job_name) elif command == 'prepare': self.jm.prepare_job(job_name) elif command == 'run': self.jm.run_job(job_name) elif command == 'pause': self.jm.pause_job(job_name) elif command == 'cancel': self.jm.cancel_job(job_name) class JobManager(asyncore.dispatcher): def __init__(self): asyncore.dispatcher.__init__(self) self.cluster_info = ClusterInfo() self.jobs = {} address = self.cluster_info.job_manager_info.ip_addr port = self.cluster_info.job_manager_info.port self.create_socket() self.set_reuse_addr() self.bind((address, port)) self.listen(1) def handle_accepted(self, sock, addr): handler = JobManagerCommandHandler(sock, self) def add_job(self, job_file, job_name): # read job global dst_mac module_name = job_file.rstrip('.py') module = importlib.import_module(module_name) job = module.UserJob(job_name) # build job job.define_dataflow() # add job to JobManager's attributes self.jobs[job_name] = job nw_interfaces = {} for tm_name, dlg in job.dlgs.items(): for tlg in dlg.tlgs: for op in tlg.operators: for suc in job.df.successors(op): if not dlg.has_operator(suc): for d in job.dlgs.values(): if d.has_operator(suc): edge = (op, suc) indices = job.df.interfaces[edge] interface = (suc.name, indices[1]) if not nw_interfaces.get(interface): data_mac, data_addr, data_port = self.cluster_info\ .task_manager_infos[d.tm_name]\ .reserve_data_interface() nw_interfaces[interface] = (data_addr, data_port, data_mac) for pre in job.df.predecessors(op): if not dlg.has_operator(pre): for d in job.dlgs.values(): if d.has_operator(pre): edge = (pre, op) indices = job.df.interfaces[edge] interface = (op.name, indices[0]) if not nw_interfaces.get(interface): data_mac, data_addr, data_port = self.cluster_info\ .task_manager_infos[tm_name]\ .reserve_data_interface() nw_interfaces[interface] = (data_addr, data_port, data_mac) # distribute tasks for dlg in job.dlgs.values(): tm_addr = self.cluster_info.task_manager_infos[dlg.tm_name].manager_address tm_port = self.cluster_info.task_manager_infos[dlg.tm_name].manager_port if dlg.device_type == 'CPU': message = {'cmd': 'submit', 'job_file': job_file, 'job_name': job_name, 'interface': nw_interfaces} self.__send_message(tm_addr, tm_port, message) elif dlg.device_type == 'FPGA': assert len(dlg.tlgs) == 1 assert len(dlg.tlgs[0].operators) == 1 op = dlg.tlgs[0].operators[0] logic_in_port = int(nw_interfaces[(op.name, 0)][1]) if len(tuple(job.df.successors(op))) > 0: suc = tuple(job.df.successors(op))[0] suc_if_index = job.df.interfaces[(op, suc)][1] dst_mac = nw_interfaces[(suc.name, suc_if_index)][2] dst_addr = nw_interfaces[(suc.name, suc_if_index)][0] logic_out_port = int(nw_interfaces[(suc.name, suc_if_index)][1]) else: dst_addr = 0 dst_mac = 0 logic_out_port = 0 dst_mac_array = dst_mac.split(':') message = struct.pack('<I', Commands.submit) + struct.pack('<H', logic_in_port)\ + struct.pack('<H', logic_out_port)\ + struct.pack('<I', int(ipaddress.IPv4Address(dst_addr)))\ + 
struct.pack('<BBBBBB', int(dst_mac_array[0], 16), int(dst_mac_array[1], 16), int(dst_mac_array[2], 16), int(dst_mac_array[3], 16), int(dst_mac_array[4], 16), int(dst_mac_array[5], 16)) self.__send_message(tm_addr, tm_port, message, encoded=True, udp=True) def prepare_job(self, job_name): for dlg in self.jobs[job_name].dlgs.values(): tm_addr = self.cluster_info.task_manager_infos[dlg.tm_name].manager_address tm_port = self.cluster_info.task_manager_infos[dlg.tm_name].manager_port if dlg.device_type is not 'FPGA': message = {'cmd': 'prepare', 'job_name': job_name} self.__send_message(tm_addr, tm_port, message) else: message = struct.pack('<I', Commands.prepare) + struct.pack('<I', 0) self.__send_message(tm_addr, tm_port, message, encoded=True, udp=True) def run_job(self, job_name): for dlg in self.jobs[job_name].dlgs.values(): tm_addr = self.cluster_info.task_manager_infos[dlg.tm_name].manager_address tm_port = self.cluster_info.task_manager_infos[dlg.tm_name].manager_port if dlg.device_type is not 'FPGA': message = {'cmd': 'run', 'job_name': job_name} self.__send_message(tm_addr, tm_port, message) else: message = struct.pack('<I', Commands.run) + struct.pack('<I', 0) self.__send_message(tm_addr, tm_port, message, encoded=True, udp=True) def pause_job(self, job_name): for dlg in self.jobs[job_name].dlgs.values(): tm_addr = self.cluster_info.task_manager_infos[dlg.tm_name].manager_address tm_port = self.cluster_info.task_manager_infos[dlg.tm_name].manager_port if dlg.device_type is not 'FPGA': message = {'cmd': 'pause', 'job_name': job_name} self.__send_message(tm_addr, tm_port, message) else: message = struct.pack('<I', Commands.pause) + struct.pack('<I', 0) self.__send_message(tm_addr, tm_port, message, encoded=True, udp=True) def cancel_job(self, job_name): for dlg in self.jobs[job_name].dlgs.values(): tm_addr = self.cluster_info.task_manager_infos[dlg.tm_name].manager_address tm_port = self.cluster_info.task_manager_infos[dlg.tm_name].manager_port if dlg.device_type is not 'FPGA': message = {'cmd': 'cancel', 'job_name': job_name} self.__send_message(tm_addr, tm_port, message) else: message = struct.pack('<I', Commands.cancel) + struct.pack('<I', 0) self.__send_message(tm_addr, tm_port, message, encoded=True, udp=True) del(self.jobs[job_name]) @staticmethod def __send_message(address, port, message, encoded=False, udp=False): if not encoded: message = pickle.dumps(message) if udp: client_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) client_sock.sendto(message, (address, port)) else: client_sock = socket.socket() client_sock.connect((address, port)) client_sock.send(message) client_sock.close()
python
import pyglet from pyglet.window import key from ctypes import pointer, sizeof import random from math import * sign = lambda x: copysign(1, x) class field: def __init__(self, dots, func, speed, lifespan, realSize, screenSize, theta=0, shift=(0, 0), imag=False, norm=False): self.num = dots self.F = func self.speed = speed self.ar = lifespan # 0 indicates particles are immortal self.rlsz = (realSize*scsz[0]/scsz[1], realSize) self.scsz = screenSize self.rrat = (self.scsz[0]/self.rlsz[0]/2, self.scsz[0]/self.rlsz[0]/2) # real ratio self.fast = False # double time self.theta = theta # use linear transfrom to rotate vector field function self.shift = shift # just a standard translation self.imag = imag # true if the function returns a complex self.norm = norm # true to normalize the vector field if self.theta: self.c = cos(theta) self.s = sin(theta) self.F = lambda x, y: self.rotate(*func(*self.protate(x-self.shift[0], y-self.shift[1]))) elif shift[0] or shift[1]: self.F = lambda x, y: func(x-self.shift[0], y-self.shift[1]) self.reset() # Now create a vertex buffer object. For speeed self.vbo_id = pyglet.gl.GLuint() pyglet.gl.glGenBuffers(1, pointer(self.vbo_id)) pyglet.gl.glBindBuffer(pyglet.gl.GL_ARRAY_BUFFER, self.vbo_id) pyglet.gl.glBufferData(pyglet.gl.GL_ARRAY_BUFFER, sizeof(self.data), 0, pyglet.gl.GL_STATIC_DRAW) def rotate(self, x, y): # rotate a point by the angle specified in initialization return (self.c*x - self.s*y, self.s*x + self.c*y) def protate(self, x, y): # rotate a point by the negative of the angle specified in initialization return (self.c*x + self.s*y, -self.s*x + self.c*y) def reset(self): self.pts = [] self.age = [] # generate all particles within the field of the screen # origin as center and rlsz as coordinate of top for f in range(self.num): self.pts.append(self.new()) self.age.append(0) self.flatten() def new(self): ## p = (self.rlsz[0]*(2*random.random()-1), self.rlsz[1]*(2*random.random()-1)) ## for f in range(3): ## if 2 < abs(p[0])+abs(p[1]) and 2 < abs(p[0]-12)+abs(p[1]) and 2 < abs(p[0]+12)+abs(p[1]) : ## p = (self.rlsz[0]*(2*random.random()-1), self.rlsz[1]*(2*random.random()-1)) ## return p return (self.rlsz[0]*(2*random.random()-1), self.rlsz[1]*(2*random.random()-1)) def update(self): for f in range(self.num): self.age[f] += 1 try: force = self.F(*self.pts[f]) if self.imag: force = (force.real, force.imag) if self.norm: temp = hypot(*force) force = (force[0]/temp, force[1]/temp) except: # In case of math error, send dot to the shadow realm. 
force = (0, -3*self.rlsz[1]/(self.speed+self.fast*self.speed)) self.pts[f] = (self.pts[f][0]+(self.speed+self.fast*self.speed)*force[0], self.pts[f][1]+(self.speed+self.fast*self.speed)*force[1]) if (self.rlsz[0] < self.pts[f][0] or self.pts[f][0] < -self.rlsz[0] or self.rlsz[1] < self.pts[f][1] or self.pts[f][1] < -self.rlsz[1]): # oops, we're out of bounds, regenerate the dot self.pts[f] = self.new() self.age[f] = 0 elif self.ar and self.ar*2*random.random() < self.age[f]: # the dot has reached the end of its lifespan, regenerate the dot self.pts[f] = self.new() self.age[f] = 0 self.flatten() def flatten(self): # transforms data into screen coordinates # then puts it in proper opengl type lis = [] for f in range(self.num): lis.append(self.rrat[0]*(self.pts[f][0]+self.rlsz[0])) lis.append(self.rrat[1]*(self.pts[f][1]+self.rlsz[1])) self.data = (pyglet.gl.GLfloat*(self.num*2))(*lis) def draw(self): ## pyglet.gl.glBindBuffer(pyglet.gl.GL_ARRAY_BUFFER, self.vbo_id) # don't need this since only one vbo pyglet.gl.glBufferSubData(pyglet.gl.GL_ARRAY_BUFFER, 0, sizeof(self.data), self.data) ## pyglet.gl.glColor3f(255, 255, 255) # set color of points pyglet.gl.glVertexPointer(2, pyglet.gl.GL_FLOAT, 0, 0) pyglet.gl.glDrawArrays(pyglet.gl.GL_POINTS, 0, self.num) if __name__ == "__main__": config = pyglet.gl.Config(double_buffer=False) window = pyglet.window.Window(caption='vector field', fullscreen=True, config=config, vsync=0) window.set_exclusive_mouse() fps_display = pyglet.window.FPSDisplay(window=window) scsz = window.get_size() # various cool vector fields ##dots = field(10000, lambda x, y:(sin(y), sin(x)), 1/45, 0, 12, scsz) # cinnamon roll ##dots = field(5000, lambda x, y:(sin(y)**2, sin(x)), 1/5, 0, 10, scsz) # snake ##dots = field(10000, lambda x, y:(x, y/sin(sqrt(x**2+y**2))), 1/600, 0, 10, scsz) # eye ##dots = field(10000, lambda x, y:(x**2-y**2, 2*x*y), 1/60, 600, 10, scsz) # z^2 ##dots = field(5000, lambda x, y:(1+(y**2-x**2)/(x**2+y**2)**2, -2*x*y/(x**2+y**2)**2), 1/300, 0, 2, scsz) # cylinder flow ##dots = field(5000, lambda x, y:((y**2-x**2)/(x**2+y**2)**2, -2*x*y/(x**2+y**2)**2), 1/300, 0, 2, scsz) # dipole ##dots = field(5000, lambda x, y:(cos(exp(x+10)), sin((x+10)**2)/y), 1/60, 0, 10, scsz) # chaotic strings ##dots = field(10000, lambda x, y:(-sign(y%12-6)*cos(2**(abs(y%12-6)+0.65)), sign(x%12-6)*cos(2**(abs(x%12-6)+0.65))), 1/60, 0, 10, scsz) # bubble frame ##dots = field(10000, lambda x, y:((2*x**3-2*x)/(2*y**3-y), (2*y**3-2*y)/(2*x**3-x)), 1/3000, 0, 2, scsz, theta=pi/4, shift=(0, 0.25)) # the fish ##dots = field(10000, lambda x, y:(sin(2*y), cos(x**2+y**2+1/(3*y**2+0.3)-3/(atan((x**2-y**2+13)/2)+pi/2))), 1/120, 0, 5, scsz) # balance dots = field(10000, lambda x, y:(1)/(x+y*1j), 1/800, 0, 2, scsz, shift=(0, 0), imag=True, norm=True) # using a function allows for more complicated calculations on the vector field ##def F(x, y): #### x = x%12-6 # modular repeat x #### y = y%12-6 # modular repeat y ## return (x, y) ## ##dots = field(10000, F, 1/600, 0, 1, scsz) pause = False stain = False fpshow = False @window.event def on_key_press(symbol, modifiers): global dots, pause, stain, fpshow if symbol == key.SPACE: # press space to pause pause = not pause elif symbol == key.N: # press N to go forward one frame dots.update() elif symbol == key.S: # press S to toggle stain stain = not stain elif symbol == key.P: # press P to toggle fps reading fpshow = not fpshow elif symbol == key.R: # press R to reset field dots.reset() elif symbol == key.F: # press F to toggle fast mode dots.fast = 
30*(not dots.fast) elif symbol == key.ESCAPE: # press escape to exit pyglet.app.exit() def update(dt): global dots, pause, stain, fpshow if not pause: dots.update() if not stain: pyglet.gl.glClear(pyglet.gl.GL_COLOR_BUFFER_BIT) dots.draw() if fpshow: fps_display.draw() FPS = 60 pyglet.clock.schedule_interval(update, 1/FPS) ##pyglet.gl.glClearColor(0.2, 0.4, 0.5, 1.0) # set the color that clears the screen ##pyglet.gl.glPointSize(1) # set the size of the points pyglet.gl.glEnableClientState(pyglet.gl.GL_VERTEX_ARRAY) pyglet.app.run()
python
from rest_framework import serializers

from auth.models import Skill, Social, User


class SocialSerializer(serializers.ModelSerializer):
    class Meta:
        model = Social
        fields = ("name", "logo", "link")

    def __str__(self) -> str:
        return self.name


class SkillSerializer(serializers.ModelSerializer):
    class Meta:
        model = Skill
        fields = ("id", "name", "logo", "description")

    def __str__(self) -> str:
        return self.name


class UserSerializer(serializers.ModelSerializer):
    class Meta:
        model = User
        fields = ("social", "skills")

    def __str__(self) -> str:
        return self.name
python
""" Implementation of logical and physical relational operators """ from ..baseops import UnaryOp from ..exprs import * from ..schema import * from ..tuples import * from ..db import Mode from ..util import cache, OBTuple from itertools import chain ######################################################## # # Source Operators # ######################################################## class Source(UnaryOp): pass class SubQuerySource(Source): """ Allows subqueries in the FROM clause of a query Mainly responsible for giving the subquery an alias """ def __init__(self, c, alias=None): super(SubQuerySource, self).__init__(c) self.alias = alias def __iter__(self): for row in self.c: yield row def init_schema(self): """ A source operator's schema should be initialized with the same tablename as the operator's alias """ self.schema = self.c.schema.copy() self.schema.set_tablename(self.alias) return self.schema class DummyScan(Source): def __iter__(self): yield ListTuple(Schema([])) def init_schema(self): self.schema = Schema([]) return self.schema def __str__(self): return "DummyScan()" class Scan(Source): """ A scan operator over a table in the Database singleton. """ def __init__(self, tablename, alias=None): super(Scan, self).__init__() self.tablename = tablename self.alias = alias or tablename from ..db import Database self.db = Database.db() def init_schema(self): """ A source operator's schema should be initialized with the same tablename as the operator's alias """ self.schema = self.db.schema(self.tablename).copy() self.schema.set_tablename(self.alias) return self.schema def __iter__(self): # initialize a single intermediate tuple irow = ListTuple(self.schema, []) for row in self.db[self.tablename]: irow.row = row.row yield irow def __str__(self): return "Scan(%s AS %s)" % (self.tablename, self.alias) class ScanWithProject(Source): def __init__(self, tablename, exprs, aliases=[], alias=None): super(ScanWithProject, self).__init__() print("scan with project:", tablename) self.tablename = tablename self.alias = alias or tablename self.exprs = exprs self.aliases = aliases from ..db import Database self.db = Database.db() def init_schema(self): """ A source operator's schema should be initialized with the same tablename as the operator's alias """ # print("table:", self.tablename,"schema: ", self.db.schema(self.tablename)) self.schema = Schema([]) if len(self.exprs) > 0: for alias, expr in zip(self.aliases, self.exprs): # print("alias:", alias, "expr:", expr) typ = expr.get_type() self.schema.attrs.append(Attr(alias, typ)) else: self.schema = self.db.schema(self.tablename) self.schema.set_tablename(self.alias) # print("table:", self.tablename, "schema:", self.schema) return self.schema def __iter__(self): # initialize a single intermediate tuple irow = ListTuple(self.schema, []) if self.db.mode == Mode.COLUMN_ALL: columns = [] # Load all the columns into memory for _, expr in enumerate(self.exprs): col_index = expr.aname # attribute name col = self.db[self.tablename][(None, col_index)] columns.append(col) # Iterate through all the rows, construct ListTuple for row_index in range(len(self.db[self.tablename])): irow.row = [col[row_index] for col in columns] yield irow else: for row in self.db[self.tablename].diskIter(): for i, (exp) in enumerate(self.exprs): # TODO: BUG in find_idx, can't use exp(row) here irow.row[i] = row[self.db[self.tablename].attr_to_idx[exp.aname]] yield irow def __str__(self): return "ScanWithProject(%s AS %s)" % (self.tablename, self.alias) class 
TableFunctionSource(UnaryOp): """ Scaffold for a table UDF function that outputs a relation. Not implemented. """ def __init__(self, function, alias=None): super(TableFunctionSource, self).__init__(function) self.function = function self.alias = alias def __iter__(self): raise Exception("TableFunctionSource: Not implemented") def __str__(self): return "TableFunctionSource(%s)" % self.alias
python
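The Scan and ScanWithProject operators above follow the classic pull-based iterator model: each source reuses a single intermediate tuple object and yields it once per input row, and downstream operators evaluate expressions against whatever the child hands them. A minimal self-contained sketch of that pattern, using toy classes rather than the module's real ListTuple/Schema API, is:

# Minimal sketch of the pull-based iterator model used by Scan/ScanWithProject above.
# These toy classes only illustrate the pattern; they are not the module's real API.
class ToyScan:
    """Yields one reused row object per input row, like Scan.__iter__."""
    def __init__(self, rows):
        self.rows = rows

    def __iter__(self):
        current = {"row": None}          # single reused intermediate "tuple"
        for row in self.rows:
            current["row"] = row
            yield current


class ToyProject:
    """Applies per-column expressions to each row pulled from its child."""
    def __init__(self, child, exprs):
        self.child = child
        self.exprs = exprs               # list of callables, one per output column

    def __iter__(self):
        for irow in self.child:
            yield [expr(irow["row"]) for expr in self.exprs]


if __name__ == "__main__":
    table = [(1, "a"), (2, "b"), (3, "c")]
    plan = ToyProject(ToyScan(table), [lambda r: r[0] * 10, lambda r: r[1].upper()])
    print(list(plan))                    # [[10, 'A'], [20, 'B'], [30, 'C']]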
import os
import dotenv
import errno
import click
import io
import sys
import pathlib


class Config:
    """Accommodate config file creation by setting and getting its class variables."""

    user_access_key = ""
    user_secret_key = ""
    user_url = "nos.wjv-1.neo.id"
    user_gmt_policy = "notset"
    admin_url = ""
    admin_port = ""
    admin_username = ""
    admin_password = ""
    use_https = "true"
    use_neo = "false"

    def dump_config(self, options, cfg):
        cfg_file = config_file()
        config = ""
        for option in options:
            value = getattr(cfg, option[0])
            option = f"OBS_{option[0].upper()}"
            config += f"{option}={value}\n"
        try:
            create_config_dir()
            with io.open(cfg_file, "w") as fp:
                fp.write(config)
            click.secho(f"\nConfiguration saved to {cfg_file}", fg="green")
        except IOError as e:
            click.secho(
                f"\nWriting config file failed: {cfg_file}: {e.strerror}",
                fg="yellow",
                bold=True,
                err=True,
            )
            sys.exit()


def create_config_dir():
    home = os.path.expanduser("~")
    config_dir = os.path.join(home, ".config", "neo-obs")
    pathlib.Path(config_dir).mkdir(parents=True, exist_ok=True)


def config_file():
    home = os.path.expanduser("~")
    cfg_file = os.path.join(home, ".config", "neo-obs", "obs.env")
    return cfg_file


def is_config_exists():
    cfg_file = config_file()
    return os.path.isfile(cfg_file)


def load_config_file():
    cfg_file = config_file()
    # load_dotenv doesn't raise its own exception
    if is_config_exists():
        dotenv.load_dotenv(cfg_file, override=True)
    else:
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), cfg_file)
python
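dump_config above renders each (name, ...) option tuple into an OBS_-prefixed line of an env file, which load_config_file later reads back with python-dotenv. A standalone sketch of the rendering half, using only the standard library and a temporary file instead of ~/.config/neo-obs/obs.env (render_env and DummyCfg are illustrative names, not part of the module):

import os
import tempfile

def render_env(options, cfg):
    # Same shape as Config.dump_config: one "OBS_<NAME>=<value>" line per option tuple.
    lines = []
    for option in options:
        value = getattr(cfg, option[0])
        lines.append(f"OBS_{option[0].upper()}={value}")
    return "\n".join(lines) + "\n"

class DummyCfg:
    user_url = "nos.wjv-1.neo.id"
    use_https = "true"

if __name__ == "__main__":
    text = render_env([("user_url",), ("use_https",)], DummyCfg())
    with tempfile.NamedTemporaryFile("w", suffix=".env", delete=False) as fp:
        fp.write(text)
    print(open(fp.name).read())   # OBS_USER_URL=nos.wjv-1.neo.id / OBS_USE_HTTPS=true
    os.unlink(fp.name)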
# coding=utf-8
# Author: Diego González Chávez
# email : diegogch@cbpf.br / diego.gonzalez.chavez@gmail.com
#
# This class controls the:
# Radio Frequency Amplifier model 60/20S1G18A
# by Amplifier Research
#
# TODO:
# Make documentation

import numpy as _np
from .instruments_base import InstrumentBase as _InstrumentBase
from .instruments_base import findResource

__all__ = ['AR_RF_Amplifier']


class AR_RF_Amplifier(_InstrumentBase):
    def __init__(self, ResourceName, logFile=None):
        super().__init__(ResourceName, logFile)
        self._IDN = 'RF Amplifier'
        self.VI.write_termination = self.VI.LF
        self.VI.read_termination = self.VI.LF
        self.write('R')

    @property
    def ID(self):
        '''ID'''
        return self.query('*IDN?')

    def Output(self, out):
        '''
        Enable or disable power supply output
        Usage :
            Output('ON'/'OFF')
        '''
        if out in ['ON', 'OFF']:
            state = {'ON': 1, 'OFF': 0}[out]
            self.write('P%d' % state)
        else:
            self._log('ERR ', 'Output error code')

    @property
    def gain(self):
        '''
        Gain level (in %)
        '''
        gain_bin = int(self.query('G?').strip('G'))
        return gain_bin / 4095 * 100

    @gain.setter
    def gain(self, vGain):
        gain_bin = round(vGain / 100 * 4095)
        self.write('G%d' % gain_bin)

    def Band(self, band):
        '''
        Select the high or low band amplifier
        Usage :
            Band('HIGH'/'LOW')
        '''
        if band in ['HIGH', 'LOW']:
            self.write('BAND%s' % band[0])
        else:
            self._log('ERR ', 'Band error code')
python
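The gain property above encodes a percentage as the amplifier's 12-bit gain word (0-4095) and decodes it on the way back. The conversion can be checked without the instrument; the helper names below are only for illustration:

def percent_to_word(percent):
    # same math as the gain setter: map 0-100 % onto the 12-bit range
    return round(percent / 100 * 4095)

def word_to_percent(word):
    # same math as the gain getter
    return word / 4095 * 100

if __name__ == "__main__":
    for pct in (0, 25.0, 50.0, 100.0):
        word = percent_to_word(pct)
        print(pct, "->", word, "->", round(word_to_percent(word), 2))
    # e.g. 50.0 -> 2048 -> 50.01; the round-trip error stays below one LSB (~0.024 %)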
from hodolbot.classes import View
from hodolbot.controllers import covid19_handler


class Covid19View(View):
    command = "코로나"

    @staticmethod
    def get():
        return covid19_handler()
python
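Covid19View above is a thin command handler; the View base class from hodolbot.classes is not shown, but it presumably maps an incoming command word to the matching view's get(). A toy dispatcher in the same spirit, with made-up class names, might look like:

class ToyView:
    registry = {}

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        ToyView.registry[cls.command] = cls   # register each subclass by its command word

    @staticmethod
    def get():
        raise NotImplementedError

def dispatch(command):
    view = ToyView.registry.get(command)
    return view.get() if view else "unknown command"

class HelloView(ToyView):
    command = "hello"

    @staticmethod
    def get():
        return "world"

if __name__ == "__main__":
    print(dispatch("hello"))    # world
    print(dispatch("코로나"))   # unknown command (no handler registered in this sketch)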
# template script to create some easy plots for the chip problem

import numpy as np
import matplotlib.pyplot as plt
import simnet as sn

# set the path for the .npz files
base_dir = 'network_checkpoint_chip_2d/val_domain/results/'

# load the .npz files
pred_data = np.load(base_dir + 'Val_pred.npz', allow_pickle=True)
true_data = np.load(base_dir + 'Val_true.npz', allow_pickle=True)
pred_data = np.atleast_1d(pred_data.f.arr_0)[0]
true_data = np.atleast_1d(true_data.f.arr_0)[0]

# remove the variables created for parameterization (uncomment when visualizing parametric results)
#pred_data.pop('chip_width')
#pred_data.pop('chip_height')
#true_data.pop('chip_width')
#true_data.pop('chip_height')

# plot only one set of variables
sn.plot_utils.field.plot_field(pred_data, 'chip_predicted', coordinates=['x', 'y'], resolution=256)

# plot the comparison between a set of variables
sn.plot_utils.field.plot_field_compare(true_data, pred_data, 'chip_comparison', coordinates=['x', 'y'], resolution=256)
python
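The script above depends on a slightly opaque unpacking idiom: each .npz file holds one pickled dict stored under arr_0, hence the np.atleast_1d(...)[0] dance. A standalone demonstration of why that works, writing and re-reading a dummy dict without simnet:

import os
import tempfile
import numpy as np

data = {"x": np.linspace(0, 1, 5), "u": np.zeros(5)}

path = os.path.join(tempfile.gettempdir(), "demo_pred.npz")
np.savez(path, data)                                  # the dict is stored as a 0-d object array 'arr_0'

loaded = np.load(path, allow_pickle=True)
restored = np.atleast_1d(loaded.f.arr_0)[0]           # unwrap the 0-d object array back into a dict
print(type(restored), sorted(restored.keys()))        # <class 'dict'> ['u', 'x']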
# -*- coding: utf-8 -*-
from model.contact import Contact
import random


def test_delete_some_contact(app, db):
    if len(db.get_contacts_list()) == 0:
        app.contact.create(Contact(firstname="Test delete first contact"))
    old_contacts = db.get_contacts_list()
    contact = random.choice(old_contacts)
    app.contact.delete_contact_by_id(contact.id)
    new_contacts = db.get_contacts_list()
    old_contacts.remove(contact)
    assert old_contacts == new_contacts


# def test_delete_all_contacts(app):
#     if app.contact.count() == 0:
#         app.contact.create(Contact(firstname="Test delete all contacts"))
#     app.contact.delete_all_contacts()
#     assert app.contact.count() == 0
python
# Copyright (c) 2014 Evalf # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ The parallel module provides tools aimed at parallel computing. At this point all parallel solutions use the ``fork`` system call and are supported on limited platforms, notably excluding Windows. On unsupported platforms parallel features will disable and a warning is printed. """ from . import log, numpy, numeric import os, sys, multiprocessing, tempfile, mmap, traceback, signal, collections.abc procid = None # current process id, None for unforked def shempty(shape, dtype=float): '''create uninitialized array in shared memory''' if numeric.isint(shape): shape = shape, else: assert all(numeric.isint(sh) for sh in shape) dtype = numpy.dtype(dtype) size = (numpy.product(shape) if shape else 1) * dtype.itemsize if size == 0: return numpy.empty(shape, dtype) # `mmap(-1,...)` will allocate *anonymous* memory. Although linux' man page # mmap(2) states that anonymous memory is initialized to zero, we can't rely # on this to be true for all platforms (see [SO-mmap]). [SO-mmap]: # https://stackoverflow.com/a/17896084 return numpy.frombuffer(mmap.mmap(-1, size), dtype).reshape(shape) def shzeros(shape, dtype=float): '''create zero-initialized array in shared memory''' array = shempty(shape, dtype=dtype) array.fill(0) return array def pariter(iterable, nprocs): '''iterate in parallel Fork into ``nprocs`` subprocesses, then yield items from iterable such that all processes receive a nonoverlapping subset of the total. It is up to the user to prepare shared memory and/or locks for inter-process communication. The following creates a data vector containing the first four quadratics:: data = shzeros(shape=[4], dtype=int) for i in pariter(range(4), 2): data[i] = i**2 data As a safety measure nested pariters are blocked by setting the global ``procid`` variable; all secundary pariters will be treated like normal serial iterators. Parameters ---------- iterable : :class:`collections.abc.Iterable` The collection of items to be distributed over processors nprocs : :class:`int` Maximum number of processers to use Yields ------ Items from iterable, distributed over at most nprocs processors. 
''' global procid if procid is not None: log.warning('ignoring pariter for already forked process') yield from iterable return if isinstance(iterable, collections.abc.Sized): nprocs = min(nprocs, len(iterable)) if nprocs <= 1: yield from iterable return if not hasattr(os, 'fork'): raise NotImplementedError('pariter requires os.fork, which is unavailable on this platform') shared_iter = multiprocessing.RawValue('i', nprocs) # shared integer pointing at first unyielded item lock = multiprocessing.Lock() # lock to avoid race conditions in incrementing shared_iter children = [] # list of forked processes, non-empty only in primary process try: for procid in range(1, nprocs): child_pid = os.fork() if not child_pid: signal.signal(signal.SIGINT, signal.SIG_IGN) # disable sigint (ctrl+c) handler break children.append(child_pid) else: procid = 0 iiter = procid # first index is 0 .. nprocs-1, with shared_iter at nprocs for n, it in enumerate(iterable): if n < iiter: # fast forward to iiter continue assert n == iiter yield it with lock: iiter = shared_iter.value # claim next value shared_iter.value = iiter + 1 except: fail = 1 if procid == 0: raise # reraise in main process # in child processes print traceback then exit excval = sys.exc_info()[1] if isinstance(excval, GeneratorExit): log.error('generator failed with unknown exception') elif not isinstance(excval, KeyboardInterrupt): log.error(traceback.format_exc()) else: fail = 0 finally: if procid != 0: # before anything else can fail: os._exit(fail) # cumminicate exit status to main process procid = None # unset global variable totalfail = fail while children: child_pid, child_status = os.wait() children.remove(child_pid) if child_status: totalfail += 1 if fail: # failure in main process: exception has been reraised log.error('pariter failed in {} out of {} processes; reraising exception for main process'.format(totalfail, nprocs)) elif totalfail: # failure in child process: raise exception raise Exception('pariter failed in {} out of {} processes'.format(totalfail, nprocs)) def parmap(func, iterable, nprocs, shape=(), dtype=float): '''parallel equivalent to builtin map function Produces an array of ``func(item)`` values for all items in ``iterable``. Because of shared memory restrictions ``func`` must yield numpy arrays of predetermined shape and type. Parameters ---------- func : :any:`callable` Takes item from iterable, returns numpy array of ``shape`` and ``dtype`` iterable : :class:`collections.abc.Iterable` Collection of items nprocs : :class:`int` Maximum number of processers to use shape : :class:`tuple` Return shape of ``func``, defaults to scalar dtype : :class:`tuple` Return dtype of ``func``, defaults to float Returns ------- Array of shape ``len(iterable),+shape`` and dtype ``dtype`` ''' n = len(iterable) out = shzeros((n,)+shape, dtype=dtype) for i, item in pariter(enumerate(iterable), nprocs=min(n,nprocs)): out[i] = func(item) return out # vim:sw=2:sts=2:et
python
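shempty and shzeros above back a numpy array with an anonymous mmap so that writes made by forked workers are visible to the parent. The allocation trick can be shown on its own, without any forking; this is just the shared-buffer idea, not the pariter machinery:

import mmap
import numpy as np

shape, dtype = (4,), np.dtype(float)
size = int(np.prod(shape)) * dtype.itemsize

buf = mmap.mmap(-1, size)                      # anonymous mapping, inherited across fork
shared = np.frombuffer(buf, dtype).reshape(shape)
shared.fill(0)                                 # shzeros: zero-fill explicitly, don't rely on the OS

shared[:] = [i ** 2 for i in range(4)]
print(shared)                                  # [0. 1. 4. 9.]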
import logging import os from checkov.cloudformation import cfn_utils from checkov.cloudformation.checks.resource.registry import cfn_registry from checkov.cloudformation.parser import parse from checkov.common.output.record import Record from checkov.common.output.report import Report from checkov.common.runners.base_runner import BaseRunner, filter_ignored_paths from checkov.runner_filter import RunnerFilter from checkov.cloudformation.parser.node import dict_node from checkov.cloudformation.context_parser import ContextParser CF_POSSIBLE_ENDINGS = [".yml", ".yaml", ".json", ".template"] class Runner(BaseRunner): check_type = "cloudformation" def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(), collect_skip_comments=True): report = Report(self.check_type) definitions = {} definitions_raw = {} parsing_errors = {} files_list = [] if external_checks_dir: for directory in external_checks_dir: cfn_registry.load_external_checks(directory) if files: for file in files: (definitions[file], definitions_raw[file]) = parse(file) if root_folder: for root, d_names, f_names in os.walk(root_folder): filter_ignored_paths(root, d_names, runner_filter.excluded_paths) filter_ignored_paths(root, f_names, runner_filter.excluded_paths) for file in f_names: file_ending = os.path.splitext(file)[1] if file_ending in CF_POSSIBLE_ENDINGS: files_list.append(os.path.join(root, file)) for file in files_list: relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}' try: (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse(file) except TypeError: logging.info(f'CloudFormation skipping {file} as it is not a valid CF template') # Filter out empty files that have not been parsed successfully, and filter out non-CF template files definitions = {k: v for k, v in definitions.items() if v and isinstance(v, dict_node) and v.__contains__("Resources") and isinstance(v["Resources"], dict_node)} definitions_raw = {k: v for k, v in definitions_raw.items() if k in definitions.keys()} for cf_file in definitions.keys(): # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path, # or there will be no leading slash; root_folder will always be none. # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above). # The goal here is simply to get a valid path to the file (which cf_file does not always give). if cf_file[0] == '/': path_to_convert = (root_folder + cf_file) if root_folder else cf_file else: path_to_convert = (os.path.join(root_folder, cf_file)) if root_folder else cf_file file_abs_path = os.path.abspath(path_to_convert) if isinstance(definitions[cf_file], dict_node) and 'Resources' in definitions[cf_file].keys(): cf_context_parser = ContextParser(cf_file, definitions[cf_file], definitions_raw[cf_file]) logging.debug("Template Dump for {}: {}".format(cf_file, definitions[cf_file], indent=2)) cf_context_parser.evaluate_default_refs() for resource_name, resource in definitions[cf_file]['Resources'].items(): resource_id = cf_context_parser.extract_cf_resource_id(resource, resource_name) # check that the resource can be parsed as a CF resource if resource_id: entity_lines_range, entity_code_lines = cf_context_parser.extract_cf_resource_code_lines(resource) if entity_lines_range and entity_code_lines: # TODO - Variable Eval Message! 
variable_evaluations = {} skipped_checks = ContextParser.collect_skip_comments(entity_code_lines) entity = {resource_name: resource} results = cfn_registry.scan(cf_file, entity, skipped_checks, runner_filter) tags = cfn_utils.get_resource_tags(entity) for check, check_result in results.items(): record = Record(check_id=check.id, check_name=check.name, check_result=check_result, code_block=entity_code_lines, file_path=cf_file, file_line_range=entity_lines_range, resource=resource_id, evaluations=variable_evaluations,check_class=check.__class__.__module__, file_abs_path=file_abs_path, entity_tags=tags) report.add_record(record=record) return report
python
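The runner above normalizes file keys before reporting: when scanning a directory it stores paths relative to the root with a leading '/', then rebuilds an absolute path per file. The path arithmetic can be sanity-checked in isolation; the sample paths below are made up:

import os

root_folder = "/tmp/templates"
file = "/tmp/templates/network/vpc.yaml"

# Same expression the runner uses to build the per-file key.
relative_file_path = f"/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}"
print(relative_file_path)                     # /network/vpc.yaml

# And the inverse step used before reporting: a leading '/' means "relative to root".
path_to_convert = (root_folder + relative_file_path) if relative_file_path[0] == "/" else relative_file_path
print(os.path.abspath(path_to_convert))       # /tmp/templates/network/vpc.yaml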
# encoding: utf-8
import argparse  # explicit import; ArgumentParser is used below

from themonkey import *


def calc_wordmetrics(wordfreqdict, charnlpdict):
    wordmetricdict = {}
    for word, freq in wordfreqdict.iteritems():
        numsylls = word.count("-") + 1
        word_nodash = word.replace("-", "").replace(" ", "").strip()
        numphones = len(word_nodash)
        phonsurprise = calc_phonsuprisal_by_len(word_nodash, numphones, charnlpdict)
        wordmetricdict[word] = (str(freq), str(numphones), str(numsylls), str(phonsurprise))
    return wordmetricdict


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Calculate Phonotactic Surprisal with Existing Language Model")
    parser.add_argument("wordfreqfile", help="word frequency file (eg output by bigguy.py)")
    parser.add_argument("psfile", help="char negative log probability file (*_ps.* files output by themonkey.py)")
    parser.add_argument("outputfile", help="output filename")
    args = parser.parse_args()

    wordfreqdict = {}
    with open(args.wordfreqfile, "r") as fin:
        next(fin)
        for line in fin:
            components = line.split(",")
            word = components[0]
            freq = components[1]
            wordfreqdict[word] = int(freq)

    charnlpdict = {}
    with open(args.psfile, "r") as fin:
        for line in fin:
            char, nlp = line.split(",")
            charnlpdict[char] = float(nlp)

    wordmetricdict = calc_wordmetrics(wordfreqdict, charnlpdict)

    with open(args.outputfile, "w") as f:
        f.write("word,wordfreq,numphones,numsylls,phonsuprise\n")
        for word, tup in wordmetricdict.iteritems():
            f.write("%s,%s\n" % (word, ",".join(tup)))

    print "Phonotactic Surprisal has been calculated ( ゚o゚)"
python
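calc_wordmetrics above delegates the actual computation to calc_phonsuprisal_by_len from themonkey, which is not included here. Under the usual definition, phonotactic surprisal is the sum (or per-phone average) of each character's negative log probability; the sketch below is only a guess at that helper's behaviour, for illustration:

def phone_surprisal(word, char_nlp, per_phone=True):
    """Sum of per-character negative log probabilities, optionally length-normalized."""
    total = sum(char_nlp[ch] for ch in word)
    return total / len(word) if per_phone else total

if __name__ == "__main__":
    # toy negative log probabilities, not real model output
    char_nlp = {"k": 1.2, "a": 0.7, "t": 1.0}
    print(round(phone_surprisal("kat", char_nlp), 3))                   # 0.967
    print(round(phone_surprisal("kat", char_nlp, per_phone=False), 3))  # 2.9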
import argparse from time import sleep from datetime import datetime import paho.mqtt.client as mqtt import RPi.GPIO as gpio PIN = 14 TOPIC = "home/power/meter" RECONNECT_DELAY_SECS = 2 DEFAULT_MQTT_PORT = 1883 FLASH_SECS = 0.02 FLASH_TOLERANCE_PCT = 10 def on_connect(client, userdata, flags, rc): print "Connected with result code " + str(rc) def on_disconnect(client, userdata, rc): print "Disconnected from MQTT server with code: %s" % rc while rc != 0: sleep(RECONNECT_DELAY_SECS) print "Reconnecting to MQTT server..." rc = client.reconnect() def publish_power(watts): watts = round(watts, 2) client.publish(TOPIC, payload=watts) print "Published value of %s Watts." % watts def within_tolerance(val, nominal, tolerance_percent): tol = tolerance_percent/100.0 return nominal*(1-tol) <= val <= nominal*(1+tol) def handle_change(val, last_val, on_dt, off_dt): print "Value changed to %r" % val now = datetime.now() if val == 1: return now, off_dt if off_dt is None: return on_dt, now if on_dt is None: return on_dt, off_dt on_secs = (now - on_dt).total_seconds() if not within_tolerance(on_secs, FLASH_SECS, FLASH_TOLERANCE_PCT): print "Detected flash duration was outside tolerance: %s" % on_secs return None, None secs_since_last_off = (now - off_dt).total_seconds() print "Time since last flash: %r" % secs_since_last_off publish_power(3600.0 / secs_since_last_off) return on_dt, now if __name__ == "__main__": p = argparse.ArgumentParser() p.add_argument("user") p.add_argument("password") p.add_argument("host") p.add_argument("--port", type=int, default=DEFAULT_MQTT_PORT) args = p.parse_args() client = mqtt.Client(client_id="power", clean_session=False) client.on_connect = on_connect client.username_pw_set(args.user, args.password) client.connect(args.host, args.port, 60) client.loop_start() gpio.setwarnings(False) gpio.setmode(gpio.BCM) gpio.setup(PIN, gpio.IN) last_val = 0 on_dt = None off_dt = None try: while True: sleep(0.0025) val = gpio.input(PIN) if val != last_val: on_dt, off_dt = handle_change(val, last_val, on_dt, off_dt) last_val = val except KeyboardInterrupt: pass finally: client.loop_stop()
python
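publish_power above converts the gap between two LED flashes into watts via 3600/seconds, which assumes a meter emitting one flash per watt-hour (1000 imp/kWh). That arithmetic, and the flash-width tolerance check, can be verified without GPIO hardware:

def within_tolerance(val, nominal, tolerance_percent):
    tol = tolerance_percent / 100.0
    return nominal * (1 - tol) <= val <= nominal * (1 + tol)

def watts_from_gap(seconds_between_flashes):
    # 1 flash == 1 Wh, so power = 3600 s/h / gap  (a 12 s gap is 300 W)
    return 3600.0 / seconds_between_flashes

if __name__ == "__main__":
    print(watts_from_gap(12))                        # 300.0
    print(watts_from_gap(3.6))                       # 1000.0
    print(within_tolerance(0.021, 0.02, 10))         # True  (within ±10 % of the 20 ms flash)
    print(within_tolerance(0.03, 0.02, 10))          # False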
""" Test my new feature Some more info if you want Should work with python2 and python3! """ import unittest # if you need data from oletools/test-data/DIR/, uncomment these lines: ## Directory with test data, independent of current working directory #from tests.test_utils import DATA_BASE_DIR class TestMyFeature(unittest.TestCase): """ Tests my cool new feature """ def test_this(self): """ check that this works """ pass # your code here def test_that(self): """ check that that also works """ pass # your code here def helper_function(self, filename): """ to be called from other test functions to avoid copy-and-paste this is not called by unittest directly, only from your functions """ pass # your code here # e.g.: msodde.main(join(DATA_DIR, filename)) # just in case somebody calls this file as a script if __name__ == '__main__': unittest.main()
python
#---------------------------------------------------------------------- # Copyright (c) 2011-2015 Raytheon BBN Technologies # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and/or hardware specification (the "Work") to # deal in the Work without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Work, and to permit persons to whom the Work # is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Work. # # THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS # IN THE WORK. #---------------------------------------------------------------------- from __future__ import absolute_import import json import logging import os import sys import M2Crypto.SSL from ..util.paths import getAbsPath from ..util import OmniError from ..util import credparsing as credutils from ..util import json_encoding from ..xmlrpc import client as xmlrpcclient from ...sfa.trust.credential import Credential class Framework_Base(): """ Framework_Base is an abstract class that identifies the minimal set of functions that must be implemented in order to add a control framework to omni. Instructions for adding a new framework: Create "framework_X" in the frameworks directory, where X is your control framework. Create a Framework class in the file that inherits "Framework_Base" and fill out each of the functions. Edit the sample "omni_config" file and add a section for your framework, giving the section the same name as X used in framework_X. For instance, 'sfa' or 'gcf'. Your framework's section of the omni config *MUST* have a cert and key entry, which omni will use when talking to the GENI Aggregate managers. """ def __init__(self, config): self.cert = getAbsPath(config['cert']) if not os.path.exists(self.cert): sys.exit("Frameworks certfile %s doesn't exist" % self.cert) if not os.path.getsize(self.cert) > 0: sys.exit("Frameworks certfile %s is empty" % self.cert) self.key = getAbsPath(config['key']) if not os.path.exists(self.key): sys.exit("Frameworks keyfile %s doesn't exist" % self.key) if not os.path.getsize(self.key) > 0: sys.exit("Frameworks keyfile %s is empty" % self.key) self.sslctx = None def init_user_cred( self, opts ): """Initialize user credential either from file (if --usercredfile) or else to None. Must call this method in framework's __init__ in order for --usercredfile to be handled properly. Returns the usercred - in XML string format. 
""" try: if self.user_cred_struct is not None: pass except: self.user_cred_struct = None # read the usercred from supplied file cred = None if opts.usercredfile and os.path.exists(opts.usercredfile) and os.path.isfile(opts.usercredfile) and os.path.getsize(opts.usercredfile) > 0: # read the user cred from the given file if hasattr(self, 'logger'): logger = self.logger else: logger = logging.getLogger("omni.framework") logger.info("Getting user credential from file %s", opts.usercredfile) # cred = _load_cred(logger, opts.usercredfile) with open(opts.usercredfile, 'r') as f: cred = f.read() try: cred = json.loads(cred, encoding='ascii', cls=json_encoding.DateTimeAwareJSONDecoder) if cred and isinstance(cred, dict) and \ cred.has_key('geni_type') and \ cred.has_key('geni_value') and \ cred['geni_type'] == Credential.SFA_CREDENTIAL_TYPE and \ cred['geni_value'] is not None: self.user_cred_struct = cred except Exception, e: logger.debug("Failed to get a JSON struct from cred in file %s. Treat as a string: %s", opts.usercredfile, e) cred2 = credutils.get_cred_xml(cred) if cred2 is None or cred2 == "": logger.info("Did NOT get valid user cred from %s", opts.usercredfile) if opts.devmode: logger.info(" ... but using it anyhow") else: cred = None else: # This would force a saved user cred in struct to be XML. Is that correct? #cred = cred2 target = "" try: target = credutils.get_cred_target_urn(logger, cred) if "+authority+sa" in target: self.logger.debug("Got target %s - PG user creds list the user as the owner only", target) target = credutils.get_cred_owner_urn(logger, cred) except: if not opts.devmode: logger.warn("Failed to parse target URN from user cred?") logger.info("Read user %s credential from file %s", target, opts.usercredfile) elif opts.usercredfile: if hasattr(self, 'logger'): logger = self.logger else: logger = logging.getLogger("omni.framework") logger.info("NOT getting user credential from file %s - file doesn't exist or is empty", opts.usercredfile) return cred def get_version(self): """ Returns a dict of the GetVersion return from the control framework. And an error message if any. """ raise NotImplementedError('get_version') def get_user_cred(self): """ Returns a user credential from the control framework as a string. And an error message if any. """ raise NotImplementedError('get_user_cred') def get_slice_cred(self, urn): """ Retrieve a slice with the given urn and returns the signed credential as a string. """ raise NotImplementedError('get_slice_cred') def create_slice(self, urn): """ If the slice already exists in the framework, it returns that. Otherwise it creates the slice and returns the new slice as a string. """ raise NotImplementedError('create_slice') def delete_slice(self, urn): """ Removes the slice from the control framework. """ raise NotImplementedError('delete_slice') def list_aggregates(self): """ Get a list of available GENI Aggregates from the control framework. Returns: a dictionary where keys are urns and values are aggregate urls """ raise NotImplementedError('list_aggregates') def list_my_slices(self, username): """ Get a list of slices for this user. Returns: a list of slice URNs """ raise NotImplementedError('list_my_slices') def list_my_projects(self, username): """ '''List projects owned by the user (name or URN) provided, returning a list of structs, containing PROJECT_URN, PROJECT_UID, EXPIRED, and PROJECT_ROLE. 
EXPIRED is a boolean.''' """ raise NotImplementedError('list_my_projects') def list_ssh_keys(self, username=None): """ Get a list of SSH key pairs for the given user or the configured current user if not specified. Private key will be omitted if not known or found. Returns: a list of structs containing SSH key pairs ('public_key', 'private_key' (may be omitted)) """ raise NotImplementedError('list_ssh_keys') def slice_name_to_urn(self, name): """Convert a slice name to a slice urn.""" # Default implementation just converts to generic URN. raise NotImplementedError('slice_name_to_urn') def renew_slice(self, urn, requested_expiration): """Renew a slice. urn is framework urn, already converted via slice_name_to_urn. requested_expiration is a datetime object. Returns the expiration date as a datetime. If there is an error, print it and return None. """ raise NotImplementedError('renew_slice') def make_client(self, url, keyfile, certfile, verbose=False, timeout=None, allow_none=False): """Create an API client. This is currently an XML-RPC client over SSL with a client side certificate.""" return xmlrpcclient.make_client(url, keyfile, certfile, verbose=verbose, timeout=timeout, allow_none=allow_none) # See xmlrpc/client.py where this would be used to use M2Crypto for the SSL client # supporting entering the password only once. But this had problems and is not used. def ssl_context(self, retries=2): """Returns an SSL Context or an exception is raised.""" if hasattr(self, 'logger'): logger = self.logger else: logger = logging.getLogger("omni.framework") logger.warning("*** Creating an SSL Context! ***") if not self.sslctx: # Initialize the M2Crypto SSL Context attempts = 0 while attempts <= retries: sslctx = M2Crypto.SSL.Context() try: sslctx.load_cert_chain(self.cert, self.key) self.sslctx = sslctx break except M2Crypto.SSL.SSLError, err: logger.error('Wrong pass phrase for private key.') attempts = attempts + 1 if attempts > retries: logger.error("Wrong pass phrase after %d tries.", attempts) raise OmniError(err) else: logger.info('.... please retry.') return self.sslctx def get_user_cred_struct(self): """ Returns a user credential from the control framework as a string in a struct. And an error message if any. Struct is as per AM API v3: { geni_type: <string>, geni_version: <string>, geni_value: <the credential as a string> } """ cred, message = self.get_user_cred() if cred: cred = self.wrap_cred(cred) return cred, message def get_slice_cred_struct(self, urn): """ Retrieve a slice with the given urn and returns the signed credential as a string in the AM API v3 struct: { geni_type: <string>, geni_version: <string>, geni_value: <the credential as a string> } """ cred = self.get_slice_cred(urn) return self.wrap_cred(cred) def wrap_cred(self, cred): """ Wrap the given cred in the appropriate struct for this framework. """ if hasattr(self, 'logger'): logger = self.logger else: logger = logging.getLogger("omni.framework") if isinstance(cred, dict): logger.debug("Called wrap on a cred that's already a dict? %s", cred) return cred elif not isinstance(cred, str): logger.warn("Called wrap on non string cred? Stringify. 
%s", cred) cred = str(cred) cred_type, cred_version = credutils.get_cred_type(cred) ret = dict(geni_type=cred_type, geni_version=cred_version, \ geni_value=cred) return ret # get the slice members (urn, email) and their public ssh keys and # slice role def get_members_of_slice(self, slice_urn): raise NotImplementedError('get_members_of_slice') # get the members (urn, email) and their role in the project def get_members_of_project(self, project_name): '''Look up members of the project with the given name. Return is a list of member dictionaries containing PROJECT_MEMBER (URN), EMAIL, PROJECT_MEMBER_UID, and PROJECT_ROLE. ''' raise NotImplementedError('get_members_of_project') # add a new member to a slice (giving them rights to get a slice credential) def add_member_to_slice(self, slice_urn, member_name, role = 'MEMBER'): raise NotImplementedError('add_member_to_slice') # remove a member from a slice def remove_member_from_slice(self, slice_urn, member_name): raise NotImplementedError('remove_member_from_slice') # Record new slivers at the CH database # write new sliver_info to the database using chapi # Manifest is the XML when using APIv1&2 and none otherwise # expiration is the slice expiration # slivers is the return struct from APIv3+ or None # If am_urn is not provided, infer it from the url # If both are not provided, infer the AM from the sliver URNs def create_sliver_info(self, manifest, slice_urn, aggregate_url, expiration, slivers, am_urn): raise NotImplementedError('create_sliver_info') # use the CH database to convert an aggregate url to the corresponding urn def lookup_agg_urn_by_url(self, agg_url): raise NotImplementedError('lookup_agg_urn_by_url') # given the slice urn and aggregate urn, find the associated sliver urns from the CH db # Return an empty list if none found def list_sliverinfo_urns(self, slice_urn, aggregate_urn): raise NotImplementedError('list_sliverinfo_urns') # update the expiration time for a sliver recorded at the CH, # If we get an argument error indicating the sliver was not yet recorded, try # to record it def update_sliver_info(self, aggregate_urn, slice_urn, sliver_urn, expiration): raise NotImplementedError('update_sliver_info') # delete the sliver from the CH database of slivers in a slice def delete_sliver_info(self, sliver_urn): raise NotImplementedError('delete_sliver_info') # Find all slivers the SA lists for the given slice # Return a struct by AM URN containing a struct: sliver_urn = sliver info struct # Compare with list_sliverinfo_urns which only returns the sliver URNs def list_sliver_infos_for_slice(self, slice_urn): return {}
python
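wrap_cred above packages a credential string into the AM API v3 struct keyed by geni_type, geni_version and geni_value. A small standalone illustration of the struct's shape, with a fake credential and hard-coded type strings standing in for what credutils.get_cred_type would return:

def wrap_cred(cred, cred_type="geni_sfa", cred_version="3"):
    # Mirrors Framework_Base.wrap_cred's output shape; type/version are hard-coded
    # here instead of being parsed out of the credential itself.
    if isinstance(cred, dict):
        return cred                     # already wrapped
    return dict(geni_type=cred_type, geni_version=cred_version, geni_value=str(cred))

if __name__ == "__main__":
    struct = wrap_cred("<signed-credential>...</signed-credential>")
    print(sorted(struct.keys()))        # ['geni_type', 'geni_value', 'geni_version']
    print(wrap_cred(struct) is struct)  # True: wrapping twice is a no-op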
# -*- coding: utf-8 -*- from __future__ import print_function from __future__ import absolute_import import sys import random import numpy as np from utils.rank_io import * from layers import DynamicMaxPooling import scipy.sparse as sp import inputs class PairBasicGenerator(object): def __init__(self, data_root, config): self.__name = 'PairBasicGenerator' self.config = config rel_file = data_root + config['relation_file'] self.rel = read_relation(filename=rel_file) self.batch_size = config['batch_size'] self.check_list = ['relation_file', 'batch_size'] self.point = 0 if config['use_iter']: self.pair_list_iter = self.make_pair_iter(self.rel) self.pair_list = [] else: self.pair_list = self.make_pair_static(self.rel) self.pair_list_iter = None def check(self): for e in self.check_list: if e not in self.config: print('[%s] Error %s not in config' % (self.__name, e), end='\n') return False return True def make_pair_static(self, rel): rel_set = {} pair_list = [] for label, d1, d2 in rel: if d1 not in rel_set: rel_set[d1] = {} if label not in rel_set[d1]: rel_set[d1][label] = [] rel_set[d1][label].append(d2) for d1 in rel_set: label_list = sorted(rel_set[d1].keys(), reverse = True) for hidx, high_label in enumerate(label_list[:-1]): for low_label in label_list[hidx+1:]: for high_d2 in rel_set[d1][high_label]: for low_d2 in rel_set[d1][low_label]: pair_list.append( (d1, high_d2, low_d2) ) print('Pair Instance Count:', len(pair_list), end='\n') return pair_list def make_pair_iter(self, rel): rel_set = {} pair_list = [] for label, d1, d2 in rel: if d1 not in rel_set: rel_set[d1] = {} if label not in rel_set[d1]: rel_set[d1][label] = [] rel_set[d1][label].append(d2) while True: rel_set_sample = random.sample(rel_set.keys(), self.config['query_per_iter']) for d1 in rel_set_sample: label_list = sorted(rel_set[d1].keys(), reverse = True) for hidx, high_label in enumerate(label_list[:-1]): for low_label in label_list[hidx+1:]: for high_d2 in rel_set[d1][high_label]: for low_d2 in rel_set[d1][low_label]: pair_list.append( (d1, high_d2, low_d2) ) yield pair_list def get_batch_static(self): pass def get_batch_iter(self): pass def get_batch(self): if self.config['use_iter']: return next(self.batch_iter) else: return self.get_batch_static() def get_batch_generator(self): pass @property def num_pairs(self): return len(self.pair_list) def reset(self): self.point = 0 class PairBasicGenerator_linear(object): def __init__(self, data_root, config): self.__name = 'PairBasicGenerator_linear' self.config = config rel_file = data_root + config['relation_file'] self.rel = read_relation_linear(filename = rel_file) self.batch_size = config['batch_size'] self.check_list = ['relation_file', 'batch_size'] self.point = 0 if config['use_iter']: self.pair_list_iter = self.make_pair_iter_linear(self.rel) self.pair_list = [] else: self.pair_list = self.make_pair_static_linear(self.rel) self.pair_list_iter = None def check(self): for e in self.check_list: if e not in self.config: print('[%s] Error %s not in config' % (self.__name, e), end='\n') return False return True def make_pair_static_linear(self, rel): rel_set = {} pair_list = [] for label, d1, d2, d3, d4 in rel: if d1 not in rel_set: rel_set[d1] = {} if label not in rel_set[d1]: rel_set[d1][label] = [] rel_set[d1][label].append((d2, d3, d4)) for d1 in rel_set: label_list = sorted(rel_set[d1].keys(), reverse = True) for hidx, high_label in enumerate(label_list[:-1]): for low_label in label_list[hidx+1:]: for high_tuple in rel_set[d1][high_label]: for low_tuple in 
rel_set[d1][low_label]: high_d2 = high_tuple[0] high_d3 = high_tuple[1] high_d4 = high_tuple[2] low_d2 = low_tuple[0] low_d3 = low_tuple[1] low_d4 = low_tuple[2] pair_list.append((d1, high_d2, high_d3, high_d4, low_d2, low_d3, low_d4)) print('Pair Instance Count:', len(pair_list), end='\n') return pair_list def make_pair_iter_linear(self, rel): rel_set = {} pair_list = [] for label, d1, d2, d3, d4 in rel: if d1 not in rel_set: rel_set[d1] = {} if label not in rel_set[d1]: rel_set[d1][label] = [] rel_set[d1][label].append((d2, d3, d4)) while True: rel_set_sample = random.sample(rel_set.keys(), self.config['query_per_iter']) for d1 in rel_set_sample: label_list = sorted(rel_set[d1].keys(), reverse = True) for hidx, high_label in enumerate(label_list[:-1]): for low_label in label_list[hidx+1:]: for high_tuple in rel_set[d1][high_label]: for low_tuple in rel_set[d1][low_label]: high_d2 = high_tuple[0] high_d3 = high_tuple[1] high_d4 = high_tuple[2] low_d2 = low_tuple[0] low_d3 = low_tuple[1] low_d4 = low_tuple[2] pair_list.append((d1, high_d2, high_d3, high_d4, low_d2, low_d3, low_d4)) yield pair_list def get_batch_static(self): pass def get_batch_iter(self): pass def get_batch(self): if self.config['use_iter']: return next(self.batch_iter) else: return self.get_batch_static() def get_batch_generator(self): pass @property def num_pairs(self): return len(self.pair_list) def reset(self): self.point = 0 class PairGenerator(PairBasicGenerator): def __init__(self, data_root, config): super(PairGenerator, self).__init__(data_root, config=config) self.__name = 'PairGenerator' self.config = config self.data1 = config['data1'] self.data2 = config['data2'] self.data1_maxlen = config['text1_maxlen'] self.data2_maxlen = config['text2_maxlen'] self.fill_word = config['vocab_size'] - 1 self.check_list.extend(['data1', 'data2', 'text1_maxlen', 'text2_maxlen']) if config['use_iter']: self.batch_iter = self.get_batch_iter() if not self.check(): raise TypeError('[PairGenerator] parameter check wrong.') print('[PairGenerator] init done', end='\n') def get_batch_static(self): X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32) X1_len = np.zeros((self.batch_size*2,), dtype=np.int32) X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32) X2_len = np.zeros((self.batch_size*2,), dtype=np.int32) Y = np.zeros((self.batch_size*2,), dtype=np.int32) Y[::2] = 1 X1[:] = self.fill_word X2[:] = self.fill_word for i in range(self.batch_size): d1, d2p, d2n = random.choice(self.pair_list) d1_cont = list(self.data1[d1]) d2p_cont = list(self.data2[d2p]) d2n_cont = list(self.data2[d2n]) d1_len = min(self.data1_maxlen, len(d1_cont)) d2p_len = min(self.data2_maxlen, len(d2p_cont)) d2n_len = min(self.data2_maxlen, len(d2n_cont)) X1[i*2, :d1_len], X1_len[i*2] = d1_cont[:d1_len], d1_len X2[i*2, :d2p_len], X2_len[i*2] = d2p_cont[:d2p_len], d2p_len X1[i*2+1, :d1_len], X1_len[i*2+1] = d1_cont[:d1_len], d1_len X2[i*2+1, :d2n_len], X2_len[i*2+1] = d2n_cont[:d2n_len], d2n_len return X1, X1_len, X2, X2_len, Y def get_batch_iter(self): while True: self.pair_list = next(self.pair_list_iter) for _ in range(self.config['batch_per_iter']): X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32) X1_len = np.zeros((self.batch_size*2,), dtype=np.int32) X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32) X2_len = np.zeros((self.batch_size*2,), dtype=np.int32) Y = np.zeros((self.batch_size*2,), dtype=np.int32) Y[::2] = 1 X1[:] = self.fill_word X2[:] = self.fill_word for i in 
range(self.batch_size): d1, d2p, d2n = random.choice(self.pair_list) d1_len = min(self.data1_maxlen, len(list(self.data1[d1]))) d2p_len = min(self.data2_maxlen, len(list(self.data2[d2p]))) d2n_len = min(self.data2_maxlen, len(list(self.data2[d2n]))) X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len X2[i*2, :d2p_len], X2_len[i*2] = self.data2[d2p][:d2p_len], d2p_len X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len X2[i*2+1, :d2n_len], X2_len[i*2+1] = self.data2[d2n][:d2n_len], d2n_len yield X1, X1_len, X2, X2_len, Y def get_batch_generator(self): while True: X1, X1_len, X2, X2_len, Y = self.get_batch() if self.config['use_dpool']: yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len, 'dpool_index': DynamicMaxPooling.dynamic_pooling_index(X1_len, X2_len, self.config['text1_maxlen'], self.config['text2_maxlen'])}, Y) else: yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len}, Y) class Triletter_PairGenerator(PairBasicGenerator): def __init__(self, data_root, config): super(Triletter_PairGenerator, self).__init__(data_root, config=config) self.__name = 'Triletter_PairGenerator' self.data1 = config['data1'] self.data2 = config['data2'] self.dtype = config['dtype'].lower() if self.dtype == 'cdssm': self.data1_maxlen = config['text1_maxlen'] self.data2_maxlen = config['text2_maxlen'] self.vocab_size = config['vocab_size'] self.fill_word = self.vocab_size - 1 self.check_list.extend(['data1', 'data2', 'dtype', 'vocab_size', 'word_triletter_map_file']) if config['use_iter']: self.batch_iter = self.get_batch_iter() if not self.check(): raise TypeError('[Triletter_PairGenerator] parameter check wrong.') self.word_triletter_map = self.read_word_triletter_map(data_root + self.config['word_triletter_map_file']) print('[Triletter_PairGenerator] init done', end='\n') def read_word_triletter_map(self, wt_map_file): word_triletter_map = {} for line in open(wt_map_file): r = line.strip().split() word_triletter_map[int(r[0])] = list(map(int, r[1:])) return word_triletter_map def map_word_to_triletter(self, words): triletters = [] for wid in words: triletters.extend(self.word_triletter_map[wid]) return triletters def transfer_feat2sparse(self, dense_feat): data = [] indices = [] indptr = [0] for feat in dense_feat: for val in feat: indices.append(val) data.append(1) indptr.append(indptr[-1] + len(feat)) res = sp.csr_matrix((data, indices, indptr), shape=(len(dense_feat), self.vocab_size), dtype="float32") return sp.csr_matrix((data, indices, indptr), shape=(len(dense_feat), self.vocab_size), dtype="float32") def transfer_feat2fixed(self, feats, max_len, fill_val): num_feat = len(feats) nfeat = np.zeros((num_feat, max_len), dtype=np.int32) nfeat[:] = fill_val for i in range(num_feat): rlen = min(max_len, len(feats[i])) nfeat[i,:rlen] = feats[i][:rlen] return nfeat def get_batch_static(self): X1_len = np.zeros((self.batch_size*2,), dtype=np.int32) X2_len = np.zeros((self.batch_size*2,), dtype=np.int32) Y = np.zeros((self.batch_size*2,), dtype=np.int32) Y[::2] = 1 X1, X2 = [], [] for i in range(self.batch_size): d1, d2p, d2n = random.choice(self.pair_list) d1_len = len(list(self.data1[d1])) d2p_len = len(list(self.data2[d2p])) d2n_len = len(list(self.data2[d2n])) X1_len[i*2], X1_len[i*2+1] = d1_len, d1_len X2_len[i*2], X2_len[i*2+1] = d2p_len, d2n_len X1.append(self.map_word_to_triletter(self.data1[d1])) X1.append(self.map_word_to_triletter(self.data1[d1])) X2.append(self.map_word_to_triletter(self.data2[d2p])) 
X2.append(self.map_word_to_triletter(self.data2[d2n])) if self.dtype == 'dssm': return self.transfer_feat2sparse(X1).toarray(), X1_len, self.transfer_feat2sparse(X2).toarray(), X2_len, Y elif self.dtype == 'cdssm': return self.transfer_feat2fixed(X1, self.data1_maxlen, self.fill_word), X1_len, \ self.transfer_feat2fixed(X2, self.data2_maxlen, self.fill_word), X2_len, Y def get_batch_iter(self): while True: self.pair_list = next(self.pair_list_iter) for _ in range(self.config['batch_per_iter']): X1_len = np.zeros((self.batch_size*2,), dtype=np.int32) X2_len = np.zeros((self.batch_size*2,), dtype=np.int32) Y = np.zeros((self.batch_size*2,), dtype=np.int32) Y[::2] = 1 X1, X2 = [], [] for i in range(self.batch_size): d1, d2p, d2n = random.choice(self.pair_list) d1_cont = list(self.data1[d1]) d2p_cont = list(self.data2[d2p]) d2n_cont = list(self.data2[d2n]) d1_len = len(d1_cont) d2p_len = len(d2p_cont) d2n_len = len(d2n_cont) X1_len[i*2], X1_len[i*2+1] = d1_len, d1_len X2_len[i*2], X2_len[i*2+1] = d2p_len, d2n_len X1.append(self.map_word_to_triletter(d1_cont)) X1.append(self.map_word_to_triletter(d1_cont)) X2.append(self.map_word_to_triletter(d2p_cont)) X2.append(self.map_word_to_triletter(d2n_cont)) if self.dtype == 'dssm': yield self.transfer_feat2sparse(X1).toarray(), X1_len, self.transfer_feat2sparse(X2).toarray(), X2_len, Y elif self.dtype == 'cdssm': yield self.transfer_feat2fixed(X1, self.data1_maxlen, self.fill_word), X1_len, \ self.transfer_feat2fixed(X2, self.data2_maxlen, self.fill_word), X2_len, Y def get_batch_generator(self): while True: X1, X1_len, X2, X2_len, Y = self.get_batch() yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len}, Y) class DRMM_PairGenerator(PairBasicGenerator): def __init__(self, data_root, config): super(DRMM_PairGenerator, self).__init__(data_root, config=config) self.__name = 'DRMM_PairGenerator' self.data1 = config['data1'] self.data2 = config['data2'] self.data1_maxlen = config['text1_maxlen'] #self.data2_maxlen = config['text2_maxlen'] self.embed = config['embed'] if 'bin_num' in config: self.hist_size = config['bin_num'] else: self.hist_size = config['hist_size'] self.fill_word = config['vocab_size'] - 1 self.check_list.extend(['data1', 'data2', 'text1_maxlen', 'embed']) self.use_hist_feats = False if 'hist_feats_file' in config: hist_feats = read_features_without_id(data_root + config['hist_feats_file']) self.hist_feats = {} for idx, (label, d1, d2) in enumerate(self.rel): self.hist_feats[(d1, d2)] = hist_feats[idx] self.use_hist_feats = True if config['use_iter']: self.batch_iter = self.get_batch_iter() if not self.check(): raise TypeError('[DRMM_PairGenerator] parameter check wrong.') print('[DRMM_PairGenerator] init done', end='\n') def cal_hist(self, t1, t2, data1_maxlen, hist_size): mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32) t1_cont = list(self.data1[t1]) t2_cont = list(self.data2[t2]) d1len = len(t1_cont) if self.use_hist_feats: assert (t1, t2) in self.hist_feats curr_pair_feats = list(self.hist_feats[(t1, t2)]) caled_hist = np.reshape(curr_pair_feats, (d1len, hist_size)) if d1len < data1_maxlen: mhist[:d1len, :] = caled_hist[:, :] else: mhist[:, :] = caled_hist[:data1_maxlen, :] else: t1_rep = self.embed[t1_cont] t2_rep = self.embed[t2_cont] mm = t1_rep.dot(np.transpose(t2_rep)) for (i,j), v in np.ndenumerate(mm): if i >= data1_maxlen: break vid = int((v + 1.) / 2. * ( hist_size - 1.)) mhist[i][vid] += 1. mhist += 1. 
mhist = np.log10(mhist) return mhist def get_batch_static(self): X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32) X1_len = np.zeros((self.batch_size*2,), dtype=np.int32) X2 = np.zeros((self.batch_size*2, self.data1_maxlen, self.hist_size), dtype=np.float32) X2_len = np.zeros((self.batch_size*2,), dtype=np.int32) Y = np.zeros((self.batch_size*2,), dtype=np.int32) Y[::2] = 1 X1[:] = self.fill_word for i in range(self.batch_size): d1, d2p, d2n = random.choice(self.pair_list) d1_cont = list(self.data1[d1]) d2p_cont = list(self.data2[d2p]) d2n_cont = list(self.data2[d2n]) d1_len = min(self.data1_maxlen, len(d1_cont)) d2p_len = len(d2p_cont) d2n_len = len(d2n_cont) X1[i*2, :d1_len], X1_len[i*2] = d1_cont[:d1_len], d1_len X1[i*2+1, :d1_len], X1_len[i*2+1] = d1_cont[:d1_len], d1_len X2[i*2], X2_len[i*2] = self.cal_hist(d1, d2p, self.data1_maxlen, self.hist_size), d2p_len X2[i*2+1], X2_len[i*2+1] = self.cal_hist(d1, d2n, self.data1_maxlen, self.hist_size), d2n_len return X1, X1_len, X2, X2_len, Y def get_batch_iter(self): while True: self.pair_list = next(self.pair_list_iter) for _ in range(self.config['batch_per_iter']): X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32) X1_len = np.zeros((self.batch_size*2,), dtype=np.int32) X2 = np.zeros((self.batch_size*2, self.data1_maxlen, self.hist_size), dtype=np.float32) X2_len = np.zeros((self.batch_size*2,), dtype=np.int32) Y = np.zeros((self.batch_size*2,), dtype=np.int32) Y[::2] = 1 X1[:] = self.fill_word #X2[:] = 0. for i in range(self.batch_size): d1, d2p, d2n = random.choice(self.pair_list) d1_cont = list(self.data1[d1]) d2p_cont = list(self.data2[d2p]) d2n_cont = list(self.data2[d2n]) d1_len = min(self.data1_maxlen, len(d1_cont)) d2p_len = len(d2p_cont) d2n_len = len(d2n_cont) X1[i*2, :d1_len], X1_len[i*2] = d1_cont[:d1_len], d1_len X1[i*2+1, :d1_len], X1_len[i*2+1] = d1_cont[:d1_len], d1_len X2[i*2], X2_len[i*2] = self.cal_hist(d1, d2p, self.data1_maxlen, self.hist_size), d2p_len X2[i*2+1], X2_len[i*2+1] = self.cal_hist(d1, d2n, self.data1_maxlen, self.hist_size), d2n_len yield X1, X1_len, X2, X2_len, Y def get_batch_generator(self): while True: X1, X1_len, X2, X2_len, Y = self.get_batch() yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len}, Y) class DRMM_PairGenerator_linear(PairBasicGenerator_linear): def __init__(self, data_root, config): super(DRMM_PairGenerator_linear, self).__init__(data_root, config=config) self.__name = 'DRMM_PairGenerator_linear' self.data1 = config["data1"] self.data2 = config["data2"] self.data3 = config["data3"] self.data4 = config["data4"] self.data1_maxlen = config['text1_maxlen'] self.embed = config['embed'] if 'bin_num' in config: self.hist_size = config['bin_num'] else: self.hist_size = config['hist_size'] self.fill_word = config['vocab_size'] - 1 self.check_list.extend(['data1', 'data2', "data3", "data4", 'text1_maxlen', 'embed']) self.use_hist_feats = False if 'hist_feats_file_title' in config: hist_feats_title = read_features_without_id(data_root + config["hist_feats_file_title"]) hist_feats_question = read_features_without_id(data_root + config["hist_feats_file_question"]) hist_feats_answer = read_features_without_id(data_root + config["hist_feats_file_answer"]) self.hist_feats_title = {} self.hist_feats_question = {} self.hist_feats_answer = {} for idx, (label, d1, d2, d3, d4) in enumerate(self.rel): self.hist_feats_title[(d1, d2)] = hist_feats_title[idx] self.hist_feats_question[(d1, d3)] = hist_feats_question[idx] self.hist_feats_answer[(d1, d4)] = 
hist_feats_answer[idx] self.use_hist_feats = True if config['use_iter']: self.batch_iter = self.get_batch_iter() if not self.check(): raise TypeError('[DRMM_PairGenerator_linear] parameter check wrong.') print('[DRMM_PairGenerator_linear] init done', end='\n') def cal_hist(self, t1, t2, thisdata2, thishistfeats_variable, data1_maxlen, hist_size): mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32) t1_cont = list(self.data1[t1]) thist2_cont = list(thisdata2[t2]) d1len = len(t1_cont) if self.use_hist_feats: assert (t1, t2) in thishistfeats_variable curr_pair_feats = list(thishistfeats_variable[(t1, t2)]) caled_hist = np.reshape(curr_pair_feats, (d1len, hist_size)) if d1len < data1_maxlen: mhist[:d1len, :] = caled_hist[:, :] else: mhist[:, :] = caled_hist[:data1_maxlen, :] else: t1_rep = self.embed[t1_cont] t2_rep = self.embed[thist2_cont] mm = t1_rep.dot(np.transpose(t2_rep)) for (i,j), v in np.ndenumerate(mm): if i >= data1_maxlen: break vid = int((v + 1.) / 2. * ( hist_size - 1.)) mhist[i][vid] += 1. mhist += 1. mhist = np.log10(mhist) return mhist def get_batch_static(self): X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32) X1_len = np.zeros((self.batch_size*2,), dtype=np.int32) X2 = np.zeros((self.batch_size*2, self.data1_maxlen, self.hist_size), dtype=np.float32) X2_len = np.zeros((self.batch_size*2,), dtype=np.int32) X3 = np.zeros((self.batch_size*2, self.data1_maxlen, self.hist_size), dtype=np.float32) X3_len = np.zeros((self.batch_size*2,), dtype=np.int32) X4 = np.zeros((self.batch_size*2, self.data1_maxlen, self.hist_size), dtype=np.float32) X4_len = np.zeros((self.batch_size*2,), dtype=np.int32) Y = np.zeros((self.batch_size*2,), dtype=np.int32) Y[::2] = 1 X1[:] = self.fill_word for i in range(self.batch_size): d1, d2p, d3p, d4p, d2n, d3n, d4n = random.choice(self.pair_list) d1_cont = list(self.data1[d1]) d2p_cont = list(self.data2[d2p]) d2n_cont = list(self.data2[d2n]) d3p_cont = list(self.data3[d3p]) d3n_cont = list(self.data3[d3n]) d4p_cont = list(self.data4[d4p]) d4n_cont = list(self.data4[d4n]) d1_len = min(self.data1_maxlen, len(d1_cont)) d2p_len = len(d2p_cont) d2n_len = len(d2n_cont) d3p_len = len(d3p_cont) d3n_len = len(d3n_cont) d4p_len = len(d4p_cont) d4n_len = len(d4n_cont) X1[i*2, :d1_len], X1_len[i*2] = d1_cont[:d1_len], d1_len X1[i*2+1, :d1_len], X1_len[i*2+1] = d1_cont[:d1_len], d1_len X2[i*2], X2_len[i*2] = self.cal_hist(d1, d2p, self.data2, self.hist_feats_title, self.data1_maxlen, self.hist_size), d2p_len X2[i*2+1], X2_len[i*2+1] = self.cal_hist(d1, d2n, self.data2, self.hist_feats_title, self.data1_maxlen, self.hist_size), d2n_len X3[i*2], X3_len[i*2] = self.cal_hist(d1, d3p, self.data3, self.hist_feats_question, self.data1_maxlen, self.hist_size), d3p_len X3[i*2+1], X3_len[i*2+1] = self.cal_hist(d1, d3n, self.data3, self.hist_feats_question, self.data1_maxlen, self.hist_size), d3n_len X4[i*2], X4_len[i*2+1] = self.cal_hist(d1, d4p, self.data4, self.hist_feats_answer, self.data1_maxlen, self.hist_size), d4p_len X4[i*2+1], X4_len[i*2+1] = self.cal_hist(d1, d4n, self.data4, self.hist_feats_answer, self.data1_maxlen, self.hist_size), d4n_len return X1, X1_len, X2, X2_len, X3, X3_len, X4, X4_len, Y def get_batch_iter(self): while True: self.pair_list = next(self.pair_list_iter) for _ in range(self.config['batch_per_iter']): X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32) X1_len = np.zeros((self.batch_size*2,), dtype=np.int32) X2 = np.zeros((self.batch_size*2, self.data1_maxlen, self.hist_size), dtype=np.float32) 
X2_len = np.zeros((self.batch_size*2,), dtype=np.int32) X3 = np.zeros((self.batch_size*2, self.data1_maxlen, self.hist_size), dtype=np.float32) X3_len = np.zeros((self.batch_size*2,), dtype=np.int32) X4 = np.zeros((self.batch_size*2, self.data1_maxlen, self.hist_size), dtype=np.float32) X4_len = np.zeros((self.batch_size*2,), dtype=np.int32) Y = np.zeros((self.batch_size*2,), dtype=np.int32) Y[::2] = 1 X1[:] = self.fill_word #X2[:] = 0. for i in range(self.batch_size): d1, d2p, d2n, d3p, d3n, d4p, d4n = random.choice(self.pair_list) d1_cont = list(self.data1[d1]) d2p_cont = list(self.data2[d2p]) d2n_cont = list(self.data2[d2n]) d3p_cont = list(self.data3[d3p]) d3n_cont = list(self.data3[d3n]) d4p_cont = list(self.data4[d4p]) d4n_cont = list(self.data4[d4n]) d1_len = min(self.data1_maxlen, len(d1_cont)) d2p_len = len(d2p_cont) d2n_len = len(d2n_cont) d3p_len = len(d3p_cont) d3n_len = len(d3n_cont) d4p_len = len(d4p_cont) d4n_len = len(d4n_cont) X1[i*2, :d1_len], X1_len[i*2] = d1_cont[:d1_len], d1_len X1[i*2+1, :d1_len], X1_len[i*2+1] = d1_cont[:d1_len], d1_len X2[i*2], X2_len[i*2] = self.cal_hist(d1, d2p, self.data2, self.hist_feats_title, self.data1_maxlen, self.hist_size), d2p_len X2[i*2+1], X2_len[i*2+1] = self.cal_hist(d1, d2n, self.data2, self.hist_feats_title, self.data1_maxlen, self.hist_size), d2n_len X3[i*2], X3_len[i*2] = self.cal_hist(d1, d3p, self.data3, self.hist_feats_question, self.data1_maxlen, self.hist_size), d3p_len X3[i*2+1], X3_len[i*2+1] = self.cal_hist(d1, d3n, self.data3, self.hist_feats_question, self.data1_maxlen, self.hist_size), d3n_len X4[i*2], X4_len[i*2+1] = self.cal_hist(d1, d4p, self.data4, self.hist_feats_answer, self.data1_maxlen, self.hist_size), d4p_len X4[i*2+1], X4_len[i*2+1] = self.cal_hist(d1, d4n, self.data4, self.hist_feats_answer, self.data1_maxlen, self.hist_size), d4n_len yield X1, X1_len, X2, X2_len, X3, X3_len, X4, X4_len, Y def get_batch_generator(self): while True: X1, X1_len, X2, X2_len, X3, X3_len, X4, X4_len, Y = self.get_batch() yield ({"query": X1, "query_len": X1_len, "title": X2, "title_len": X2_len, "question": X3, "question_len": X3_len, "answer": X4, "answer_len":X4_len}, Y) class PairGenerator_Feats(PairBasicGenerator): def __init__(self, data_root, config): super(PairGenerator_Feats, self).__init__(data_root, config=config) self.__name = 'PairGenerator' self.config = config self.check_list.extend(['data1', 'data2', 'text1_maxlen', 'text2_maxlen', 'pair_feat_size', 'pair_feat_file', 'query_feat_size', 'query_feat_file']) if not self.check(): raise TypeError('[PairGenerator] parameter check wrong.') self.data1 = config['data1'] self.data2 = config['data2'] self.data1_maxlen = config['text1_maxlen'] self.data2_maxlen = config['text2_maxlen'] self.fill_word = config['vocab_size'] - 1 self.pair_feat_size = config['pair_feat_size'] self.query_feat_size = config['query_feat_size'] pair_feats = read_features_without_id(config['pair_feat_file']) self.query_feats = read_features_with_id(config['query_feat_file']) self.pair_feats = {} for idx, (label, d1, d2) in enumerate(self.rel): self.pair_feats[(d1, d2)] = pair_feats[idx] if config['use_iter']: self.batch_iter = self.get_batch_iter() print('[PairGenerator] init done', end='\n') def get_batch_static(self): X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32) X1_len = np.zeros((self.batch_size*2,), dtype=np.int32) X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32) X2_len = np.zeros((self.batch_size*2,), dtype=np.int32) X3 = np.zeros((self.batch_size * 
2, self.pair_feat_size), dtype=np.float32) X4 = np.zeros((self.batch_size * 2, self.query_feat_size), dtype=np.float32) Y = np.zeros((self.batch_size*2,), dtype=np.int32) Y[::2] = 1 X1[:] = self.fill_word X2[:] = self.fill_word for i in range(self.batch_size): d1, d2p, d2n = random.choice(self.pair_list) d1_len = min(self.data1_maxlen, len(self.data1[d1])) d2p_len = min(self.data2_maxlen, len(self.data2[d2p])) d2n_len = min(self.data2_maxlen, len(self.data2[d2n])) X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len X2[i*2, :d2p_len], X2_len[i*2] = self.data2[d2p][:d2p_len], d2p_len X3[i*2, :self.pair_feat_size] = self.pair_feats[(d1, d2p)][:self.pair_feat_size] X4[i*2, :self.query_feat_size] = self.query_feats[d1][:self.query_feat_size] X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len X2[i*2+1, :d2n_len], X2_len[i*2+1] = self.data2[d2n][:d2n_len], d2n_len X3[i*2+1, :self.pair_feat_size] = self.pair_feats[(d1, d2n)][:self.pair_feat_size] X4[i*2+1, :self.query_feat_size] = self.query_feats[d1][:self.query_feat_size] return X1, X1_len, X2, X2_len, X3, X4, Y def get_batch_iter(self): while True: self.pair_list = next(self.pair_list_iter) for _ in range(self.config['batch_per_iter']): X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32) X1_len = np.zeros((self.batch_size*2,), dtype=np.int32) X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32) X2_len = np.zeros((self.batch_size*2,), dtype=np.int32) X3 = np.zeros((self.batch_size*2, self.pair_feat_size), dtype=np.float32) X4 = np.zeros((self.batch_size*2, self.query_feat_size), dtype=np.int32) Y = np.zeros((self.batch_size*2,), dtype=np.int32) Y[::2] = 1 X1[:] = self.fill_word X2[:] = self.fill_word for i in range(self.batch_size): d1, d2p, d2n = random.choice(self.pair_list) d1_len = min(self.data1_maxlen, len(self.data1[d1])) d2p_len = min(self.data2_maxlen, len(self.data2[d2p])) d2n_len = min(self.data2_maxlen, len(self.data2[d2n])) X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len X2[i*2, :d2p_len], X2_len[i*2] = self.data2[d2p][:d2p_len], d2p_len X3[i*2, :self.pair_feat_size] = self.pair_feats[(d1, d2p)][:self.pair_feat_size] X4[i*2, :d1_len] = self.query_feats[d1][:self.query_feat_size] X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len X2[i*2+1, :d2n_len], X2_len[i*2+1] = self.data2[d2n][:d2n_len], d2n_len X3[i*2+1, :self.pair_feat_size] = self.pair_feats[(d1, d2n)][:self.pair_feat_size] X4[i*2+1, :d1_len] = self.query_feats[d1][:self.query_feat_size] yield X1, X1_len, X2, X2_len, X3, X4, Y def get_batch_generator(self): while True: X1, X1_len, X2, X2_len, X3, X4, Y = self.get_batch() yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len, 'query_feats': X4, 'pair_feats': X3}, Y)
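# --- Illustration (added, not part of the original module) ---
# A minimal, self-contained sketch of the binning done in
# DRMM_PairGenerator_linear.cal_hist above: each query-term/doc-term cosine
# similarity in [-1, 1] is mapped to one of `hist_size` bins and the
# log-scaled counts form that query term's matching histogram. The toy
# embeddings in the __main__ block are placeholders, not data from the source.
import numpy as np

def matching_histogram(query_vecs, doc_vecs, hist_size=30):
    # Normalise so dot products are cosine similarities in [-1, 1].
    q = query_vecs / np.linalg.norm(query_vecs, axis=1, keepdims=True)
    d = doc_vecs / np.linalg.norm(doc_vecs, axis=1, keepdims=True)
    sims = q.dot(d.T)                                    # (q_len, d_len)
    hist = np.zeros((q.shape[0], hist_size), dtype=np.float32)
    for (i, j), v in np.ndenumerate(sims):
        bin_id = int((v + 1.) / 2. * (hist_size - 1.))   # [-1, 1] -> [0, hist_size-1]
        hist[i][bin_id] += 1.
    return np.log10(hist + 1.)                           # same log-count scaling as cal_hist

if __name__ == '__main__':
    rng = np.random.RandomState(0)
    print(matching_histogram(rng.randn(3, 8), rng.randn(5, 8), hist_size=10).shape)  # (3, 10)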
python
""" Copyright (C) 2018, AIMLedge Pte, Ltd. All rights reserved. """ import pickle import os import face_recognition import cv2 import numpy as np from face_recognizer import FaceRecognizer, logger from scipy.spatial import distance FACE_REGISTRY_PATH = os.path.join(os.path.expanduser('~'), '.config/face-recognition') class EdgeFaceRecognizer(FaceRecognizer): def __init__(self): logger.info('Creating edge face recognizer.') self._registry_faces = [] self._registry_face_names = [] self._registry_face_ids = [] self._registry_face_encodings = [] self._image_scale = 1.0 self._num_upsamples = 2 self._face_detector_type = 'cnn' # hog or 'cnn' self._matching_thr = 0.1 if not os.path.exists(FACE_REGISTRY_PATH): logger.info('Creating face registry at {}'.format(FACE_REGISTRY_PATH)) os.makedirs(FACE_REGISTRY_PATH) self._face_registries = self.list_face_registries() self._active_face_registry = None def create_face_registry(self, registry_name): registry_path = self._get_face_registry_path(registry_name) if os.path.exists(registry_path): logger.info('Face registry already present. Not creating again') else: self._face_registries.append(registry_name) open(registry_path, 'w').close() return registry_name def delete_face_registry(self, registry_name): if registry_name not in self._face_registries: logger.warning('Looks like there is no such registry to delete.'.format( registry_name)) raise ValueError('No such face registry {}'.format(registry_name)) else: registry_path = self._get_face_registry_path(registry_name) os.remove(registry_path) if registry_name == self._active_face_registry: self._registry_face_names = [] self._registry_faces = [] self._registry_face_ids = [] self._registry_face_encodings = [] self._active_face_registry = None logger.info('Removed face registry {}'.format(registry_name)) return registry_name def get_active_face_registry(self): return self._active_face_registry def set_active_face_registry(self, registry_name): if registry_name not in self._face_registries: raise ValueError('Face registry not found {}'.format(registry_name)) # Nothing to do logger.info('Setting active face registry to {}'.format(registry_name)) if self._active_face_registry == registry_name: return registry_name self._load_face_registry(registry_name) self._active_face_registry = registry_name return self._active_face_registry def list_face_registries(self): registry_names = [] for reg_path in os.listdir(FACE_REGISTRY_PATH): file_ext = os.path.basename(reg_path).split('.')[-1] if file_ext == 'pkl': registry_names.append(os.path.basename(reg_path).split('.')[0]) return registry_names def face_registry_details(self, registry_name): if registry_name != self._active_face_registry: raise NotImplementedError('Only able to give active face registry') num_faces = len(self._registry_face_ids) for idx in range(num_faces): yield self._registry_face_ids[idx], self._registry_face_names[idx], \ self._registry_faces[idx] def register_face(self, registry_name, image, name): if registry_name not in self._face_registries: raise ValueError('No such face registry {}'.format(registry_name)) if isinstance(image, str): image = face_recognition.load_image_file(image) face_boxes = face_recognition.face_locations( image, number_of_times_to_upsample=self._num_upsamples, model='cnn') if len(face_boxes) == 0: logger.warning('No faces found in the image') return None elif len(face_boxes) == 1: target_face_box = face_boxes[0] logger.info('Found one face in the image {}'.format(target_face_box)) else: target_face_box = 
EdgeFaceRecognizer._get_largest_face(face_boxes) logger.info('Found multiple faces in the image. Taking the largest one {}' ''.format(target_face_box)) face_crop = image[target_face_box[0]:target_face_box[2], target_face_box[3]:target_face_box[1], :] encoding = face_recognition.face_encodings(image, known_face_locations=[target_face_box]) new_face_id = self._get_new_face_id() if registry_name != self._active_face_registry: active_reg = self._active_face_registry self._load_face_registry(registry_name) assert registry_name == self._active_face_registry self._registry_faces.append(face_crop) self._registry_face_names.append(name) assert len(encoding) == 1 self._registry_face_encodings.append(encoding[0]) self._registry_face_ids.append(new_face_id) self._save_active_face_registry() # Restore active registry if registry_name != self._active_face_registry: self._load_face_registry(active_reg) return new_face_id def recognize_faces(self, image): resized_image = cv2.resize(image, (0, 0), fx=self._image_scale, fy=self._image_scale) resized_image = resized_image[:, :, ::-1] # Returned face locations are [top(y1), right(x2), bottom(y2), left(x1)] face_locations = face_recognition.face_locations( resized_image, number_of_times_to_upsample=self._num_upsamples, model=self._face_detector_type) if len(face_locations) == 0: return [] face_encodings = face_recognition.face_encodings(resized_image, face_locations) face_encodings = np.array(face_encodings) # rescale face boxes and re-arrange the points in the (x1, x2, y1, # y2) order. detected_face_ids, detected_face_names, recognition_scores = self._match( face_encodings) face_locations = (np.array(face_locations) / self._image_scale).astype( np.int32) if face_locations.shape[0] > 0: face_locations[:, [0, 1, 2, 3]] = face_locations[:, [3, 0, 1, 2]] face_locations = list(map(tuple, face_locations)) output = [] for i in range(len(detected_face_names)): output.append({'face_id': detected_face_ids[i], 'face_name': detected_face_names[i], 'box': face_locations[i], 'detection_score': 1.0, 'recognition_score': recognition_scores[i] } ) return output def deregister_face(self, registry_name, face_id): raise NotImplementedError('Feature not implemented.') def get_face_name(self, registry_name, face_id): if registry_name != self._active_face_registry: raise ValueError('Registry must be active in order to get name') if face_id in self._registry_face_ids: return self._registry_face_names[self._registry_face_ids.index(face_id)] else: raise ValueError('No such face ID') def _find_best_match(self, face_encoding): found = False norm_dist = face_recognition.face_distance(self._registry_face_encodings, face_encoding) closest_match_idx = np.argmin(norm_dist) closest_match_conf = norm_dist[closest_match_idx] if closest_match_conf <= self._matching_thr: found = True return found, closest_match_idx, closest_match_conf def _match(self, face_encodings): assert len(self._registry_face_encodings) > 0 gallary = np.array(self._registry_face_encodings) dist_mat = distance.cdist(gallary, face_encodings, metric='cosine') rows = dist_mat.min(axis=1).argsort() cols = dist_mat.argmin(axis=1)[rows] used_rows = set() used_cols = set() all_face_ids = [-1 for i in range(len(face_encodings))] all_face_names = ['Unknown' for i in range(len(face_encodings))] all_scores = [0 for i in range(len(face_encodings))] for (row, col) in zip(rows, cols): if row in used_rows or col in used_cols: continue if dist_mat[row, col] > self._matching_thr: continue all_face_ids[col] = self._registry_face_ids[row] 
all_face_names[col] = self._registry_face_names[row] all_scores[col] = (1 - dist_mat[row, col]) * 100 used_rows.add(row) used_cols.add(col) return all_face_ids, all_face_names, all_scores def _get_face_registry_path(self, registry_name): """ :param registry_name: :return: """ return os.path.join(FACE_REGISTRY_PATH, registry_name + '.pkl') def _load_face_registry(self, registry_name): reg_path = self._get_face_registry_path(registry_name) if os.path.exists(reg_path): with open(reg_path, 'rb') as f: try: data = pickle.load(f) self._registry_face_encodings = data['face_encodings'] self._registry_faces = data['face_images'] self._registry_face_names = data['face_names'] self._registry_face_ids = data['face_ids'] self._active_face_registry = registry_name logger.info('Loaded face registry {}. Set it as active face ' 'registry'.format(registry_name)) except Exception as e: logger.warning('Falied to load the face registry {}'.format(e)) def _save_active_face_registry(self): registry_path = self._get_face_registry_path(self._active_face_registry) with open(registry_path, 'wb') as f: pickle.dump({'face_ids': self._registry_face_ids, 'face_names': self._registry_face_names, 'face_images': self._registry_faces, 'face_encodings': self._registry_face_encodings }, f) logger.info('Saved active face registry') def _get_new_face_id(self): return len(self._registry_face_ids) @staticmethod def _get_largest_face(face_boxes): """ :param face_boxes: List of (top, right, bottom , left) :return: """ face_areas = [] for face_box in face_boxes: area = (face_box[1] - face_box[3]) * (face_box[2] - face_box[0]) face_areas.append(area) face_areas = np.array(face_areas) largest_idx = np.argmax(face_areas) return face_boxes[largest_idx]
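# --- Usage sketch (added, not part of the original module) ---
# A minimal, hypothetical walk-through of the public API above. The image
# paths are placeholders; it assumes the face_recognition/cv2 dependencies are
# installed and that the registry pickle can be created and loaded.
if __name__ == '__main__':
    recognizer = EdgeFaceRecognizer()
    registry = recognizer.create_face_registry('demo')
    face_id = recognizer.register_face(registry, 'alice.jpg', 'Alice')
    print('registered face id:', face_id)

    recognizer.set_active_face_registry(registry)
    frame = cv2.imread('group_photo.jpg')  # BGR frame, e.g. from cv2.VideoCapture
    for match in recognizer.recognize_faces(frame):
        print(match['face_name'], match['box'], match['recognition_score'])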
python
from typing import Any from django.contrib.auth.models import Group from django.test import TestCase from pgq.decorators import task, JobMeta from pgq.models import Job from pgq.queue import AtLeastOnceQueue, AtMostOnceQueue, Queue class PgqDecoratorsTests(TestCase): def test_using_task_decorator_to_add_to_queue(self) -> None: """ The task decorator makes a celery-like task object which can be used for adding tasks to the queue and registering the task to the queue. """ queue = AtLeastOnceQueue(tasks={}) @task(queue) def demotask(queue: Queue, job: Job, args: Any, meta: JobMeta) -> int: return job.id demotask.enqueue({"count": 5}) self.assertIn("demotask", queue.tasks) queue.run_once() def test_atleastonce_retry_during_database_failure(self) -> None: """ Force a database error in the task. Check that it was retried. """ queue = AtLeastOnceQueue(tasks={}) @task(queue, max_retries=2) def failuretask(queue: Queue, job: Job, args: Any, meta: JobMeta) -> None: # group has max 150 chars for its name. Group.objects.create(name="!" * 151) return None failuretask.enqueue({}) originaljob = Job.objects.all()[0] queue.run_once() retryjob = Job.objects.all()[0] self.assertNotEqual(originaljob.id, retryjob.id) self.assertEqual(retryjob.args["meta"]["retries"], 1)
python
"""Mapping Vector Field of Single Cells """ from .estimation import *
python
from machine import I2C, Pin from sh1106 import SH1106_I2C import random from time import sleep # Options ROUND_WORLD = True # if True object can move around edges, if False edge is treated as an empty cell USE_USER_SEED = False # if True USER_SEED will be used to settle cells on world map, if False random seed will be generated USER_SEED = 553443 # seed for the initial colony of cells BACKGROUND_COLOUR = 0 LIVE_CELL_COLOUR = 1 SIZE_OF_INITIAL_COLONY = 0.4 # where 1 is the whole map UPDATE_DELAY = 0 # additional delay between population updates # Constants WORLD_WIDTH = 64 # number of cells horizontally WORLD_HEIGHT = 32 # number of cells vertically CELL_SIZE = 2 # side of single cell in pixels CENTER_X = int(WORLD_WIDTH / 2) CENTER_Y = int(WORLD_HEIGHT / 2) # Variables cells = [] # array where Cell objects will be stored # Init oled display i2c = I2C(1, scl=Pin(15), sda=Pin(14)) oled = SH1106_I2C(WORLD_WIDTH * CELL_SIZE, WORLD_HEIGHT * CELL_SIZE, i2c) oled.rotate(True) class Cell: def __init__(self, x, y): self.x = x self.y = y self.live = False def change_state(self): # changes state of the cell to opposite self.live = not self.live if self.live: draw_cell(self.x, self.y, LIVE_CELL_COLOUR) else: draw_cell(self.x, self.y, BACKGROUND_COLOUR) def check_neighbours(self): self.live_neighbours = 0 x_to_check = [self.x] y_to_check = [self.y] if ROUND_WORLD: y_to_check.append((self.y - 1) % WORLD_HEIGHT) y_to_check.append((self.y + 1) % WORLD_HEIGHT) x_to_check.append((self.x - 1) % WORLD_WIDTH) x_to_check.append((self.x + 1) % WORLD_WIDTH) else: if self.y > 0: # if cell is in the row 0, it doesn't have neighbours above y_to_check.append(self.y - 1) if self.y < WORLD_HEIGHT - 1: # if cell is in the lowest row, it doesn't have neighbours below y_to_check.append(self.y + 1) if self.x > 0: # if cell is in the left column, it doesn't have neighbours from the left side x_to_check.append(self.x - 1) if self.x < WORLD_WIDTH - 1: # if cell is in the right column, it doesn't have neighbours from the right side x_to_check.append(self.x + 1) for y in y_to_check: for x in x_to_check: if y != self.y or x != self.x: if cells[x][y].live == True: self.live_neighbours += 1 def check_rules(self): if self.live == True: if self.live_neighbours < 2 or self.live_neighbours > 3: self.change_state() if self.live == False and self.live_neighbours == 3: self.change_state() # Helper function used to draw single cell def draw_cell(x, y, colour): for x_value in range(x * CELL_SIZE, x * CELL_SIZE + CELL_SIZE): for y_value in range(y * CELL_SIZE, y * CELL_SIZE + CELL_SIZE): oled.pixel(x_value, y_value, colour) # Create world filled with dead cells def create_world(): global cells for x in range(0, WORLD_WIDTH): cells.append([]) for y in range(0, WORLD_HEIGHT): cells[x].append(Cell(x, y)) # Randomize initial state def seed_world(): global cells randomized_seed = '' if USE_USER_SEED: print("User seed used: ", USER_SEED) random.seed(USER_SEED) else: for counter in range(0, 6): randomized_seed += str(random.randrange(0, 10)) print("Seed used: ", randomized_seed) random.seed(int(randomized_seed)) for y in range(int(CENTER_Y - SIZE_OF_INITIAL_COLONY * CENTER_Y), int(CENTER_Y + SIZE_OF_INITIAL_COLONY * CENTER_Y)): for x in range(int(CENTER_X - SIZE_OF_INITIAL_COLONY * CENTER_X), int(CENTER_X + SIZE_OF_INITIAL_COLONY * CENTER_X)): finger_of_god = random.randrange(0, 2) if finger_of_god == 1: cells[x][y].change_state() oled.show() # Helper function used to update state of the colony def update_colony(): for row in cells: for cell in 
row: cell.check_neighbours() for row in cells: for cell in row: cell.check_rules() oled.show() # Run the simulation create_world() seed_world() while True: update_colony() sleep(UPDATE_DELAY)
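# --- Optional helper (added, not part of the original script) ---
# A minimal sketch, assuming the cells grid built by create_world(). The
# offsets form the classic Game of Life glider; define this before the
# simulation loop and call it in place of seed_world() to watch a
# deterministic pattern travel across the display.
def seed_glider():
    glider = [(0, 1), (1, 2), (2, 0), (2, 1), (2, 2)]  # (row, col) offsets
    for dy, dx in glider:
        cells[(CENTER_X + dx) % WORLD_WIDTH][(CENTER_Y + dy) % WORLD_HEIGHT].change_state()
    oled.show()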
python
import argparse import io import json import os import sys import zipfile import jinja2 def main(): parser = argparse.ArgumentParser() parser.add_argument('-o', '--output') parser.add_argument('input') options = parser.parse_args() known_solution_tests = set() broken_tests = {} solution_tests = {} other_tests = {} with open(options.input) as event_file: for line in event_file: data = json.loads(line) if 'configured' in data: if 'testSize' in data['configured']: target = data['id']['targetConfigured']['label'] broken_tests[target] = { 'target': target, 'result': 'error', 'message': 'Test was not run', } tags = data['configured'].get('tag', []) if 'solution' in tags: known_solution_tests.add(target) if 'testResult' in data: target = data['id']['testResult']['label'] if target in known_solution_tests: for output in data['testResult']['testActionOutput']: if output['name'] == 'test.outputs__outputs.zip': assert output['uri'].startswith('file://'), output['uri'] zip_path = output['uri'][len('file://'):] break else: broken_tests[target]['message'] = 'outputs.zip not found' continue try: with zipfile.ZipFile(zip_path) as archive: with archive.open('results.json') as f: solution_tests[target] = json.load(io.TextIOWrapper(f)) broken_tests.pop(target) except IOError as e: broken_tests[target]['message'] = 'Failed to read results.json: %s' % e else: status = data['testResult']['status'] result = { 'PASSED': 'success', 'FAILED': 'failure', }.get(status, 'error') other_tests[target] = { 'target': target, 'result': result, 'message': status, } broken_tests.pop(target) judge_matrices = {} for test_target, test in sorted(solution_tests.items()): judge_target = test['judge']['target'] judge_matrix = judge_matrices.setdefault( judge_target, {'judge_target': judge_target, 'test_targets': [], 'cases': {}}) judge_matrix['test_targets'].append(test_target) for case in test['cases']: row = judge_matrix['cases'].setdefault(case['name'], {}) row[test_target] = case report = { 'broken_tests': broken_tests, 'solution_tests': solution_tests, 'judge_matrices': judge_matrices, 'other_tests': other_tests, } env = jinja2.Environment( loader=jinja2.FileSystemLoader(os.path.dirname(__file__)), autoescape=True) template = env.get_template('test_results.md') html = template.render(report=report) if options.output: with open(options.output, 'w') as out: out.write(html) else: sys.stdout.write(html) if __name__ == '__main__': main()
python
__all__ = [ "assistant", "event", "error" ]
python
"""This class provides the Forward class""" import attr from ..handlers import CommandHandler, ReactionHandler from ..dataclasses import Thread, ThreadType, Message, Reaction, MessageReaction from .._i18n import _ @attr.s class Forward(object): """ This class provides a system for forwarding messages to a group. A selected account outside of a group can send a message to a group, and any of the group users can respond to it. The "send to group" command is by default called "send", and "send to user" command is by default called "respond". They can be changed by send_cmd and respond_cmd kwargs. This class provides two commands, so it has to be registered as: `bot.register(*forward.handlers())` """ _group_thread = attr.ib(converter=Thread.from_group_uid) _user_thread = attr.ib(converter=Thread.from_user_uid) _send_cmd = attr.ib(default='send') _respond_cmd = attr.ib(default='respond') def _send_fn(self, message: Message, bot_object): if message.thread != self._user_thread: message.reply(_("You can't use this command.")) return if not message.args: message.reply(_('Please provide text to be sent.')) return bot_object.send( _("Message from {user}:\n{message}").format( user=message.get_author_name(), message=message.args ), thread=self._group_thread ) message.reply(_('The message was forwarded.')) def _respond_fn(self, message: Message, bot_object): if message.thread != self._group_thread: message.reply(_("You can't use this command.")) return if not message.args: message.reply(_('Please provide text to be sent.')) return def _callback(reaction: Reaction, bot_object): if reaction.uid == message.uid: if reaction.reaction == MessageReaction.YES: bot_object.send( _("Message from {user}:\n{message}").format( user=message.get_author_name(), message=message.args ), thread=self._user_thread ) message.reply(_('The message was forwarded.')) mid = message.reply( _('Are you sure you want to send this to {user}?\n' 'Please confirm by reacting {reaction}.').format( user=bot_object.get_user_name(self._user_thread.id_), reaction=MessageReaction.YES.value ), reply=True ) bot_object.register(ReactionHandler(_callback, mid, timeout=120)) def handlers(self): """Returns a list of handlers that need to be registered""" handlers = [] handlers.append( CommandHandler(self._send_fn, self._send_cmd) ) handlers.append( CommandHandler(self._respond_fn, self._respond_cmd) ) return handlers
python
from app import app, iam_blueprint, iam_base_url, sla as sla from flask import json, current_app, render_template, request, redirect, url_for, flash, session import requests, json import yaml import io, os, sys from fnmatch import fnmatch from hashlib import md5 from functools import wraps def to_pretty_json(value): return json.dumps(value, sort_keys=True, indent=4, separators=(',', ': ')) app.jinja_env.filters['tojson_pretty'] = to_pretty_json def avatar(email, size): digest = md5(email.lower().encode('utf-8')).hexdigest() return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(digest, size) toscaDir = app.config.get('TOSCA_TEMPLATES_DIR') + "/" tosca_pars_dir = app.config.get('TOSCA_PARAMETERS_DIR') orchestratorUrl = app.config.get('ORCHESTRATOR_URL') imUrl = app.config.get('IM_URL') toscaTemplates = [] for path, subdirs, files in os.walk(toscaDir): for name in files: if fnmatch(name, "*.yml") or fnmatch(name, "*.yaml"): # skip hidden files if name[0] != '.': toscaTemplates.append( os.path.relpath(os.path.join(path, name), toscaDir )) #toscaTemplates.sort(key=str.lower) toscaInfo = {} for tosca in toscaTemplates: with io.open( toscaDir + tosca) as stream: template = yaml.full_load(stream) toscaInfo[tosca] = { "valid": True, "description": "TOSCA Template", "metadata": { "icon": "https://cdn4.iconfinder.com/data/icons/mosaicon-04/512/websettings-512.png" }, "enable_config_form": False, "inputs": {}, "tabs": {} } if 'topology_template' not in template: toscaInfo[tosca]["valid"] = False else: if 'description' in template: toscaInfo[tosca]["description"] = template['description'] if 'metadata' in template and template['metadata'] is not None: for k,v in template['metadata'].items(): toscaInfo[tosca]["metadata"][k] = v if 'icon' not in template['metadata']: toscaInfo[tosca]["metadata"]['icon'] = "xxxx" if 'inputs' in template['topology_template']: toscaInfo[tosca]['inputs'] = template['topology_template']['inputs'] ## add parameters code here tabs = {} if tosca_pars_dir: tosca_pars_path = tosca_pars_dir + "/" # this has to be reassigned here because is local. 
for fpath, subs, fnames in os.walk(tosca_pars_path): for fname in fnames: if fnmatch(fname, os.path.splitext(tosca)[0] + '.parameters.yml') or \ fnmatch(fname, os.path.splitext(tosca)[0] + '.parameters.yaml'): # skip hidden files if fname[0] != '.': tosca_pars_file = os.path.join(fpath, fname) with io.open(tosca_pars_file) as pars_file: toscaInfo[tosca]['enable_config_form'] = True pars_data = yaml.full_load(pars_file) toscaInfo[tosca]['inputs'] = pars_data["inputs"] if "tabs" in pars_data: toscaInfo[tosca]['tabs'] = pars_data["tabs"] app.logger.debug("Extracted TOSCA INFO: " + json.dumps(toscaInfo)) def authorized_with_valid_token(f): @wraps(f) def decorated_function(*args, **kwargs): if not iam_blueprint.session.authorized or 'username' not in session: return redirect(url_for('login')) if iam_blueprint.session.token['expires_in'] < 20: app.logger.debug("Force refresh token") iam_blueprint.session.get('/userinfo') return f(*args, **kwargs) return decorated_function @app.route('/settings') @authorized_with_valid_token def show_settings(): return render_template('settings.html', orchestrator_url=orchestratorUrl, iam_url=iam_base_url) @app.route('/login') def login(): session.clear() return render_template('home.html') @app.route('/slas') @authorized_with_valid_token def getslas(): slas={} try: access_token = iam_blueprint.token['access_token'] slas = sla.get_slas(access_token) except Exception as e: flash("Error retrieving SLAs list: \n" + str(e), 'warning') return render_template('sla.html', slas=slas) @app.route('/') def home(): if not iam_blueprint.session.authorized: return redirect(url_for('login')) account_info = iam_blueprint.session.get("/userinfo") if account_info.ok: account_info_json = account_info.json() session['username'] = account_info_json['name'] session['gravatar'] = avatar(account_info_json['email'], 26) session['organisation_name'] = account_info_json['organisation_name'] access_token = iam_blueprint.token['access_token'] return render_template('portfolio.html', templates=toscaInfo) @app.route('/deployments') @authorized_with_valid_token def showdeployments(): access_token = iam_blueprint.session.token['access_token'] headers = {'Authorization': 'bearer %s' % (access_token)} url = orchestratorUrl + "/deployments?createdBy=me&page=0&size=9999" response = requests.get(url, headers=headers) deployments = {} if not response.ok: flash("Error retrieving deployment list: \n" + response.text, 'warning') else: deployments = response.json()["content"] app.logger.debug("Deployments: " + str(deployments)) return render_template('deployments.html', deployments=deployments) @app.route('/template/<depid>') @authorized_with_valid_token def deptemplate(depid=None): access_token = iam_blueprint.session.token['access_token'] headers = {'Authorization': 'bearer %s' % (access_token)} url = orchestratorUrl + "/deployments/" + depid + "/template" response = requests.get(url, headers=headers) if not response.ok: flash("Error getting template: " + response.text) return redirect(url_for('home')) template = response.text return render_template('deptemplate.html', template=template) # @app.route('/log/<physicalId>') @authorized_with_valid_token def deplog(physicalId=None): access_token = iam_blueprint.session.token['access_token'] headers = {'Authorization': 'id = im; type = InfrastructureManager; token = %s;' % (access_token)} url = imUrl + "/infrastructures/" + physicalId + "/contmsg" response = requests.get(url, headers=headers) if not response.ok: log="Not found" else: log = response.text 
return render_template('deplog.html', log=log) @app.route('/delete/<depid>') @authorized_with_valid_token def depdel(depid=None): access_token = iam_blueprint.session.token['access_token'] headers = {'Authorization': 'bearer %s' % (access_token)} url = orchestratorUrl + "/deployments/" + depid response = requests.delete(url, headers=headers) if not response.ok: flash("Error deleting deployment: " + response.text); return redirect(url_for('showdeployments')) @app.route('/configure') @authorized_with_valid_token def configure(): access_token = iam_blueprint.session.token['access_token'] selected_tosca = request.args['selected_tosca'] slas = sla.get_slas(access_token) return render_template('createdep.html', template=toscaInfo[selected_tosca], selectedTemplate=selected_tosca, slas=slas) def add_sla_to_template(template, sla_id): # Add the placement policy template['topology_template']['policies'] = [ {"deploy_on_specific_site": {"type": "tosca.policies.Placement", "properties": {"sla_id": sla_id}}}] app.logger.debug(yaml.dump(template, default_flow_style=False)) return template # # @app.route('/submit', methods=['POST']) @authorized_with_valid_token def createdep(): access_token = iam_blueprint.session.token['access_token'] app.logger.debug("Form data: " + json.dumps(request.form.to_dict())) with io.open( toscaDir + request.args.get('template')) as stream: template = yaml.full_load(stream) form_data = request.form.to_dict() params={} if 'extra_opts.keepLastAttempt' in form_data: params['keepLastAttempt'] = 'true' else: params['keepLastAttempt'] = 'false' if form_data['extra_opts.schedtype'] == "man": template = add_sla_to_template(template, form_data['extra_opts.selectedSLA']) inputs = { k:v for (k,v) in form_data.items() if not k.startswith("extra_opts.") } app.logger.debug("Parameters: " + json.dumps(inputs)) payload = { "template" : yaml.dump(template,default_flow_style=False, sort_keys=False), "parameters": inputs } url = orchestratorUrl + "/deployments/" headers = {'Content-Type': 'application/json', 'Authorization': 'bearer %s' % (access_token)} response = requests.post(url, json=payload, params=params, headers=headers) if not response.ok: flash("Error submitting deployment: \n" + response.text) return redirect(url_for('showdeployments')) @app.route('/logout') def logout(): session.clear() iam_blueprint.session.get("/logout") return redirect(url_for('login'))
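# --- Illustration (added, not part of the original app) ---
# A minimal sketch of what add_sla_to_template() attaches to a template: a
# tosca.policies.Placement policy carrying the chosen sla_id. The template
# and SLA id below are placeholders.
def _example_sla_policy():
    template = {'topology_template': {}}
    return add_sla_to_template(template, 'sla-123')
    # -> {'topology_template': {'policies': [
    #        {'deploy_on_specific_site': {'type': 'tosca.policies.Placement',
    #                                     'properties': {'sla_id': 'sla-123'}}}]}}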
python
import sys
import logging

logging.basicConfig(
    format="[%(levelname)s] [%(name)s] %(asctime)s %(message)s", level=logging.INFO
)
logging.StreamHandler(sys.stdout)
logger = logging.getLogger("brev-cli")


class Dev:
    api_url = "http://localhost:5000"
    log_level = logging.DEBUG
    cotter_api_key_id = "19024767-a0b2-4221-8faa-ef116dc853d0"


class Staging:
    api_url = "https://staging.brev.dev"
    log_level = logging.INFO
    cotter_api_key_id = "19024767-a0b2-4221-8faa-ef116dc853d0"


class Prod:
    api_url = "https://app.brev.dev"
    log_level = logging.WARNING
    cotter_api_key_id = "19024767-a0b2-4221-8faa-ef116dc853d0"


config = Prod

logger.setLevel(config.log_level)
python
# coding=utf-8 """ The Campaign Folders API endpoints Documentation: http://developer.mailchimp.com/documentation/mailchimp/reference/campaign-folders/ Schema: https://api.mailchimp.com/schema/3.0/CampaignFolders/Instance.json """ from __future__ import unicode_literals from mailchimp3.baseapi import BaseApi class CampaignFolders(BaseApi): """ Organize your campaigns using folders. """ def __init__(self, *args, **kwargs): """ Initialize the endpoint """ super(CampaignFolders, self).__init__(*args, **kwargs) self.endpoint = 'campaign-folders' self.folder_id = None def create(self, data): """ Create a new campaign folder. :param data: The request body parameters :type data: :py:class:`dict` data = { "name": string* } """ if 'name' not in data: raise KeyError('The campaign folder must have a name') response = self._mc_client._post(url=self._build_path(), data=data) if response is not None: self.folder_id = response['id'] else: self.folder_id = None return response def all(self, get_all=False, **queryparams): """ Get all folders used to organize campaigns. :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = [] queryparams['count'] = integer queryparams['offset'] = integer """ self.folder_id = None if get_all: return self._iterate(url=self._build_path(), **queryparams) else: return self._mc_client._get(url=self._build_path(), **queryparams) def get(self, folder_id, **queryparams): """ Get information about a specific folder used to organize campaigns. :param folder_id: The unique id for the campaign folder. :type folder_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = [] """ self.folder_id = folder_id return self._mc_client._get(url=self._build_path(folder_id), **queryparams) def update(self, folder_id, data): """ Update a specific folder used to organize campaigns. :param folder_id: The unique id for the campaign folder. :type folder_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { "name": string* } """ self.folder_id = folder_id if 'name' not in data: raise KeyError('The campaign folder must have a name') return self._mc_client._patch(url=self._build_path(folder_id), data=data) def delete(self, folder_id): """ Delete a specific campaign folder, and mark all the campaigns in the folder as ‘unfiled’. :param folder_id: The unique id for the campaign folder. :type folder_id: :py:class:`str` """ self.folder_id = folder_id return self._mc_client._delete(url=self._build_path(folder_id))
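# --- Usage sketch (added, not part of the original module) ---
# A minimal, hypothetical example of driving this endpoint through the
# mailchimp3 client; the API key and folder names are placeholders.
#
#   from mailchimp3 import MailChimp
#
#   client = MailChimp(mc_api='your-api-key-us1')
#   folder = client.campaign_folders.create(data={'name': 'Spring newsletters'})
#   print(client.campaign_folders.get(folder_id=folder['id']))
#   client.campaign_folders.update(folder_id=folder['id'], data={'name': 'Spring 2021'})
#   client.campaign_folders.delete(folder_id=folder['id'])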
python
""" This file tests the whole stack of the miura tool. """ import os import shlex import miura from jenkinsapi import jenkins from mock import Mock, patch, call from nose.tools import eq_ class TestMiura(): def setUp(self): self.old_dir = os.path.abspath(os.curdir) self.test_dir = os.path.dirname(__file__) os.chdir(self.test_dir) self._jenkins = jenkins.Jenkins self.jenkinsapi_job = Mock() self.jenkinsapi_jenkins = Mock() self.jenkinsapi_jenkins.__getitem__ = Mock(return_value=self.jenkinsapi_job) jenkins.Jenkins = lambda *_: self.jenkinsapi_jenkins self._create_stdout_logger = miura._create_stdout_logger miura._create_stdout_logger = Mock() def tearDown(self): os.chdir(self.old_dir) jenkins.Jenkins = self._jenkins miura._create_stdout_logger = self._create_stdout_logger def test_base_case(self): miura.main(shlex.split('example')) assert self.jenkinsapi_job.update_config.called def test_delete(self): miura.main(shlex.split('-d example')) assert self.jenkinsapi_jenkins.delete_job.called def test_bad_script(self): with patch.object(miura, 'LOGGER') as logger: miura.main(shlex.split('boogyboogy')) logger.exception.assert_called_once_with("") def test_filter(self): miura.main(shlex.split('-f "foo=ba[r|z]" example')) eq_(self.jenkinsapi_jenkins.__getitem__.mock_calls, [call('bar'), call('baz')])
python
""" This file is part of the TheLMA (THe Laboratory Management Application) project. See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information. Chemical structure resource. """ from everest.resources.base import Member from everest.resources.descriptors import member_attribute from everest.resources.descriptors import terminal_attribute from thelma.interfaces import IMoleculeDesign from thelma.interfaces import IMoleculeDesignPool from thelma.interfaces import IOrganization from thelma.resources.base import RELATION_BASE_URL __docformat__ = 'reStructuredText en' __all__ = ['PooledSupplierMoleculeDesignMember', 'SingleSupplierMoleculeDesignMember', 'SupplierMoleculeDesignMember', ] class SupplierMoleculeDesignMember(Member): relation = "%s/supplier-molecule-design" % RELATION_BASE_URL product_id = terminal_attribute(str, 'product_id') supplier = member_attribute(IOrganization, 'supplier') is_current = terminal_attribute(bool, 'is_current') class SingleSupplierMoleculeDesignMember(SupplierMoleculeDesignMember): relation = "%s/single-supplier-molecule-design" % RELATION_BASE_URL molecule_design = member_attribute(IMoleculeDesign, 'molecule_design') class PooledSupplierMoleculeDesignMember(SupplierMoleculeDesignMember): relation = "%s/pooled-supplier-molecule-design" % RELATION_BASE_URL molecule_design_pool = member_attribute(IMoleculeDesignPool, 'molecule_design_pool')
python
import gym import numpy as np from tqdm import trange scale = 3 src_prefix = "figures" seed = 100 def get_obs_spec(env_id): env = gym.make("fetch:" + env_id) env.seed(seed) buffer = [] for k, v in env.observation_space.spaces.items(): if hasattr(v, "spaces"): buffer += [f"{k}:"] for k, v in v.spaces.items(): buffer += [f"&nbsp;&nbsp;&nbsp;&nbsp;{k}: {v.shape}"] else: buffer += [f"{k}: {v.shape}"] return "<br>".join(buffer) def render_initial(env_id, doc): env = gym.make(env_id) env.seed(seed) env_id = env_id.split(':')[-1] img = env.render('rgb_array', width=150 * scale, height=120 * scale) doc.figure(img, src=f"{src_prefix}/{env_id}_init.png?ts={doc.now('%f')}", title=env_id) frames = [] for i in range(10): env.reset() frames.append(env.render('rgb_array', width=100 * scale, height=120 * scale)) doc.figure(np.array(frames).min(axis=0), src=f"{src_prefix}/{env_id}_reset.png?ts={doc.now('%f')}", title="distribution") return env def render_video(env_id, n, doc, env=None, title=None, filename=None): if env is None: env = gym.make(env_id) env.seed(seed) env_id = env_id.split(':')[-1] frames = [] for ep in trange(n): obs = env.reset() frames.append(env.render('rgb_array', width=100 * scale, height=120 * scale)) for i in range(10): act = env.action_space.sample() obs, r, done, info = env.step(act) frames.append(env.render('rgb_array', width=100 * scale, height=120 * scale)) else: print(env_id, "desired", obs['desired_goal']) print(env_id, "achieved", obs['achieved_goal']) if filename: doc.video(np.array(frames), src=f"{src_prefix}/{filename}?ts={doc.now('%f')}", title=title) else: doc.video(np.array(frames), src=f"{src_prefix}/{env_id}.gif?ts={doc.now('%f')}", title=title)
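# --- Usage sketch (added, not part of the original module) ---
# A minimal, hypothetical driver for the helpers above. It assumes a `doc`
# object exposing figure()/video()/now() (e.g. a cmx-style report object) and
# that the fetch:* environment ids are registered; both are assumptions, and
# 'fetch:Reach-v0' is a placeholder id.
#
#   from cmx import doc
#
#   print(get_obs_spec('Reach-v0'))               # observation space summary
#   env = render_initial('fetch:Reach-v0', doc)   # initial frame + reset spread
#   render_video('fetch:Reach-v0', n=3, doc=doc, env=env, title='random policy')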
python
from coolname import generate_slug from flask import Flask, request from flask_cors import CORS from src.users.user_profile import ( get_user_profile, get_user_profiles, create_user_profile, update_user_profile, ) from src.teams.team_profile import ( get_team_profile, get_team_profiles, create_team_profile, update_team_profile, ) from src.teams.team_complete import team_complete from src.teams.user_leave import user_leave from src.teams.unify.team_invite import team_invite from src.teams.unify.team_confirm import team_confirm from src.teams.unify.team_rescind import team_rescind from src.teams.unify.team_reject import team_reject from src.teams.unify.user_invite import user_invite from src.matching.team_recommendations import get_team_recommendations from src.flaskapp.util import format_string from src.flaskapp.auth import authenticate app = Flask(__name__) CORS(app) @app.route("/", methods=["GET"]) def index(): return {"message": "Welcome to TeamRU!"}, 200 ############################## USERS ############################## @app.route("/users", methods=["GET", "POST"]) @authenticate def users(email): if request.method == "GET": # Filter response using query parameters # Might need to add pagination (limit/offset) for this response return get_user_profiles(request.args) if request.method == "POST": # Create a new user data = request.get_json(silent=True) prizes = [] skills = [] interests = [] bio = "" github = "" seriousness = 3 if "prizes" in data: prizes = format_string(data["prizes"]) if "skills" in data: skills = format_string(data["skills"]) if "interests" in data: interests = format_string(data["interests"]) if "bio" in data: bio = format_string(data["bio"]) if "github" in data: # NOTE can ping github api to verify this is an actual acct. github = format_string(data["github"]) if "seriousness" in data: try: seriousness = int(data["seriousness"]) except ValueError: pass return create_user_profile( email, prizes=prizes, skills=skills, bio=bio, github=github, interests=interests, seriousness=seriousness, ) @app.route("/users/profile", methods=["GET", "PUT"]) @authenticate def single_user(email): if request.method == "GET": # Retrieve a single user return get_user_profile(email) if request.method == "PUT": data = request.get_json(silent=True) kwargs = { name: format_string(data[name]) for name in [ "prizes", "skills", "bio", "github", "interests", "seriousness", ] if data.get(name) } return update_user_profile(email, **kwargs) ############################## TEAMS ############################## @app.route("/teams", methods=["GET", "POST"]) @authenticate def teams(email): if request.method == "GET": search = request.args.get("filter", None) try: offset = int(request.args.get("offset")) except: offset = 0 try: limit = int(request.args.get("limit")) except: limit = 10 return get_team_profiles(email, search, offset, limit) if request.method == "POST": data = request.get_json(silent=True) if ( not data or "name" not in data or "desc" not in data or not data["name"] or not data["desc"] ): return {"message": "Required info not found"}, 400 team_name = format_string(data["name"]) team_desc = format_string(data["desc"]) skills = [] if "skills" in data: skills = format_string(data["skills"]) prizes = [] if "prizes" in data: prizes = format_string(data["prizes"]) return create_team_profile(team_name, email, team_desc, skills, prizes) @app.route("/teams/<team_id>", methods=["GET", "PUT"]) @authenticate def single_team(email, team_id): if request.method == "GET": return get_team_profile(email, team_id) 
if request.method == "PUT": data = request.get_json(silent=True) kwargs = { name: format_string(data[name]) for name in ["name", "desc", "skills", "prizes"] if data.get(name) } return update_team_profile(email, team_id, **kwargs) @app.route("/teams/<team_id>/complete", methods=["PUT"]) @authenticate def mark_team_complete(email, team_id): return team_complete(email, team_id) @app.route("/teams/<team_id>/leave", methods=["PUT"]) @authenticate def leave(email, team_id): response = user_leave(email, team_id) create_team_profile(generate_slug(), email, "Edit Me :D", [], []) return response ############################## UNIFY ############################## @app.route("/teams/<team1_id>/invite", methods=["POST"]) @authenticate def invite(email, team1_id): # NOTE team1 -inviting-> team2 (invite another team) # team1_name = team_id data = request.get_json(silent=True) if not data or "team2_id" not in data or not data["team2_id"]: return {"message": "Required info not found"}, 400 team2_id = data["team2_id"] return team_invite(email, team1_id, team2_id) @app.route("/teams/<team1_id>/confirm", methods=["POST"]) @authenticate def confirm(email, team1_id): # NOTE team1 -confirms-> team2 (confirm an invite) # team1_name = team_id data = request.get_json(silent=True) if not data or "team2_id" not in data or not data["team2_id"]: return {"message": "Required info not found"}, 400 team2_id = data["team2_id"] return team_confirm(email, team1_id, team2_id) @app.route("/teams/<team1_id>/rescind", methods=["POST"]) @authenticate def rescind(email, team1_id): # NOTE team1 -rescind-> team2 (rescind an invite) # team1_name = team_id data = request.get_json(silent=True) if not data or "team2_id" not in data or not data["team2_id"]: return {"message": "Required info not found"}, 400 team2_id = data["team2_id"] return team_rescind(email, team1_id, team2_id) @app.route("/teams/<team1_id>/reject", methods=["POST"]) @authenticate def reject(email, team1_id): # NOTE team1 -reject-> team2 (rejecting an invite) # team1_name = team_id data = request.get_json(silent=True) if not data or "team2_id" not in data or not data["team2_id"]: return {"message": "Required info not found"}, 400 team2_id = data["team2_id"] return team_reject(email, team1_id, team2_id) @app.route("/teams/<team1_id>/invite/user", methods=["POST"]) @authenticate def invite_user(email, team1_id): # NOTE team1 -inviting-> user2 (invite another 1 person team) data = request.get_json(silent=True) if not data or "user_email" not in data or not data["user_email"]: return {"message": "Required info not found"}, 400 user2_email = data["user_email"] return user_invite(email, team1_id, user2_email) ############################## MATCHES ############################## @app.route("/matches/<team_id>", methods=["GET"]) @authenticate def team_recommendations(email, team_id): # WIP return get_team_recommendations(email) email = None team_id = None return {"message": "placeholder"}, 200
python
#!/usr/bin/env python """ An example consumer that uses a greenlet pool to accept incoming market messages. This example offers a high degree of concurrency. """ import zlib # This can be replaced with the built-in json module, if desired. import simplejson import gevent from gevent.pool import Pool from gevent import monkey; gevent.monkey.patch_all() import zmq import scipy.stats as stats import numpy.ma as ma import numpy as np import PySQLPool from config import config from datetime import datetime import time import dateutil.parser np.seterr(all='ignore') PySQLPool.getNewPool().maxActiveConnections = 50 dbConn = PySQLPool.getNewConnection(user=config['username'],passwd=config['password'],db=config['db'], commitOnEnd=True) # The maximum number of greenlet workers in the greenlet pool. This is not one # per processor, a decent machine can support hundreds or thousands of greenlets. # I recommend setting this to the maximum number of connections your database # backend can accept, if you must open one connection per save op. MAX_NUM_POOL_WORKERS = 300 def main(): """ The main flow of the application. """ context = zmq.Context() subscriber = context.socket(zmq.SUB) # Connect to the first publicly available relay. subscriber.connect('tcp://element-43.com:8050') # Disable filtering. subscriber.setsockopt(zmq.SUBSCRIBE, "") # We use a greenlet pool to cap the number of workers at a reasonable level. greenlet_pool = Pool(size=MAX_NUM_POOL_WORKERS) print("Consumer daemon started, waiting for jobs...") print("Worker pool size: %d" % greenlet_pool.size) while True: # Since subscriber.recv() blocks when no messages are available, # this loop stays under control. If something is available and the # greenlet pool has greenlets available for use, work gets done. greenlet_pool.spawn(worker, subscriber.recv()) def worker(job_json): """ For every incoming message, this worker function is called. Be extremely careful not to do anything CPU-intensive here, or you will see blocking. Sockets are async under gevent, so those are fair game. """ # Receive raw market JSON strings. market_json = zlib.decompress(job_json) # Un-serialize the JSON data to a Python dict. market_data = simplejson.loads(market_json) # Save to your choice of DB here. 
global dbConn query = PySQLPool.getNewQuery(dbConn) if market_data['resultType'] == 'orders': rows = market_data['rowsets'] try: for row in rows: if len(row['rows']) == 0: pass genTime = dateutil.parser.parse(row['generatedAt']) genTime = int(time.mktime(genTime.timetuple())) typeID = row['typeID'] regionID = row['regionID'] buyCount = [] sellCount = [] buyPrice = [] sellPrice = [] tempMask = [] buyAvg = 0 buyMean = 0 buyTotal = 0 sellAvg = 0 sellMean = 0 sellTotal = 0 buy = 0 sell = 0 set = 0 stuff = row['rows'] search = "SELECT * FROM prices WHERE uniquek = '%s' AND dateTime > '%s'" % (str(regionID) + str(typeID), genTime) query.Query(search) if (len(query.record) == 1) or (genTime > int(time.mktime(time.gmtime()))): pass for data in stuff: if data[6] == True: buyPrice.append(data[0]) buyCount.append(data[4] - data[1]) elif data[6] == False: sellPrice.append(data[0]) sellCount.append(data[4] - data[1]) else: pass if len(buyPrice) > 1: top = stats.scoreatpercentile(buyPrice, 95) bottom = stats.scoreatpercentile(buyPrice, 5) buyMasked = ma.masked_outside(buyPrice, bottom, top) tempMask = buyMasked.mask buyCountMasked = ma.array(buyCount, mask=tempMask, fill_value = 0) ma.fix_invalid(buyMasked, mask=0) ma.fix_invalid(buyCountMasked, mask=0) buyAvg = ma.average(buyMasked, 0, buyCountMasked) buyMean = ma.mean(buyMasked) buyTotal = ma.sum(buyCountMasked) if buyTotal == 0: buyAvg = 0 buyMean = 0 set = 1 if len(buyPrice) < 4: buyAvg = ma.average(buyPrice) buyMean = ma.mean(buyPrice) buyPrice.sort() buy = buyPrice.pop() if len(sellPrice) > 3: top = stats.scoreatpercentile(sellPrice, 95) bottom = stats.scoreatpercentile(sellPrice, 5) sellMasked = ma.masked_outside(sellPrice, bottom, top) tempMask = sellMasked.mask sellCountMasked = ma.array(sellCount, mask=tempMask, fill_value = 0) ma.fix_invalid(sellMasked, mask=0) ma.fix_invalid(sellCountMasked, mask=0) sellAvg = ma.average(sellMasked, 0, sellCountMasked) sellMean = ma.mean(sellMasked) sellTotal = ma.sum(sellCountMasked) if sellTotal == 0: sellAvg = 0 sellMean = 0 set = 1 if len(sellPrice) < 4: sellMean = ma.mean(sellPrice) sellTotal = ma.sum(sellPrice) sellPrice.sort() sellPrice.reverse() sell = sellPrice.pop() data = "REPLACE INTO prices SET uniquek = '%s', region = '%i', itemid = '%i', buymean = '%.2f', buyavg = '%.2f', sellmean = '%.2f', sellavg = '%.2f', buycount = '%i', sellcount = '%i', buy = '%.2f', sell = '%.2f', dateTime = '%i'" % (str(regionID) + str(typeID), regionID, typeID, np.nan_to_num(buyMean), np.nan_to_num(buyAvg), np.nan_to_num(sellMean), np.nan_to_num(sellAvg), np.nan_to_num(buyTotal), np.nan_to_num(sellTotal), buy, sell, genTime) query.Query(data) except: pass if __name__ == '__main__': main()
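# --- Illustration (added, not part of the original consumer) ---
# A minimal, self-contained sketch of the outlier handling used in worker()
# above: prices outside the 5th-95th percentile band are masked, and the
# remaining prices are averaged weighted by their (identically masked)
# volumes. It reuses the scipy.stats / numpy.ma imports from the top of this
# file; the numbers below are placeholders.
def trimmed_weighted_average(prices, volumes):
    top = stats.scoreatpercentile(prices, 95)
    bottom = stats.scoreatpercentile(prices, 5)
    masked_prices = ma.masked_outside(prices, bottom, top)
    masked_volumes = ma.array(volumes, mask=masked_prices.mask, fill_value=0)
    return ma.average(masked_prices, 0, masked_volumes)

# e.g. trimmed_weighted_average([5.0, 5.2, 5.1, 999.0], [10, 20, 5, 1])
# drops the 999.0 outlier before computing the volume-weighted price.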
python
def site_name(request):
    return {
        'name_of_site': 'Worker Quest Tour'
    }
python
import pytest

from cuenca.resources import CurpValidation, Identity


@pytest.mark.vcr
def test_identity_retrieve(curp_validation_request):
    # creating a curp_validation automatically creates the identity
    curp_validation = CurpValidation.create(**curp_validation_request)
    assert curp_validation.renapo_curp_match

    # querying the identity
    identity = Identity.one(curp=curp_validation.calculated_curp)
    assert identity.id is not None
python
from hashlib import sha256 from zappa.async import task import hmac from flask import Flask, request, render_template import dropbox from dropbox.files import FileMetadata from dropbox.exceptions import ApiError import os import boto3 from boto.mturk.connection import MTurkConnection from boto.mturk.connection import HTMLQuestion import json import requests app = Flask(__name__) # Instantiate Dropbox dbx = dropbox.Dropbox(os.environ['DB_ACCESS_TOKEN']) # Create connection to mturk mtc = MTurkConnection(os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'], host = 'mechanicalturk.sandbox.amazonaws.com') def send_email(email, name, subject, html, time, context, tags): with app.test_request_context(): r = requests.post('https://api.mailgun.net/v3/{}/messages'.format(DOMAIN), auth=auth, data={"from": '{}@{}'.format(MAIL_PREFIX, DOMAIN), "to": '{} <{}>'.format(name, email), "subject": subject, "html": render_template(html, context=context), "o:deliverytime": (datetime.utcnow() + timedelta(days=time)).strftime("%a, %d %b %Y %H:%M:%S +0000"), "v:context": json.dumps(context), "o:tag": tags}) print('Status: {}, {}'.format(r.status_code, email)) # Check mturk account balance def check_balance(): try: account_balance = str(mtc.get_account_balance()[0]) if float(account_balance[1:]) <= 10.00: print(account_balance) #send_email() except ValueError: print('You have an account balance of {0}'.format(account_balance)) def get_db_links(folder): '''Move the file to a temporary folder, get the shared url and then process the function that creates the HIT on Mechanical Turk''' temp_folder = '/matthew/business/atlasalliancegroup/pythonfinancial/receipts/temp/' result = dbx.files_list_folder(path=folder) for entry in result.entries: if isinstance(entry, FileMetadata): move_file = dbx.files_move_v2(from_path=entry.path_lower, to_path='{0}{1}'.format(temp_folder, entry.name)) temp_location = move_file.metadata.path_lower try: doc_url = dbx.sharing_create_shared_link_with_settings(path=temp_location).url except ApiError: doc_url = dbx.sharing_list_shared_links(path=temp_location).links[0].url create_hit(doc_url, temp_location) @task def process_user(account): # Check Mturk account balance and notify if low check_balance() receipts_folder = '/matthew/business/atlasalliancegroup/pythonfinancial/receipts/' bills_folder = '/matthew/business/atlasalliancegroup/pythonfinancial/bills/' get_db_links(receipts_folder) # get_db_links(bills_folder) #dbx.files_permanently_delete(entry.path_lower) @app.route('/webhook', methods=['GET', 'POST']) def webhook(): '''Receive a list of changed user IDs from Dropbox and process each.''' if request.method == 'GET': return request.args.get('challenge') else: # Make sure this is a valid request from Dropbox signature = request.headers.get('X-Dropbox-Signature') if not hmac.compare_digest(signature, hmac.new(os.environ['DB_APP_SECRET'].encode('UTF-8'), request.data, sha256).hexdigest()): abort(403) for account in json.loads(request.data)['list_folder']['accounts']: # We need to respond quickly to the webhook request, so we do the # actual work in a separate thread. For more robustness, it's a # good idea to add the work to a reliable queue and process the queue # in a worker process. 
process_user(account) return '' def create_hit(url, path): # Load the form template and set the height of the frame it will be shown in html_question = HTMLQuestion(render_template('form.html', url=url), 500) response = mtc.create_hit(question=html_question, max_assignments=1, title="Enter the information on a receipt", description="Help research a topic", keywords="question, answer, research, receipt, data entry", duration=120, reward=0.10) # The response included several fields that will be helpful later hit_type_id = response[0].HITTypeId hit_id = response[0].HITId print("Your HIT has been created. You can see it at this link:") print("https://workersandbox.mturk.com/mturk/preview?groupId={}".format(hit_type_id)) print("Your HIT ID is: {}".format(hit_id)) ## Future Developments # Get responses from mturk and write them to our ledger file def ledger(): file = open('testfile.txt', 'a') file.write('{} ! {}\n'.format(form.date.data, form.note.data)) file.write(' {} {}\n'.format(form.to_account.data, form.to_amount.data)) file.write(' {} {}\n'.format(form.from_account.data, form.from_amount.data)) file.close() # Update form to allow adding of classes and payment types if __name__ == '__main__': app.run(debug=True)
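# --- Illustration (added, not part of the original app) ---
# A minimal sketch of how the X-Dropbox-Signature checked in webhook() above
# is computed: an HMAC-SHA256 hex digest of the raw request body keyed with
# the app secret. The secret and body below are placeholders for local testing.
def sign_webhook_body(body, app_secret):
    """Return the hex digest expected in the X-Dropbox-Signature header."""
    return hmac.new(app_secret.encode('UTF-8'), body, sha256).hexdigest()

# Example (hypothetical values):
#   body = json.dumps({'list_folder': {'accounts': ['dbid:AAA...']}}).encode()
#   requests.post('http://localhost:5000/webhook', data=body,
#                 headers={'X-Dropbox-Signature': sign_webhook_body(body, 'app-secret')})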
python
from pathlib import Path

from code_scanner.analysis_result import AnalysisResult, AnalyzedFile
from code_scanner.file_info import FileInfo
from code_scanner.filter_utils import PythonSourceLineFilter


def python_code_counter(root: Path, files: [FileInfo]) -> AnalysisResult:
    filtered_files: [AnalyzedFile] = []
    for file in files:
        original_lines = PythonSourceLineFilter().filter(file.full_name.read_text().split("\n"))
        lines = remove_comments(original_lines)
        filtered_files.append(AnalyzedFile(file.full_name, original_lines, lines))
    return AnalysisResult(filtered_files,
                          root,
                          line_num_sum(filtered_files, "original"),
                          line_num_sum(filtered_files, "filtered"))


def line_num_sum(analyzed_files: [AnalyzedFile], field_name: str) -> int:
    return sum(map(lambda f: len(getattr(f, field_name)), analyzed_files))


def remove_comments(lines: [str]) -> [str]:
    """Drop lines that are not real code.

    A line is skipped if it is blank, starts with '#' or 'print', or falls
    inside a block opened by a line starting with triple quotes and closed by
    a line ending with triple quotes.

    :param lines: source lines to filter
    :return: the remaining code lines
    """
    in_comment = False
    extracted = []
    for line in lines:
        trimmed = line.lower().strip()
        if trimmed == '' or trimmed.startswith("#") or trimmed.startswith("print"):
            continue
        if trimmed.startswith("'''") or trimmed.startswith('"""'):
            in_comment = True
        if not in_comment:
            extracted.append(line)
        if len(trimmed) > 3 and (trimmed.endswith("'''") or trimmed.endswith('"""')):
            in_comment = False
    return extracted
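# --- Usage sketch (added, not part of the original module) ---
# A minimal example of remove_comments() on a few toy lines; the snippet and
# the expected output are illustrative only.
if __name__ == '__main__':
    sample = [
        'import os',
        '# a comment-only line',
        '"""one-line docstring"""',
        'print("debug output")',
        'def answer():',
        '    return 42',
    ]
    print(remove_comments(sample))
    # -> ['import os', 'def answer():', '    return 42']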
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.

"""Contains a dict to validate the app configs"""

VALIDATE_DICT = {
    "num_workers": {
        "required": False,
        "valid_condition": lambda c: True if c >= 1 and c <= 50 else False,
        "invalid_msg": "num_workers must be in the range 1 <= 50"
    }
}
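# --- Usage sketch (added, not part of the original file) ---
# A minimal illustration of how app configs might be checked against
# VALIDATE_DICT; the validate() helper below is hypothetical, not part of the
# original module.
def validate(app_configs):
    """Return a list of error messages for options that fail their checks."""
    errors = []
    for option, rule in VALIDATE_DICT.items():
        if option not in app_configs:
            if rule["required"]:
                errors.append("{0} is required".format(option))
            continue
        if not rule["valid_condition"](app_configs[option]):
            errors.append(rule["invalid_msg"])
    return errors

# validate({"num_workers": 60})  # -> ["num_workers must be in the range 1 <= 50"]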
python