content: string (lengths 0 to 894k)
type: string (2 classes)
num = str(input()) digits = [int(i) for i in num] n = sorted(digits, reverse=True) print(n) if n[0] > n[1]: print(n[1]) else: buf = 0 for j in n: if n[buf] < n[0]: print(n[buf]) break else: buf += 1
python
import opensim import math import numpy as np import os from .utils.mygym import convert_to_gym import gym class Osim(object): # Initialize simulation model = None state = None state0 = None joints = [] bodies = [] brain = None maxforces = [] curforces = [] def __init__(self, model_path, visualize): self.model = opensim.Model(model_path) self.model.initSystem() self.brain = opensim.PrescribedController() # Enable the visualizer self.model.setUseVisualizer(visualize) self.muscleSet = self.model.getMuscles() self.forceSet = self.model.getForceSet() self.bodySet = self.model.getBodySet() self.jointSet = self.model.getJointSet() self.contactGeometrySet = self.model.getContactGeometrySet() for j in range(self.muscleSet.getSize()): func = opensim.Constant(1.0) self.brain.addActuator(self.muscleSet.get(j)) self.brain.prescribeControlForActuator(j, func) self.maxforces.append(self.muscleSet.get(j).getMaxIsometricForce()) self.curforces.append(1.0) self.model.addController(self.brain) def set_strength(self, strength): self.curforces = strength for i in range(len(self.curforces)): self.muscleSet.get(i).setMaxIsometricForce(self.curforces[i] * self.maxforces[i]) def get_body(self, name): return self.bodySet.get(name) def get_joint(self, name): return self.jointSet.get(name) def get_muscle(self, name): return self.muscleSet.get(name) def get_contact_geometry(self, name): return self.contactGeometrySet.get(name) def get_force(self, name): return self.forceSet.get(name) def initializeState(self): self.state = self.model.initializeState() class Spec(object): def __init__(self, *args, **kwargs): self.id = 0 self.timestep_limit = 1000 class OsimEnv(gym.Env): stepsize = 0.01 integration_accuracy = 1e-3 timestep_limit = 1000 test = False action_space = None observation_space = None osim_model = None istep = 0 model_path = "" visualize = False ninput = 0 noutput = 0 last_action = None spec = None metadata = { 'render.modes': ['human'], 'video.frames_per_second' : 50 } def __getstate__(self): state = self.__dict__.copy() del state['osim_model'] print ("HERE1") return state def __setstate__(self, newstate): self.__dict__.update(newstate) self.osim_model = Osim(self.model_path, True) self.configure() def angular_dist(self, t,s): x = (t-s) % (2*math.pi) return min(x, 2*math.pi-x) def compute_reward(self): return 0.0 def is_done(self): return False def terminate(self): pass def __init__(self, visualize = True, noutput = None): self.visualize = visualize self.osim_model = Osim(self.model_path, self.visualize) self.noutput = noutput if not noutput: self.noutput = self.osim_model.muscleSet.getSize() if not self.action_space: self.action_space = ( [0.0] * self.noutput, [1.0] * self.noutput ) if not self.observation_space: self.observation_space = ( [-math.pi] * self.ninput, [math.pi] * self.ninput ) self.action_space = convert_to_gym(self.action_space) self.observation_space = convert_to_gym(self.observation_space) self.spec = Spec() self.horizon = self.spec.timestep_limit self.configure() # self.reset() def configure(self): pass def _reset(self): self.istep = 0 self.osim_model.initializeState() return self.get_observation() def sanitify(self, x): if math.isnan(x): return 0.0 BOUND = 1000.0 if x > BOUND: x = BOUND if x < -BOUND: x = -BOUND return x def activate_muscles(self, action): if np.any(np.isnan(action)): raise ValueError("NaN passed in the activation vector. 
Values in [0,1] interval are required.") brain = opensim.PrescribedController.safeDownCast(self.osim_model.model.getControllerSet().get(0)) functionSet = brain.get_ControlFunctions() for j in range(functionSet.getSize()): func = opensim.Constant.safeDownCast(functionSet.get(j)) func.setValue( float(action[j]) ) def _step(self, action): self.last_action = action self.activate_muscles(action) # Integrate one step manager = opensim.Manager(self.osim_model.model) manager.setInitialTime(self.stepsize * self.istep) manager.setFinalTime(self.stepsize * (self.istep + 1)) try: manager.integrate(self.osim_model.state) except Exception as e: print (e) return self.get_observation(), -500, True, {} self.istep = self.istep + 1 res = [ self.get_observation(), self.compute_reward(), self.is_done(), {} ] return res def _render(self, mode='human', close=False): return
python
import sys import os import numpy as np import math from oct2py import octave from extract_feature import get_sequence, calc_z_curve, z_curve_fft if __name__=='__main__': taxonomy= sys.argv[1] fft_length= int(sys.argv[2]) time_length= int(sys.argv[3]) file_list= list(filter(lambda x: 'fna' == x[-3:], os.listdir(taxonomy))) for seq_file in file_list: print(seq_file) seqs= get_sequence(taxonomy+'/'+seq_file, 1000)#1000 is not very meaningfull. if len(seqs)==0: continue feature_idx= 1 seqs= list(filter(lambda x: len(x) > fft_length* time_length, seqs)) for seq in seqs: for sub_seq_idx in range(int(len(seq)/(fft_length*time_length))): cur_seqs= seq[sub_seq_idx*fft_length*time_length: (sub_seq_idx+1)*fft_length*time_length] cur_seqs= np.reshape(list(cur_seqs), (time_length, fft_length)).tolist() cur_ffts=[] for cur_seq in cur_seqs: z_curve= calc_z_curve(cur_seq) fft_result= z_curve_fft(z_curve) cur_ffts.append(fft_result) print(seq_file+"_"+str(feature_idx)) np.save(taxonomy+'/'+seq_file+'_'+str(feature_idx)+'_'+str(fft_length)+'_'+str(time_length), np.array(cur_ffts, dtype='f')) feature_idx+= 1
python
from .gpib_bus_server import GPIBBusServer from .gpib_device_manager import GPIBDeviceManager
python
import numpy as np class Convolution(): def initalizeParams(self): self.W = np.random.randn(self.shape[0],self.shape[1],self.shape[2],self.shape[3]) self.b = np.zeros([1,self.ksize]) # Initialize a matrix with the same shape as W, used in convAdd # self.wConvAdd = np.zeros(self.windowWidth,self.windowHeight,self.ksize) # for i in range(self.windowWidth): # for j in range(self.windowHeight): # self.wConvAdd[i,j,:] = 1 def __init__(self,ids,shape,ifOutput,preLayer): self.name = 'convolution' self.firstLayer = False self.ids = ids self.shape = shape # self.ksize = ksize self.samples = preLayer.A.shape[0] self.ifOutput = ifOutput self.preLayer = preLayer self.inputWidth = self.preLayer.A.shape[1] self.inputHeight = self.preLayer.A.shape[2] self.windowWidth = self.shape[0] self.windowHeight = self.shape[1] self.outputWidth = self.inputWidth - self.windowWidth + 1 self.outputHeight = self.inputHeight - self.windowHeight + 1 self.ksize = self.shape[3] # print ("input dx,dy:(%d,%d),output dx,dy:(%d,%d),kernel size:%d"%(self.inputWidth, # self.inputHeight, # self.outputWidth, # self.outputHeight, # self.ksize)) self.initalizeParams() def convAdd(self,sameMatrix): # Vectorized accumulation over the window of the same-shaped sub-matrix and the weight matrix result = 0.0 for i in range(self.windowWidth): for j in range(self.windowHeight): result += sameMatrix[:,i,j,i,j,:] # print("result" + str(result.shape)) return result def computeForward(self,model): # print("begin") temp_x = np.dot(self.preLayer.A,self.W) + self.b # print('temo_x:' + str(temp_x.shape)) self.A = np.zeros([self.samples,self.outputWidth, self.outputHeight, self.ksize]) for i in range(self.inputWidth - self.windowWidth + 1): for j in range(self.inputHeight-self.windowHeight + 1): sameMatrix = temp_x[:,i:i + self.windowWidth,j:j + self.windowHeight,:,:,:] self.A[:,i,j] = self.convAdd(sameMatrix=sameMatrix) # print(self.A) # print('forward done!') del(temp_x) def computeBackward(self,model): def computeDWAndDXAndDb(): dW = np.zeros_like(self.W) dX = np.zeros_like(self.preLayer.A) db = np.zeros_like(self.b) # Iterate over all of dZ, accumulating dW and dX step by step for i in range(dZ.shape[1]): for j in range(dZ.shape[2]): dz = dZ[:,i,j,:] # 8 x 10 for m in range(self.windowWidth): for n in range(self.windowHeight): dW[m,n,:,:] += np.dot(self.preLayer.A[:,i+m,j+n,:].T,dz) # 100 x10 = 100 x 8 x 8 x 10 dX[:,i+m,j+n,:] += np.dot(dz,self.W[m,n,:,:].T) # 8 x 100 = 8 x 10 x 10 x 100 db += np.dot(np.ones([1,self.samples]),dz) return dW,dX,db dZ = model.layers[self.ids+1].dX self.dW,self.dX,self.db = computeDWAndDXAndDb() def update(self,lr): self.W -= lr * self.dW self.b -= lr * self.db
python
""" Owner: Noctsol Contributors: N/A Date Created: 2021-10-24 Summary: Just here for messing around. """ # import os # DATA_DIR = "src/data/" # with open(os.path.join(DATA_DIR, "VERSION"), "w", encoding="utf-8") as fh: # fh.write(f"2.8.8\n")
python
""" Test No Operation Operator """ import os import sys sys.path.insert(1, os.path.join(sys.path[0], '..')) from gva.flows.operators import NoOpOperator try: from rich import traceback traceback.install() except ImportError: pass def test_noop_operator(): in_d = {'a':1} in_c = {'b':2} n = NoOpOperator(print_message=True) d, c = n.execute(in_d,in_c) assert d == in_d assert c == in_c if __name__ == "__main__": test_noop_operator() print('okay')
python
# -*- coding: utf-8 -*- """ This is the config-loading and json-loading module which loads and parses the config file as well as the json file. It handles the [General]-Section of the config. All object-getters create deepcopies. """ import logging from copy import deepcopy import hjson try: import ConfigParser except ImportError: import configparser as ConfigParser from ast import literal_eval from scrapy.utils.log import configure_logging import os class CrawlerConfig(object): """ The actual class. First parameter: config-file. This class is a singleton-class, Usage: First creation and loading of the config-file: c = CrawlerConfig.get_instance() c.setup(<config_file>) Further using: c = CrawlerConfig.get_instance() """ # singleton-helper-class # Source: http://code.activestate.com/recipes/52558-the-singleton-pattern-implemented-with-python/#c4 class SingletonHelper(object): """The singleton-helper-class""" # https://pythontips.com/2013/08/04/args-and-kwargs-in-python-explained/ def __call__(self, *args, **kw): if CrawlerConfig.instance is None: CrawlerConfig.instance = CrawlerConfig() return CrawlerConfig.instance # singleton-helper-variable + function get_instance = SingletonHelper() instance = None # Here starts the actual class log = None log_output = [] sections = None parser = None __current_section = None __scrapy_options = None __config = None def __init__(self): """ The constructor (keep in mind: this is a singleton, so just called once) """ if CrawlerConfig.instance is not None: self.log_output.append( {"level": "error", "msg": "Multiple instances of singleton-class"}) raise RuntimeError('Multiple instances of singleton-class') def setup(self, filepath): """ Setup the actual class. :param str filepath: path to the config-file (including file-name) """ if self.log is not None: self.log.warning("Disallowed multiple setup of config.") return self.log = logging.getLogger(__name__) self.parser = ConfigParser.RawConfigParser() self.parser.read(filepath) self.sections = self.parser.sections() self.log_output.append( {"level": "info", "msg": "Loading config-file (%s)" % filepath}) self.load_config() self.handle_logging() def load_config(self): """ Loads the config-file """ self.__config = {} # Parse sections, its options and put it in self.config. 
for section in self.sections: self.__config[section] = {} options = self.parser.options(section) # Parse options of each section for option in options: try: opt = self.parser \ .get(section, option) try: self.__config[section][option] = literal_eval(opt) except (SyntaxError, ValueError): self.__config[section][option] = opt self.log_output.append( {"level": "debug", "msg": "Option not literal_eval-parsable" " (maybe string): [{0}] {1}" .format(section, option)}) if self.__config[section][option] == -1: self.log_output.append( {"level": "debug", "msg": "Skipping: [%s] %s" % (section, option)} ) except ConfigParser.NoOptionError as exc: self.log_output.append( {"level": "error", "msg": "Exception on [%s] %s: %s" % (section, option, exc)} ) self.__config[section][option] = None def get_scrapy_options(self): """ :return: all options listed in the config section 'Scrapy' """ if self.__scrapy_options is None: self.__scrapy_options = {} options = self.section("Scrapy") for key, value in options.items(): self.__scrapy_options[key.upper()] = value return self.__scrapy_options def handle_logging(self): """ To allow devs to log as early as possible, logging will already be handled here """ configure_logging(self.get_scrapy_options()) # Disable duplicates self.__scrapy_options["LOG_ENABLED"] = False # Now, after log-level is correctly set, lets log them. for msg in self.log_output: if msg["level"] is "error": self.log.error(msg["msg"]) elif msg["level"] is "info": self.log.info(msg["msg"]) elif msg["level"] is "debug": self.log.debug(msg["msg"]) def config(self): """ Get the whole config as a dict. :returns: The whole config as dict[section][option] (all lowercase) :rtype: dict """ return deepcopy(self.__config) def section(self, section): """ Get the whole section of a the config. :param section (string): The section to get all the options from. :return dict[option] (all lowercase) """ return deepcopy(self.__config[section]) def set_section(self, section): """ Sets the current section to get the options from. :param section (string) """ self.__current_section = section def option(self, option): """ Gets the option, set_section needs to be set before. :param option (string): The option to get. :return mixed: The option from from the config. """ if self.__current_section is None: raise RuntimeError('No section set in option-getting') return self.__config[self.__current_section][option] def get_working_path(self): """ Gets the working path. If the path starts with a ~, this will be replaced by the current user's home path. :return: """ self.set_section('Files') raw_path = self.option("working_path") if raw_path.startswith('~'): raw_path = os.path.expanduser('~') + raw_path[1:] return raw_path class JsonConfig(object): """ The actual class. First parameter: config-file. This class is a singleton-class, Usage: First creation and loading of the config-file: c = JsonConfig.get_instance() c.setup(<config_file>) Further using: c = JsonConfig.get_instance() """ # singleton-helper-class # Source: http://code.activestate.com/recipes/52558-the-singleton-pattern-implemented-with-python/#c4 class SingletonHelper(object): """The singleton-helper-class""" def __call__(self, *args, **kw): if JsonConfig.instance is None: JsonConfig.instance = JsonConfig() return JsonConfig.instance # singleton-helper-variable + function get_instance = SingletonHelper() instance = None # Here starts the actual class! 
log = None __json_object = None def __init__(self): """ The constructor (keep in mind: this is a singleton, so just called once) """ self.log = logging.getLogger(__name__) if JsonConfig.instance is not None: self.log.error('Multiple instances of singleton-class') raise RuntimeError('Multiple instances of singleton-class') def setup(self, filepath): """ Setup the actual class. :param str filepath: path to the config-file (including file-name) """ self.log.debug("Loading JSON-file (%s)", filepath) self.load_json(filepath) def load_json(self, filepath): """ Loads the JSON-file from the filepath. :param filepath (string): The location of the JSON-file. """ self.__json_object = hjson.load(open(filepath, 'r')) def config(self): """ Get the whole JSON as a dict. :return dict """ return deepcopy(self.__json_object) def get_site_objects(self): """ Get the object containing all sites. :return sites (dict): The sites from the JSON-file """ return deepcopy(self.__json_object["base_urls"]) def get_url_array(self): """ Get all url-objects in an array :return sites (array): The sites from the JSON-file """ urlarray = [] for urlobjects in self.__json_object["base_urls"]: urlarray.append(urlobjects["url"]) return urlarray
python
import itertools import collections from pyclts import CLTS from pycldf import Sources from clldutils.misc import nfilter, slug from clldutils.color import qualitative_colors from clld.cliutil import Data, bibtex2source from clld.db.meta import DBSession from clld.db.models import common from clld.lib import bibtex from nameparser import HumanName import tppsr from tppsr import models def iteritems(cldf, t, *cols): # pragma: no cover cmap = {cldf[t, col].name: col for col in cols} for item in cldf[t]: for k, v in cmap.items(): item[v] = item[k] yield item def main(args): # pragma: no cover data = Data() clts = CLTS(input('Path to cldf-clts/clts:') or '../../cldf-clts/clts') ds = data.add( common.Dataset, tppsr.__name__, id=tppsr.__name__, name='Tableaux phonétiques des patois suisses romands Online', domain='tppsr.clld.org', contact="list@shh.mpg.de", publisher_name="Max Planck Institute for Evolutionary Anthropology", publisher_place="Leipzig", publisher_url="https://www.eva.mpg.de", license="https://creativecommons.org/licenses/by/4.0/", jsondata={ 'license_icon': 'cc-by.png', 'license_name': 'Creative Commons Attribution 4.0 International License'}, ) for i, name in enumerate(['Hans Geisler', 'Robert Forkel', 'Johann-Mattis List']): common.Editor( dataset=ds, ord=i, contributor=common.Contributor(id=slug(HumanName(name).last), name=name) ) contrib = data.add( common.Contribution, None, id='cldf', name=args.cldf.properties.get('dc:title'), description=args.cldf.properties.get('dc:bibliographicCitation'), ) for lang in iteritems(args.cldf, 'LanguageTable', 'id', 'name', 'latitude', 'longitude'): data.add( models.Variety, lang['id'], id=lang['Number'], name=lang['name'], description=lang['FullName'], latitude=lang['latitude'], longitude=lang['longitude'], canton=lang['Canton'], group=lang['DialectGroup'], recorded=lang['DateOfRecording'], population=int(lang['Population']) if lang['Population'] else None, speaker_age=int(lang['SpeakerAge']) if lang['SpeakerAge'] else None, speaker_proficiency=lang['SpeakerProficiency'], speaker_language_use=lang['SpeakerLanguageUse'], speaker_gender=lang['SpeakerGender'], investigators=lang['Investigators'], ) colors = qualitative_colors(len(set(l.canton for l in data['Variety'].values())), set='tol') for i, (_, langs) in enumerate(itertools.groupby( sorted(data['Variety'].values(), key=lambda l: l.canton), lambda l: l.canton, )): for lang in langs: lang.update_jsondata(color=colors[i]) for rec in bibtex.Database.from_file(args.cldf.bibpath, lowercase=True): data.add(common.Source, rec.id, _obj=bibtex2source(rec)) refs = collections.defaultdict(list) for param in iteritems(args.cldf, 'ParameterTable', 'id', 'concepticonReference', 'name'): data.add( models.Concept, param['id'], id=param['Number'], number=int(param['Number']), name='{} [{}]'.format(param['name'], param['Number']), latin_gloss=param['Latin_Gloss'], french_gloss=param['French_Gloss'], concepticon_id=param['concepticonReference'], concepticon_gloss=param['Concepticon_Gloss'], concepticon_concept_id=param['id'].split('_')[0], ) inventories = collections.defaultdict(set) scan_url_template = args.cldf['FormTable', 'Scan'].valueUrl for form in iteritems(args.cldf, 'FormTable', 'id', 'value', 'form', 'languageReference', 'parameterReference', 'source'): if not form['form']: continue inventories[form['languageReference']] = inventories[form['languageReference']].union(form['Segments']) vsid = (form['languageReference'], form['parameterReference']) vs = data['ValueSet'].get(vsid) if not vs: vs = 
data.add( common.ValueSet, vsid, id='-'.join(vsid), language=data['Variety'][form['languageReference']], parameter=data['Concept'][form['parameterReference']], contribution=contrib, ) for ref in form.get('source', []): sid, pages = Sources.parse(ref) refs[(vsid, sid)].append(pages) f = data.add( models.Form, form['id'], # Gauchat-1925-480-1_ id=form['id'], name=form['form'].replace('+', ' '), description=form['value'], segments=' '.join(form['Segments']), valueset=vs, scan=scan_url_template.expand(**form), prosodic_structure=form['ProsodicStructure'], ) for example in args.cldf['ExampleTable']: sentence = models.Phrase( id=example['ID'], language=data['Variety'][example['Language_ID']], name=example['Primary_Text'], description=example['Translated_Text'], original_script=example['Alt_Transcription'], ) for cid in example['Concept_ID']: DBSession.add(models.ConceptSentence(concept=data['Concept'][cid], sentence=sentence)) for fid in example['Form_ID']: DBSession.add(common.ValueSentence(value=data['Form'][fid], sentence=sentence)) for lid, inv in inventories.items(): inv = [clts.bipa[c] for c in inv] data['Variety'][lid].update_jsondata( inventory=[(str(c), c.name) for c in inv if hasattr(c, 'name')]) for (vsid, sid), pages in refs.items(): DBSession.add(common.ValueSetReference( valueset=data['ValueSet'][vsid], source=data['Source'][sid], description='; '.join(nfilter(pages)) )) def prime_cache(args): """If data needs to be denormalized for lookup, do that here. This procedure should be separate from the db initialization, because it will have to be run periodically whenever data has been updated. """
python
# coding: utf-8 from .mecab_read import read_mecab_data from collections import defaultdict def Q_036(): """ 36. Word frequency Find the words that appear in the text and their frequencies, and list them in descending order of frequency. """ data = read_mecab_data('data/neko.txt.mecab') noun_phrase_set = defaultdict(lambda: 0) for sent in data: for word in sent: noun_phrase_set[word['surface']] += 1 return [(k, v) for k, v in sorted(noun_phrase_set.items(), key=lambda x:x[1], reverse=True)]
python
# -*- coding: utf-8 -*- import os import shutil import yaml # logging related packages import logging from logging.handlers import RotatingFileHandler PROJECT_DIR = os.path.dirname(os.path.realpath(__file__)) DebugConf = True #DebugConf = False model_logger = logging.getLogger('bart-web') formatter = logging.Formatter('[%(asctime)s][pid:%(process)s-tid:%(thread)s] %(module)s.%(funcName)s: %(levelname)s: %(message)s') # StreamHandler for print log to console hdr = logging.StreamHandler() hdr.setFormatter(formatter) hdr.setLevel(logging.DEBUG) #level at debug, which output debug info and error information to screen according to level of information # RotatingFileHandler ## Set log dir abs_path = os.path.dirname(os.path.abspath(__file__)) log_dir_path = abs_path + '/usercase/log' if not os.path.exists(log_dir_path): os.makedirs(log_dir_path) ## Specific file handler fhr_model = RotatingFileHandler('%s/bartweb_backend.log'%(log_dir_path), maxBytes=10*1024*1024, backupCount=3) fhr_model.setFormatter(formatter) fhr_model.setLevel(logging.DEBUG) #level at debug, which output debug info and error information to screen according to level of information model_logger.addHandler(fhr_model) if DebugConf: model_logger.addHandler(hdr) model_logger.setLevel(logging.DEBUG) else: model_logger.setLevel(logging.ERROR) if __name__ == '__main__': ''' Usage: from tools.log_tools import data_process_logger as logger logger.debug('debug debug') ''' model_logger.info('Ohhh model') model_logger.error('error model')
python
from sanic.app import Sanic from sanic.blueprints import Blueprint __version__ = "19.6.0" __all__ = ["Sanic", "Blueprint"]
python
from django.contrib.auth.mixins import LoginRequiredMixin,UserPassesTestMixin from django.contrib.auth.models import User from django.views.generic import ListView,DetailView from .models import Rating,Post from .forms import PostForm,RatingForm from django.contrib.auth.decorators import login_required from django.shortcuts import render, redirect, get_object_or_404 from django.http import HttpResponseRedirect from users.models import Profile from django.http import JsonResponse # Create your views here. class PostListView(LoginRequiredMixin,ListView): model=Post context_object_name = 'projects' ordering = ['-date_posted'] class UserPostListView(ListView,LoginRequiredMixin): model = Post # <app>/<model>_<viewtype>.html image_list.html context_object_name = 'images' paginate_by = 7 def get_queryset(self): user = get_object_or_404(User, username=self.kwargs.get('username')) return Image.objects.filter(author=user).order_by('-date_posted') @login_required(login_url='login') def project(request,project_id): current_user = request.user try: project = Post.objects.get(id=project_id) except Project.DoesNotExist: raise ObjectDoesNotExist() total_design = 0 total_usability = 0 total_creativity = 0 total_content = 0 overall_score = 0 ratings = Rating.objects.filter(project=project_id) if len(ratings) > 0: users = len(ratings) else: users = 1 design = list(Rating.objects.filter(project=project_id).values_list('design',flat=True)) usability = list(Rating.objects.filter(project=project_id).values_list('usability',flat=True)) creativity = list(Rating.objects.filter(project=project_id).values_list('creativity',flat=True)) content = list(Rating.objects.filter(project=project_id).values_list('content',flat=True)) total_design=sum(design)/users total_usability=sum(usability)/users total_creativity=sum(creativity)/users total_content=sum(content)/users overall_score=(total_design+total_content+total_usability+total_creativity)/4 project.design = total_design project.usability = total_usability project.creativity = total_creativity project.content = total_content project.overall = overall_score project.save() if request.method == 'POST': form = RatingForm(request.POST, request.FILES) if form.is_valid(): rating = form.save(commit=False) rating.project= project if not Rating.objects.filter( project=project).exists(): rating.overall_score = (rating.design+rating.usability+rating.creativity+rating.content)/4 rating.save() else: form = RatingForm() return render(request, "awward/post_detail.html",{"project":project, "ratings":ratings,"form":form, 'total_design':total_design, 'total_usability':total_usability, 'total_creativity':total_creativity, 'total_content':total_content}) @login_required(login_url='login') def search_project(request): if request.method == 'GET': title = request.GET.get("title") results = Post.objects.filter(title__icontains=title).all() message = f'name' params = { 'results': results, 'message': message } return render(request, 'awward/results.html', params) else: message = "You haven't searched for any image category" return render(request, 'awward/results.html', {'message': message}) @login_required(login_url='login') def upload(request): current_user = request.user profile =Profile.objects.get(user=current_user) if request.method == 'POST': form = PostForm(request.POST, request.FILES) if form.is_valid(): image = form.save(commit=False) image.author = current_user image.save() return redirect('projects') else: form = PostForm() return render(request, 'awward/post_form.html', {'form': 
form,'profile':profile})
python
from libfmp.b import plot_matrix import numpy as np from numba import jit import matplotlib.pyplot as plt from synctoolbox.feature.filterbank import FS_PITCH, generate_list_of_downsampled_audio, get_fs_index, filtfilt_matlab,\ generate_filterbank PITCH_NAME_LABELS = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', 'C0 ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', 'C1 ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', 'C2 ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', 'C3 ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', 'C4 ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', 'C5 ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', 'C6 ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', 'C7 ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', 'C8 ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', 'C9 '] def audio_to_pitch_features(f_audio: np.ndarray, Fs: float = 22050, feature_rate: int = 50, midi_min: int = 21, midi_max: int = 108, tuning_offset: int = 0, verbose: bool = False) -> np.ndarray: """Computes pitch-based features via an IIR filterbank aggregated as STMSP (short-time mean-square power). The signal is decomposed into subbands that correspond to MIDI pitches between midi_min and midi_max. In the output array, each row corresponds to one MIDI pitch. Per convention, the output has size 128xN. Only the rows between ``midi_min`` and ``midi_max`` are filled, the rest contains zeros. Parameters ---------- f_audio : np.ndarray One dimensional audio array (mono) Fs : float Sampling rate of ``f_audio`` (in Hz) feature_rate: int Features per second midi_min : int Minimum MIDI index (indices below ``midi_min`` are filled with zero in the output) midi_max : int Maximum MIDI index (indices above ``midi_max`` are filled with zero in the output) tuning_offset : int Tuning offset used to shift the filterbank (in cents) verbose : bool Set `True` to activate the visualization of features Returns ------- f_pitch : np.ndarray [shape=(128, N)] Matrix containing the extracted pitch-based features """ if verbose: print("Generating filterbank...") h = generate_filterbank(semitone_offset_cents=tuning_offset) if verbose: print("Downsampling signal...") wav_ds = generate_list_of_downsampled_audio(f_audio) # Compute features for all pitches wav_size = f_audio.size win_len_STMSP = Fs / feature_rate * 2 step_size = int(win_len_STMSP / 2) group_delay = np.round(win_len_STMSP / 2) # Group delay is adjusted seg_wav_start = np.concatenate([np.ones(1), np.arange(1, wav_size+1, step_size)]).astype(np.float64) seg_wav_stop = np.minimum(seg_wav_start + win_len_STMSP, wav_size) seg_wav_stop[0] = np.minimum(group_delay, wav_size) seg_wav_num = seg_wav_start.size f_pitch = np.zeros((128, seg_wav_num)) if verbose: print("Processing midi pitches", midi_min, "to", midi_max) for midi_pitch in range(midi_min, midi_max + 1): if verbose and midi_pitch % 10 == 0: print(midi_pitch, end="") else: print(".", end="") index = get_fs_index(midi_pitch) b = h[midi_pitch]['b'] a = h[midi_pitch]['a'] f_filtfilt = filtfilt_matlab(x=wav_ds[index], b=b, a=a) f_square = f_filtfilt ** 2 start = np.floor(seg_wav_start / Fs * FS_PITCH[index]).astype(np.int) # floor due to indexing stop = np.floor(seg_wav_stop / Fs * FS_PITCH[index]).astype(np.int) factor = Fs / FS_PITCH[index] __window_and_sum(f_pitch, f_square, midi_pitch, seg_wav_num, start, stop, factor) if verbose: print("") __visualize_pitch(f_pitch, feature_rate=feature_rate) plt.show() return f_pitch 
@jit(nopython=True) def __window_and_sum(f_pitch, f_square, midi_pitch, seg_wav_num, start, stop, factor): for k in range(seg_wav_num): # TODO this is extremely inefficient, can we use better numpy indexing to improve this? np.convolve? f_pitch[midi_pitch, k] = np.sum(f_square[start[k]:stop[k]]) * factor def __visualize_pitch(f_pitch: np.ndarray, midi_min: int = 21, midi_max: int = 108, feature_rate: float = 0, use_pitch_name_labels: bool = False, y_tick: np.ndarray = np.array([21, 30, 40, 50, 60, 70, 80, 90, 100], np.int)): f_image = f_pitch[midi_min:midi_max + 1, :] fig, ax, im = plot_matrix(X=f_image, extent=[0, f_pitch.shape[1]/feature_rate, midi_min, midi_max+1], title='Pitch Features', ylabel='MIDI Pitch', figsize=(9, 9), colorbar_aspect=50) pitchscale = np.arange(midi_min, midi_max + 1) ax[0].set_yticks(pitchscale[::2]) if use_pitch_name_labels: ax[0].set_yticks(np.arange(midi_min, midi_max + 1)) ax[0].set_yticklabels(PITCH_NAME_LABELS[midi_min-1:midi_max], fontsize=12) else: ax[0].set_yticks(pitchscale[::2]) ax[0].set_yticklabels(pitchscale[::2], fontsize=10)
python
"""Allows light-weight profiling of code execution.""" import time class Profiler: """Collects messages with timestamps so you can profile your code.""" def __init__(self): self.clear() def add_event(self, message): milliseconds = int(round(time.time() * 1000)) self._profile_events.append((message[0:30], milliseconds)) def clear(self): self._profile_events = [] def __str__(self): return self._get_profile() def _get_profile(self): output = [ "", "Message Run Time Total time", "---------------------------------------------------", ] rows = [] i = 0 previous_time = None net_time = 0 for message, time in self._profile_events: if i is not 0: t = time - previous_time net_time += t rows[i - 1][1] = t previous_time = time rows.append([message, 0, net_time]) i += 1 for row in rows: output.append('%-30s %-8s %10s' % (row[0], row[1], row[2])) return "\n".join(output)
python
#!/usr/bin/env python """ Code for Harris corner detection. """ import cv2 import numpy as np def interactive_harris(title, img): cv2.imshow(title, img) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) def update_harris(pos): bs_i = cv2.getTrackbarPos('bs', title) ks_i = cv2.getTrackbarPos('ks', title) k_i = cv2.getTrackbarPos('k', title) odds = [2*x+1 for x in range(100)] bs = odds[bs_i] ks = odds[ks_i] k = k_i harris = cv2.cornerHarris(gray, blockSize=bs, ksize=ks, k=k) harris = cv2.normalize(harris, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX).astype(np.uint8) print "%s :: bs=%d, ks=%d, k=%d" % (title, bs, ks, k) cv2.imshow(title, np.vstack((harris,gray))) cv2.createTrackbar('bs', title, 0, 20, update_harris) cv2.createTrackbar('ks', title, 0, 15, update_harris) cv2.createTrackbar('k', title, 0, 100, update_harris) update_harris(None) if __name__ == '__main__': digits = cv2.imread('../images/digits.png') interactive_harris('digits', digits) symbols = cv2.imread('../images/symbols.png') interactive_harris('symbols', symbols) print "Done. Press enter." cv2.waitKey()
python
import logging logging.basicConfig(level=logging.DEBUG) from experiments_seminar_2 import ptl_wandb_run_builder if __name__ == "__main__": """ Best fit with multiple orders """ config_dict = { "env": { "num_dcs": 3, "num_customers": 10, "num_commodities": 5, "orders_per_day": 4, # start with one, and then play with this. "dcs_per_customer": 2, "demand_mean": 500, "demand_var": 150, "num_steps": 30, # steps per episode "big_m_factor": 10000, # how many times the customer cost is the big m. # New parameters 2021 "version": "v2", "order_generator": "biased", "reward_function": "negative_cost", # big_m_diff }, "hps": { "env": "shipping-v0", # openai env ID. "episode_length": 150, # todo isn't this an env thing? "max_episodes": 10, # to do is this num episodes, is it being used? # "batch_size": 30, # "sync_rate": 2, # Rate to sync the target and learning network, not used with this agent "lr": 1e-3, "discount": 0.8, "epsilon": 0.01, "init_state_value": 0.001, }, "seed": 0, # "agent": "lookahead" # "agent": "tree_search" "agent": "best_fit" # "agent": "random_valid", } trainer, model = ptl_wandb_run_builder.create_ptl_shipping_allocation_rl_runner( config_dict, # run_mode="local_debug", run_mode="debug", experiment_name=f"{config_dict['agent']}_multicommodity_multiorder", project_name="rl_warehouse_assignment", ) trainer.fit(model)
python
# coding: utf-8 # # Tutorial 2 - MicaSense library # # This tutorial assumes you have gone through the [basic setup](./Micasense Image Processing Setup.html) and builds on the basic radiance, irradiance, and reflectance concepts and code covered in the [first tutorial](./MicaSense Image Processing Tutorial 1.html). # # In this tutorial, we will cover usage of the MicaSense python library to access images and groups of images. Most of the processing details are hidden away in the library, but the library code is open and available in the git repository. # # # Library Components # # In the first tutorial, we introduced `micasense.utils` which provided some helper functions for single image manipulation, and `micasense.plotutils` which provided some plotting helpers. # # For this second tutorial, we are going to introduce the usage of the included micasense libraries for opening, converting, and displaying images. This will allow us to discuss and visualize results at a high level, while the underlying source code is available for those interested in the implementation details. In some cases, the libraries themselves may be enough to implement a custom workflow without the need to re-implement or translate the code to another system or language. # # The library code provides some basic classes to manage image data. At the highest level is the `ImageSet`, which is able to load a list of files or recursively search a whole directory into data structures which are easy to access and manipulate. `ImageSet`s are made up of `Capture`s, which hold the set of (usually 5) images as they are simultaneously gathered by the RedEdge camera. Within `Capture`s are `Image`s, which hold a single image file and allow easy access to the image metadata. The `Image` class also provides the ability to extract metadata from individual images and to convert individual images in similar ways to those described in the first tutorial. # # For the rest of this article, we will look at each of the objects available starting with the single `Image` object, and work our way up to the whole `ImageSet`. Each section in this article is standalone, and can be copied into another workbook or edited in place to explore more of the functions associated with that object. # ## micasense.Image # # An image is the lowest level object. It represents the data in a single tiff file as taken by the camera. `Image` objects expose a set of data retrieval methods which provide access to raw, radiance, and reflectance corrected images, and to undistort any of those images. Note that when retrieving image data from an `Image` object, the data is stored internally in the object, increasing the object's memory footprint. If operating on a large number of images, it may be necessary to release this data memory after each image is processed to limit the program memory footprint. This can be done by calling the `Image.clear_image_data()` method. # In[ ]: import os import micasense.image as image get_ipython().run_line_magic('matplotlib', 'inline') image_path = os.path.join('.','data','0000SET','000','IMG_0000_1.tif') img = image.Image(image_path) img.plot_raw(); # ### Accessing `Image` Metadata # # Metadata for each image is available in the `Image.meta` parameter. This object is a `micasense.Metadata` object and can be accessed directly for image specific metadata extraction. Below, we print the same metadata values as we did in Tutorial #1, but using direct access to the `Metadata` object parameters. 
# # A notebook for experimenting with the `Image` class can be found [here](Images.html). # In[ ]: print('{0} {1} firmware version: {2}'.format(img.meta.camera_make(), img.meta.camera_model(), img.meta.firmware_version())) print('Exposure Time: {0} seconds'.format(img.meta.exposure())) print('Imager Gain: {0}'.format(img.meta.gain())) print('Size: {0}x{1} pixels'.format(img.meta.image_size()[0], img.meta.image_size()[1])) print('Band Name: {0}'.format(img.meta.band_name())) print('Center Wavelength: {0} nm'.format(img.meta.center_wavelength())) print('Bandwidth: {0} nm'.format(img.meta.bandwidth())) print('Capture ID: {0}'.format(img.meta.capture_id())) print('Flight ID: {0}'.format(img.meta.flight_id())) # ## micasense.Capture # # The `Capture` class is a container for `Image`s which allows access to metadata common to the group of images. The internal `Image` objects are accessible via the `capture.images` properties, and images in this list are kept sorted by the `band` property. Data which is different for each image can be accessed through composite methods, such as the `capture.dls_irradiance()` method, which returns a list of irradiances in band order. # In[ ]: import os, glob import micasense.capture as capture images_path = os.path.join('.','data','0000SET','000') image_names = glob.glob(os.path.join(images_path,'IMG_0000_*.tif')) cap = capture.Capture.from_filelist(image_names) cap.plot_radiance(); # ### Acessing `Capture` metadata # # Metadata which is common to all captures can be accessed via methods on the `Capture` object. Metadata which varies between the images of the capture, such as DLS information, is available as lists accessed from the capture object. # # <div class="alert alert-info"> # <strong>Note:</strong> The lists returned from metadata access on the `Capture` object are returned in `band_index` order. All images within a capture are sorted by the image `band_index`, and all lists adhere to this ordering. This ordering is consistent with the number at the end of each filename of a RedEdge image. # </div> # # Below we plot the raw and tilt compensated DLS irradiance by center wavelength and by band name. # # In[ ]: import matplotlib.pyplot as plt print(cap.band_names()) fig = plt.figure(figsize=(14,6)) plt.subplot(1,2,1) plt.scatter(cap.center_wavelengths(), cap.dls_irradiance()) plt.ylabel('Irradiance $(W/m^2/nm)$') plt.xlabel('Center Wavelength (nm)') plt.subplot(1,2,2) plt.scatter(cap.band_names(), [img.meta.exposure() for img in cap.images]) plt.xlabel('Band Names') plt.ylim([0,2.5e-3]) plt.ylabel('Exposure Time (s)') plt.show() # A notebook for experimenting with the `Capture` class can be found [here](Captures.html). # ## micasense.Panel # # The `Panel` class is a helper class which can automatically extract panel information from MicaSense calibrated reflectance panels by finding the QR code within an image and using the QR Code location and orientation information to find the lambertian panel area. The class then allows extraction of statistics from the panel area such as mean raw values, mean radiance, standard deviation, and the number of saturated pixels in the panel region. The panel object can be included standalone, or used within the context of a `Capture` object. # # <div class="alert alert-info"> # <strong>Note:</strong> For the automatic panel QR code finding functions of the library to work, zbar and it's python bindings must be installed. We have made every effort to ensure this fails gracefully if zbar isn't available. 
Unfortunately zbar is only available using Python 2.7, not Python 3. If you're using Python 3.x, the code available in '/micasense/panel.py' shows how to find QR codes in images and to find the panel area from the QR location. We're currently looking for Python QR code finding options that work across platforms and Python versions, let us know if you have one that supports location! # </div> # In[ ]: import os, glob import micasense.image as image import micasense.panel as panel image_path = os.path.join('.','data','0000SET','000','IMG_0000_1.tif') img = image.Image(image_path) # panelCorners - if we dont have zbar installed to scan the QR codes, detect panel manually and panelCorners = [[[809,613],[648,615],[646,454],[808,452]], [[772,623],[613,625],[610,464],[770,462]], [[771,651],[611,653],[610,492],[770,490]], [[829,658],[668,659],[668,496],[829,496]], [[807,632],[648,634],[645,473],[805,471]]] pnl = panel.Panel(img,panelCorners = panelCorners[0]) print("Panel found: {}".format(pnl.panel_detected())) print("Panel serial: {}".format(pnl.serial)) print("QR Code Corners:\n{}".format(pnl.qr_corners())) mean, std, count, saturated_count = pnl.raw() print("Panel mean raw pixel value: {}".format(mean)) print("Panel raw pixel standard deviation: {}".format(std)) print("Panel region pixel count: {}".format(count)) print("Panel region saturated pixel count: {}".format(count)) pnl.plot(); # A notebook for experimenting with the `Panel` class can be found [here](Panels.html) # ## micasense.ImageSet # # An `ImageSet` contains a group of `Capture`s. The captures can be loaded from image object, from a list of files, or by recursively searching a directory for images. # # Loading an `ImageSet` can be a time consuming process. It uses python multithreading under the hood to maximize cpu usage on multi-core machines. # In[ ]: from ipywidgets import FloatProgress from IPython.display import display f = FloatProgress(min=0, max=1) display(f) def update_f(val): f.value=val import micasense.imageset as imageset import os images_dir = os.path.join('.','data','0000SET') imgset = imageset.ImageSet.from_directory(images_dir, progress_callback=update_f) for cap in imgset.captures: print ("Opened Capture {} with bands {}".format(cap.uuid,[str(band) for band in cap.band_names()])) # ## Extended ImageSet examples # # A large group of images captured over a central California orchard are available for [download here](https://s3-us-west-2.amazonaws.com/sample.micasense.com/imageprocessing/RedEdgeImageSet.zip). # # With this set extracted to a working folder, the [extended ImageSet example](./ImageSets.html) notebook provides more usages of ImageSet data. # # # Conclusion # # In this tutorial, we have introduced the MicaSense library and provided some examples of opening Images, Captures, and ImageSets, as well as detecting and extracting panel information from images. # # The next tutorial covers basic usage of DLS information, and is available [here](./MicaSense%20Image%20Processing%20Tutorial%203.html) # --- # Copyright (c) 2017-2018 MicaSense, Inc. For licensing information see the [project git repository](https://github.com/micasense/imageprocessing)
python
""" For each Results/Final/LargeSet_20180106/ subfolder: alpha maxiter lsiterations population eliteprop mutantprop generations inheritance create list of results ex: alpha_results = { 'paramval': get from file, 'objfunc': [], 'objfunc_avg': value } for each file inside read and add data series alpha_results_x = "alpha param value" -> get from file name alpha_results_y = objective function append alpha_results_y to objfunc compute average and save in objfunc_avg plot and save plot to Results/Final/LargeSet_20180106/Plots alpha_plot.png """ import json import matplotlib.pyplot as plt import sys import os from matplotlib.backends.backend_pdf import PdfPages from random import randrange import re import traceback from datetime import datetime import argparse import operator import shutil import pprint pp = pprint.PrettyPrinter(indent=2) def chartPlot(plotname, savefilename, x, y, axisnames, labels): fig, ax = plt.subplots() xs = range(len(y)) x0 = xs[0] x1 = xs[-1] y1 = y[0] for ys in y: if ys < y1: y1 = ys ax.plot([x0, x1], [y1, y1], 'k-', c='r') plt.plot(range(len(y)),y, marker='.', color='b', ls='', label=labels[0]) plt.xticks(range(len(x)),x) plt.xlabel(axisnames[0]) plt.ylabel(axisnames[1]) ax.legend(loc='upper right', fontsize='small') #fig.subplots_adjust(bottom=0.9) fig.tight_layout() #plt.axis([0, len(results), 0, max(y)]) plt.savefig(os.path.join('..','LargeSet_graphs',savefilename + '.png')) plt.show() plt.close() # copy to Documentation folder savename2 = 'best-' + savefilename[18:] shutil.copy(os.path.join('..','LargeSet_graphs',savefilename + '.png'), os.path.join('..','..','..','Documentation','img',savename2 + '.png')) def buildCharts(parameters_list, name): """ parameters_list=[{ 'name': parameter, 'results': [{ 'paramval': paramval, 'objfunc': [objfunc], 'objfunc_avg': objfunc }, { 'paramval': paramval, 'objfunc': [objfunc], 'objfunc_avg': objfunc }, ... ] }, { 'name': parameter, 'results': [{ 'paramval': paramval, 'objfunc': [objfunc], 'objfunc_avg': objfunc }, { 'paramval': paramval, 'objfunc': [objfunc], 'objfunc_avg': objfunc }, ... 
] }, ] """ best_values = {} for elem in parameters_list: print(elem["name"]) if elem["name"] == "generation": pp.pprint(elem) elem["results"] = sorted(elem["results"], key=lambda k: k['paramval']) bestvalue = elem["results"][0]["paramval"] min_objfunc_avg = elem["results"][0]["objfunc_avg"] # if elem["name"] == "generation": # pp.pprint(elem) for paramval in elem["results"]: #print(paramval["paramval"]) #print(paramval["objfunc_avg"]) #avg verification thesum = 0 for objfs in paramval["objfunc"]: thesum += objfs theavg = thesum / len(paramval["objfunc"]) if round(theavg,2) != round(paramval["objfunc_avg"],2): print("----->Avg error!") print(theavg) print(paramval["objfunc_avg"]) print() if paramval["objfunc_avg"] < min_objfunc_avg: min_objfunc_avg = paramval["objfunc_avg"] bestvalue = paramval["paramval"] best_values[elem["name"]] = bestvalue # plotname # plotfilename # x x_ = [ x["paramval"] for x in elem["results"]] # y y_ = [ y["objfunc_avg"] for y in elem["results"]] # legend # axes thelabel = elem["name"] if elem["name"] == "lsiteration": thelabel = "failedIterations" elif elem["name"] == "maxIter": thelabel = "maxIterations" elif elem["name"] == "generation": thelabel = "generations" chartPlot( plotname=elem["name"], savefilename=name + "-" + elem["name"], x=x_, y=y_, axisnames=["Parameter values", "Average objective function"], labels=[thelabel]) print(best_values) with open(os.path.join('..','LargeSet_graphs','BestValues-') + name, 'w+' ) as fout: fout.write(json.dumps(best_values)) def extractParameterValue(parameter, filename): prefixes = { 'alpha': {'prefix': '-a', 'type': 'float'}, 'maxiter': {'prefix': '-i', 'type': 'int'}, 'lsiteration': {'prefix': '-lsit', 'type': 'int'}, 'generation': {'prefix': '-g', 'type': 'int'}, 'population': {'prefix': '-p', 'type': 'int'}, 'inheritance': {'prefix': '-i', 'type': 'float'}, 'eliteprop': {'prefix': '-e', 'type': 'float'}, 'mutantprop': {'prefix': '-m', 'type': 'float'}, } prefix = prefixes[parameter]["prefix"] i0 = filename.find('-i-ng') if i0 == -1: i0 = 0 else: i0 += len('-i-ng') i1 = filename[i0:].find(prefix) i2 = i0 + i1 + len(prefix) i3 = filename[i2:].find('-') if i3 == -1: i3 = filename[i2:].find('.json') value = filename[i2:i2 + i3] if prefixes[parameter]["type"] == "float": try: value = float(value) except: print(parameter) print(prefix) print(i0) print(i1) print(i2) print(i2 + i3) print(filename) print(value) exit() else: value = int(value) return value def parsefile(fileobject, parameters_list, parameter, filename): paramval = 0 objfunc = 0 # get param value from filename paramval = extractParameterValue(parameter, filename) # if parameter == "generation" and not (paramval in [5,10 ,15 ,20 ]): # return # extract objective function results = json.load(fileobject) for elem in results: for k,v in elem.items(): if k == 'end': continue # get objective function objfunc = int(v['ObjectiveFunction']) break # add new result to parameters_list for elem in parameters_list: if elem["name"] == parameter: param_results = elem["results"] found = False for res in param_results: if res["paramval"] == paramval: found = True res["objfunc"].append(objfunc) l = len(res["objfunc"]) res["objfunc_avg"] = (res["objfunc_avg"] * (l - 1) + objfunc ) / l break if not found: param_results.append({ 'paramval': paramval, 'objfunc': [objfunc], 'objfunc_avg': objfunc }) break return if __name__ == '__main__': results_folder = os.path.join('..','..','Results','Final','LargeSet_20180106') parser = argparse.ArgumentParser() 
parser.add_argument("--folder",help="folder where to read results from") args = parser.parse_args() if args.folder: results_folder = os.path.join(args.folder,'data') os.chdir(results_folder) parameters_list = [] for root, dirs, files in os.walk("."): for folder in dirs: print(folder) parameter = folder parameter_results = { 'name': parameter, 'results': [] } parameters_list.append(parameter_results) for result in files: parameter = root.split('/')[-1] if not result.endswith(".json"): continue filepath = os.path.join(root,result) with open(filepath,'r+') as f: try: #print(os.path.join(root,result)) parsefile(f, parameters_list, parameter, result) except Exception: print() print("Exception in " + result) print("-"*60) traceback.print_exc(file=sys.stdout) print("-"*60) #print(parameters_list) buildCharts(parameters_list, '{0:%Y%m%d_%H-%M-%S}'.format(datetime.now()) )
python
""" Problem: You come across a dictionary of sorted words in a language you've never seen before. Write a program that returns the correct order of letters in this language. For example, given ['xww', 'wxyz', 'wxyw', 'ywx', 'ywz'], you should return ['x', 'z', 'w', 'y']. """ from typing import Dict, List, Optional, Set def update_letter_order(sorted_words: List[str], letters: Dict[str, Set[str]]) -> None: order = [] new_words = {} prev_char = None for word in sorted_words: if word: char = word[0] if char != prev_char: order.append(char) if char not in new_words: new_words[char] = list() new_words[char].append(word[1:]) prev_char = char for index, char in enumerate(order): letters[char] = letters[char] | set(order[index + 1 :]) for char in new_words: update_letter_order(new_words[char], letters) def find_path( letters: Dict[str, Set[str]], start: str, path: List[str], length: int ) -> Optional[List[str]]: if len(path) == length: return path if not letters[start]: return None for next_start in letters[start]: new_path = find_path(letters, next_start, path + [next_start], length) if new_path: return new_path def get_letter_order(sorted_words: List[str]): letters = {} for word in sorted_words: for letter in word: if letter not in letters: letters[letter] = set() update_letter_order(sorted_words, letters) max_children = max([len(x) for x in letters.values()]) potential_heads = [x for x in letters if len(letters[x]) == max_children] path = None for head in potential_heads: path = find_path(letters, head, path=[head], length=len(letters)) if path: break return path if __name__ == "__main__": print(get_letter_order(["xww", "wxyz", "wxyw", "ywx", "ywz"])) """ SPECS: TIME COMPLEXITY: O(words x letters + words ^ 2 + letters ^ 2) SPACE COMPLEXITY: O(words x letters) """
python
# -*- coding: utf-8 -*- from typing import List from decibel.tab_chord_parser.segment import Segment from decibel.tab_chord_parser.line_type import LineType from decibel.tab_chord_parser.line import Line from decibel.tab_chord_parser.system import System def find_systems(segment: Segment): system_nr = 0 system_line_nr = 0 while system_line_nr < len(segment.lines): line = segment.lines[system_line_nr] if line.line_type == LineType.ChordsAndLyrics: segment.add_system(System(system_nr)) segment.systems[system_nr].add_chords_and_lyrics_line(line) system_line_nr += 1 system_nr += 1 elif line.line_type == LineType.Chords: system = System(system_nr) segment.add_system(system) system.add_chords_line(line) system_line_nr += 1 if system_line_nr == len(segment.lines): break if segment.is_start_of_tab_block(system_line_nr): # Here is a tab block, but we ignore it as we already know the chords system_line_nr += 6 # If the tab block is followed by max. 3 subsequent lyrics lines, add the lyrics to the system nr_of_subsequent_lyrics_lines = segment.length_of_lyrics_block(system_line_nr) for subsequent_lyric_i in range(0, nr_of_subsequent_lyrics_lines): system.add_lyrics_line(segment.lines[system_line_nr + subsequent_lyric_i]) system_line_nr += nr_of_subsequent_lyrics_lines system_nr += 1 elif segment.is_start_of_tab_block(system_line_nr): # Add new system system = System(system_nr) segment.systems.append(system) tab_block_str = [block_line.content for block_line in segment.lines[system_line_nr:system_line_nr + 6]] system.add_tab_block(tab_block_str) system_line_nr += 6 # If the tab block is followed by max. 3 subsequent lyrics lines, add the lyrics to the system nr_of_subsequent_lyrics_lines = segment.length_of_lyrics_block(system_line_nr) for subsequent_lyric_i in range(0, nr_of_subsequent_lyrics_lines): system.add_lyrics_line(segment.lines[system_line_nr + subsequent_lyric_i]) system_line_nr += nr_of_subsequent_lyrics_lines system_nr += 1 else: system_line_nr += 1 def segment_line_list(line_list: List[Line]) -> List[Segment]: """ Takes a list of Lines and divides them into Segments, based on Empty LineTypes. Returns a list of them. :param line_list: List of Lines from a tab file :return: List of segments from a tab file """ result = [] segment_nr = 0 new_segment = True for line in line_list: if line.line_type == LineType.Empty: if not new_segment: new_segment = True segment_nr += 1 else: if new_segment: result.append(Segment(segment_nr)) new_segment = False result[segment_nr].add_line(line) for segment in result: find_systems(segment) return result
python
import argparse import asyncio import getpass import logging import os import sys import traceback import yaml import pkg_resources from aiohttp import web from colorlog import ColoredFormatter from pathlib import Path from rest_api.intkey_client import IntkeyClient from rest_api.exceptions import IntKeyCliException from rest_api.exceptions import IntkeyClientException from rest_api.route_handler import RouteHandler import config from zmq.asyncio import ZMQEventLoop from sawtooth_signing import create_context from sawtooth_signing.secp256k1 import Secp256k1PublicKey LOGGER = logging.getLogger(__file__) DISTRIBUTION_NAME = 'sawtooth-intkey' DEFAULT_URL = 'http://127.0.0.1:8008' ARGS = "" def create_console_handler(verbose_level): clog = logging.StreamHandler() formatter = ColoredFormatter( "%(log_color)s[%(asctime)s %(levelname)-8s%(module)s]%(reset)s " "%(white)s%(message)s", datefmt="%H:%M:%S", reset=True, log_colors={ 'DEBUG': 'cyan', 'INFO': 'green', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red', }) clog.setFormatter(formatter) if verbose_level == 0: clog.setLevel(logging.WARN) elif verbose_level == 1: clog.setLevel(logging.INFO) else: clog.setLevel(logging.DEBUG) return clog def setup_loggers(verbose_level): logger = logging.getLogger() logger.setLevel(logging.DEBUG) logger.addHandler(create_console_handler(verbose_level)) def create_parent_parser(prog_name): parent_parser = argparse.ArgumentParser(prog=prog_name, add_help=False) parent_parser.add_argument( '-v', '--verbose', action='count', help='enable more verbose output') # add bind port to run server parent_parser.add_argument( '-b', '--bind', help='identify host and port for api to run on', default='txt-rest-api:8000') parent_parser.add_argument( '--url', type=str, help='specify URL of REST API') parent_parser.add_argument( '--keyfile', type=str, help="identify file containing user's private key") parent_parser.add_argument( '--wait', nargs='?', const=sys.maxsize, type=int, help='set time, in seconds, to wait for transaction to commit') try: version = pkg_resources.get_distribution(DISTRIBUTION_NAME).version except pkg_resources.DistributionNotFound: version = 'UNKNOWN' parent_parser.add_argument( '-V', '--version', action='version', version=(DISTRIBUTION_NAME + ' (Hyperledger Sawtooth) version {}') .format(version), help='display version information') return parent_parser def generate_private_key(): context = create_context('secp256k1') private_key = context.new_random_private_key() private_key_hex = Secp256k1PublicKey.as_hex(private_key) real_user = getpass.getuser() home = os.path.expanduser("~") key_dir = os.path.join(home, ".sawtooth", "keys") path = key_dir + "/" + real_user + ".priv" Path(key_dir).mkdir(parents=True, exist_ok=True) f = open(path, "w") f.write(private_key_hex) f.close() def start_rest_api(host, port): loop = asyncio.get_event_loop() app = web.Application(loop=loop) client = _get_client(ARGS) handler = RouteHandler(loop, client) app.router.add_post('/invoke', handler.invoke_function) LOGGER.warning('Starting REST API on %s:%s', host, port) web.run_app( app, host=host, port=port, access_log=LOGGER) def _get_client(args, read_key_file=True): return IntkeyClient( url=DEFAULT_URL if args.url is None else args.url, keyfile=_get_keyfile(args) if read_key_file else None) def _get_keyfile(args): try: if args.keyfile is not None: return args.keyfile except AttributeError: return None real_user = getpass.getuser() home = os.path.expanduser("~") key_dir = os.path.join(home, ".sawtooth", "keys") return 
'{}/{}.priv'.format(key_dir, real_user) def main(prog_name=os.path.basename(sys.argv[0]), args=None): if args is None: args = sys.argv[1:] parser = create_parent_parser(prog_name) args = parser.parse_args(args) global ARGS ARGS = args if args.verbose is None: verbose_level = 0 else: verbose_level = args.verbose setup_loggers(verbose_level=verbose_level) loop = ZMQEventLoop() asyncio.set_event_loop(loop) try: host, port = args.bind.split(":") port = int(port) except ValueError: print("Unable to parse binding {}: Must be in the format" " host:port".format(args.bind)) sys.exit(1) config.init_config() start_rest_api(host, port) def main_wrapper(): # pylint: disable=bare-except try: generate_private_key() main() except (IntKeyCliException, IntkeyClientException) as err: print("Error: {}".format(err), file=sys.stderr) sys.exit(1) except KeyboardInterrupt: pass except SystemExit as e: raise e except: traceback.print_exc(file=sys.stderr) sys.exit(1) main_wrapper()
python
# -*- coding: utf-8 -*-


def str_dict(str_headers):
    """Convert a block of raw HTTP headers into a dict of header name -> value."""
    di = []
    try:
        for i in str_headers.split("\n"):
            he = i.split(": ", 1)
            if he != [""]:
                di.append(he)
        return dict(di)
    except ValueError as error:
        print("Please remove the request line: POST /xxx/xxx/xxx HTTP/1.1" + "\n" + "Error: %s" % error)
python
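A quick usage sketch for the header parser above (assuming str_dict from the snippet is in scope); the request line has to be stripped first, otherwise dict() raises the ValueError the function reports:

raw_headers = "Host: example.com\nUser-Agent: curl/7.68.0\nAccept: */*"
print(str_dict(raw_headers))
# {'Host': 'example.com', 'User-Agent': 'curl/7.68.0', 'Accept': '*/*'}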
#!/usr/local/bin/python3
import torch

# Element-wise, component-wise, point-wise:
# If the two tensors have the same shape, we can perform element-wise
# operations on them. +-*/ are all element-wise operations.

# Returns a tensor filled with random numbers from a uniform
# distribution on the interval [0, 1)
t1 = torch.rand((5, 5))
print(t1.shape)

t2 = torch.rand((5, 5))
print(t2.shape)

result = t1 + t2
print(result)

# Broadcasting works however, just like numpy.
result = result - 1

# A filter matrix
filterMatrix = result > 0
print("Filter Matrix is:\n", filterMatrix)

# Will print the values that were bigger than 0.
print("Values bigger than 0:\n", result[filterMatrix])

# For element-wise multiplication use *. For casting use result.int(), or double() etc.
print("Values bigger than 0 in their place as a matrix.\n", result * filterMatrix.int())

# We can get the same results using the in-built functions
# Greater equals
print("Greater equals to 0:\n", result, "\n", result.ge(0))

# Greater
print("Greater than 0:\n", result.gt(0))

t3 = torch.rand((5, 1))
# Because of Broadcasting, we can do the following operation.
print("Broadcasting works:\n", t3 + t1)
python
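As a side note to the masking pattern above, torch.where expresses the same "keep values above a threshold, zero the rest" idea in one call, and clamp is the idiomatic shortcut when the fill value equals the threshold. A small sketch, not part of the original tutorial:

import torch

x = torch.rand((5, 5)) - 0.5
kept = torch.where(x > 0, x, torch.zeros_like(x))  # same result as x * (x > 0).int()
clamped = x.clamp(min=0)                           # equivalent here because the fill value is 0
assert torch.equal(kept, clamped)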
import tensorflow as tf  # needed for tf.contrib.layers below
import tensorflow.contrib.learn as skflow
from sklearn import datasets, metrics

iris = datasets.load_iris()
classifier_model = skflow.LinearClassifier(
    feature_columns=[tf.contrib.layers.real_valued_column("", dimension=iris.data.shape[1])],
    n_classes=3)
classifier_model.fit(iris.data, iris.target)
score = metrics.accuracy_score(iris.target, classifier_model.predict(iris.data))
print("Accuracy: %f" % score)
python
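tf.contrib.learn was removed in TensorFlow 2.x, so the snippet above only runs on old 1.x releases. A dependency-light sketch that reproduces the same train-and-score-on-iris flow with scikit-learn alone (LogisticRegression is a stand-in linear model, not the original estimator):

from sklearn import datasets, metrics
from sklearn.linear_model import LogisticRegression

iris = datasets.load_iris()
model = LogisticRegression(max_iter=1000)
model.fit(iris.data, iris.target)
score = metrics.accuracy_score(iris.target, model.predict(iris.data))
print("Accuracy: %f" % score)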
'''Author: Sourabh Bajaj'''
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages

setup(
    name='QSTK',
    version='0.2.8.2',
    author='Sourabh Bajaj',
    packages=find_packages(),
    namespace_packages=['QSTK'],
    include_package_data=True,
    long_description=open('README.md').read(),
    author_email='sourabh@sourabhbajaj.com',
    url='https://github.com/tucker777/QuantSoftwareToolkit',
    license=open('LICENSE.txt').read(),
    description='QuantSoftware Toolkit',
    install_requires=[
        "numpy >= 1.6.1",
        "scipy >= 0.9.0",
        "matplotlib >= 1.1.0",
        "pandas >= 0.7.3",
        "python-dateutil == 1.5",
        "cvxopt>=1.1.8",
        "scikit-learn >= 0.11",
        "pandas-datareader>=0.4.0"
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Financial and Insurance Industry',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2.7',
        'Topic :: Utilities',
    ],
)
python
from typing import Any

__all__ = ["AttrDict"]


class AttrDict(dict):
    """
    Wrapper of dict class, to allow usage of attribute notation (instance.key)
    in place of index notation (instance["key"]). Can be used as a mixin for Mappings.
    """

    def __getattr__(self, item: str) -> Any:
        if item in self:
            return self[item]
        return getattr(super(), item)
python
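A brief usage sketch for the AttrDict wrapper above (assuming the class from the snippet is in scope); missing keys fall through to normal attribute lookup and surface as AttributeError rather than KeyError:

cfg = AttrDict({"host": "localhost", "port": 8080})
print(cfg.host, cfg["port"])  # localhost 8080

cfg["debug"] = True
print(cfg.debug)              # True

try:
    cfg.missing
except AttributeError as exc:
    print("no such attribute:", exc)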
from django.conf.urls import url
from zebra import views

urlpatterns = [
    url(r'webhooks/$', views.webhooks, name='webhooks'),
    url(r'webhooks/v2/$', views.webhooks_v2, name='webhooks_v2'),
]
python
# Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import pytest import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore import amp from mindspore.nn import Dense from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn.cell import Cell from mindspore.nn.layer.basic import Flatten from mindspore.nn.layer.conv import Conv2d from mindspore.nn.layer.normalization import BatchNorm2d from mindspore.nn.layer.pooling import MaxPool2d from mindspore.nn.optim import Momentum from mindspore.ops import operations as P from mindspore.ops.operations import TensorAdd context.set_context(mode=context.GRAPH_MODE, device_target="GPU") def random_normal_init(shape, mean=0.0, stddev=0.01, seed=None): init_value = np.ones(shape).astype(np.float32) * 0.01 return Tensor(init_value) def variance_scaling_raw(shape): variance_scaling_value = np.ones(shape).astype(np.float32) * 0.01 return Tensor(variance_scaling_value) def weight_variable_0(shape): zeros = np.zeros(shape).astype(np.float32) return Tensor(zeros) def weight_variable_1(shape): ones = np.ones(shape).astype(np.float32) return Tensor(ones) def conv3x3(in_channels, out_channels, stride=1, padding=1): """3x3 convolution """ weight_shape = (out_channels, in_channels, 3, 3) weight = variance_scaling_raw(weight_shape) return Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, weight_init=weight, has_bias=False, pad_mode="same") def conv1x1(in_channels, out_channels, stride=1, padding=0): """1x1 convolution""" weight_shape = (out_channels, in_channels, 1, 1) weight = variance_scaling_raw(weight_shape) return Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, weight_init=weight, has_bias=False, pad_mode="same") def conv7x7(in_channels, out_channels, stride=1, padding=0): """1x1 convolution""" weight_shape = (out_channels, in_channels, 7, 7) weight = variance_scaling_raw(weight_shape) return Conv2d(in_channels, out_channels, kernel_size=7, stride=stride, weight_init=weight, has_bias=False, pad_mode="same") def bn_with_initialize(out_channels): shape = (out_channels) mean = weight_variable_0(shape) var = weight_variable_1(shape) beta = weight_variable_0(shape) gamma = weight_variable_1(shape) bn = BatchNorm2d(out_channels, momentum=0.1, eps=0.0001, gamma_init=gamma, beta_init=beta, moving_mean_init=mean, moving_var_init=var) return bn def bn_with_initialize_last(out_channels): shape = (out_channels) mean = weight_variable_0(shape) var = weight_variable_1(shape) beta = weight_variable_0(shape) gamma = weight_variable_0(shape) bn = BatchNorm2d(out_channels, momentum=0.1, eps=0.0001, gamma_init=gamma, beta_init=beta, moving_mean_init=mean, moving_var_init=var) return bn def fc_with_initialize(input_channels, out_channels): weight_shape = 
(out_channels, input_channels) bias_shape = (out_channels) weight = random_normal_init(weight_shape) bias = weight_variable_0(bias_shape) return Dense(input_channels, out_channels, weight, bias) class ResidualBlock(Cell): expansion = 4 def __init__(self, in_channels, out_channels, stride=1, down_sample=False): super(ResidualBlock, self).__init__() out_chls = out_channels // self.expansion self.conv1 = conv1x1(in_channels, out_chls, stride=1, padding=0) self.bn1 = bn_with_initialize(out_chls) self.conv2 = conv3x3(out_chls, out_chls, stride=stride, padding=1) self.bn2 = bn_with_initialize(out_chls) self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0) self.bn3 = bn_with_initialize_last(out_channels) self.relu = P.ReLU() self.add = TensorAdd() def construct(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) out = self.add(out, identity) out = self.relu(out) return out class ResidualBlockWithDown(Cell): expansion = 4 def __init__(self, in_channels, out_channels, stride=1, down_sample=False): super(ResidualBlockWithDown, self).__init__() out_chls = out_channels // self.expansion self.conv1 = conv1x1(in_channels, out_chls, stride=1, padding=0) self.bn1 = bn_with_initialize(out_chls) self.conv2 = conv3x3(out_chls, out_chls, stride=stride, padding=1) self.bn2 = bn_with_initialize(out_chls) self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0) self.bn3 = bn_with_initialize_last(out_channels) self.relu = P.ReLU() self.downSample = down_sample self.conv_down_sample = conv1x1( in_channels, out_channels, stride=stride, padding=0) self.bn_down_sample = bn_with_initialize(out_channels) self.add = TensorAdd() def construct(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) identity = self.conv_down_sample(identity) identity = self.bn_down_sample(identity) out = self.add(out, identity) out = self.relu(out) return out class MakeLayer0(Cell): def __init__(self, block, layer_num, in_channels, out_channels, stride): super(MakeLayer0, self).__init__() self.a = ResidualBlockWithDown( in_channels, out_channels, stride=1, down_sample=True) self.b = block(out_channels, out_channels, stride=stride) self.c = block(out_channels, out_channels, stride=1) def construct(self, x): x = self.a(x) x = self.b(x) x = self.c(x) return x class MakeLayer1(Cell): def __init__(self, block, layer_num, in_channels, out_channels, stride): super(MakeLayer1, self).__init__() self.a = ResidualBlockWithDown( in_channels, out_channels, stride=stride, down_sample=True) self.b = block(out_channels, out_channels, stride=1) self.c = block(out_channels, out_channels, stride=1) self.d = block(out_channels, out_channels, stride=1) def construct(self, x): x = self.a(x) x = self.b(x) x = self.c(x) x = self.d(x) return x class MakeLayer2(Cell): def __init__(self, block, layer_num, in_channels, out_channels, stride): super(MakeLayer2, self).__init__() self.a = ResidualBlockWithDown( in_channels, out_channels, stride=stride, down_sample=True) self.b = block(out_channels, out_channels, stride=1) self.c = block(out_channels, out_channels, stride=1) self.d = block(out_channels, out_channels, stride=1) self.e = block(out_channels, out_channels, stride=1) self.f = block(out_channels, out_channels, stride=1) def construct(self, x): x = self.a(x) x = self.b(x) x = 
self.c(x) x = self.d(x) x = self.e(x) x = self.f(x) return x class MakeLayer3(Cell): def __init__(self, block, layer_num, in_channels, out_channels, stride): super(MakeLayer3, self).__init__() self.a = ResidualBlockWithDown( in_channels, out_channels, stride=stride, down_sample=True) self.b = block(out_channels, out_channels, stride=1) self.c = block(out_channels, out_channels, stride=1) def construct(self, x): x = self.a(x) x = self.b(x) x = self.c(x) return x class ResNet(Cell): def __init__(self, block, layer_num, num_classes=100): super(ResNet, self).__init__() self.conv1 = conv7x7(3, 64, stride=2, padding=3) self.bn1 = bn_with_initialize(64) self.relu = P.ReLU() self.maxpool = MaxPool2d(kernel_size=3, stride=2, pad_mode="same") self.layer1 = MakeLayer0( block, layer_num[0], in_channels=64, out_channels=256, stride=1) self.layer2 = MakeLayer1( block, layer_num[1], in_channels=256, out_channels=512, stride=2) self.layer3 = MakeLayer2( block, layer_num[2], in_channels=512, out_channels=1024, stride=2) self.layer4 = MakeLayer3( block, layer_num[3], in_channels=1024, out_channels=2048, stride=2) self.pool = nn.AvgPool2d(7, 1) self.fc = fc_with_initialize(512 * block.expansion, num_classes) self.flatten = Flatten() def construct(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.pool(x) x = self.flatten(x) x = self.fc(x) return x def resnet50(num_classes): return ResNet(ResidualBlock, [3, 4, 6, 3], num_classes) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_trainTensor(num_classes=10, epoch=8, batch_size=1): net = resnet50(num_classes) lr = 0.1 momentum = 0.9 optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, momentum) criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) net_with_criterion = WithLossCell(net, criterion) train_network = TrainOneStepCell( net_with_criterion, optimizer) # optimizer train_network.set_train() losses = [] for i in range(0, epoch): data = Tensor(np.ones([batch_size, 3, 224, 224] ).astype(np.float32) * 0.01) label = Tensor(np.ones([batch_size]).astype(np.int32)) loss = train_network(data, label) losses.append(loss) assert (losses[-1].asnumpy() < 1) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_trainTensor_big_batchSize(num_classes=10, epoch=8, batch_size=170): net = resnet50(num_classes) lr = 0.1 momentum = 0.9 optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, momentum) criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) net_with_criterion = WithLossCell(net, criterion) train_network = TrainOneStepCell( net_with_criterion, optimizer) # optimizer train_network.set_train() losses = [] for i in range(0, epoch): data = Tensor(np.ones([batch_size, 3, 224, 224] ).astype(np.float32) * 0.01) label = Tensor(np.ones([batch_size]).astype(np.int32)) loss = train_network(data, label) losses.append(loss) assert (losses[-1].asnumpy() < 1) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_trainTensor_amp(num_classes=10, epoch=18, batch_size=16): net = resnet50(num_classes) lr = 0.1 momentum = 0.9 optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, momentum) criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) train_network = amp.build_train_network( net, optimizer, criterion, 
level="O2") train_network.set_train() losses = [] for i in range(0, epoch): data = Tensor(np.ones([batch_size, 3, 224, 224] ).astype(np.float32) * 0.01) label = Tensor(np.ones([batch_size]).astype(np.int32)) loss = train_network(data, label) losses.append(loss) assert (losses[-1][0].asnumpy() < 1) assert (losses[-1][1].asnumpy() == False) assert (losses[-1][2].asnumpy() > 1)
python
import pandas as pd import time #------------------------------------ #loading dataset begin = time.time() df = pd.read_csv("adult.data" , names=["age", "workclass", "fnlwgt", "education", "education-num", "marital-status", "occupation", "relationship", "race", "sex", "capital-gain", "capital-loss", "hours-per-week", "native-country", "earning"]) print("dataset loaded in ",time.time()-begin," seconds") #------------------------------------ rows = df.shape[0] - 1 columns = df.shape[1] """ #dataset summary for i in range(0, columns): if df[df.columns[i]].dtypes != "int64": print(df.columns[i],": ",df[df.columns[i]].unique()," (",len(df[df.columns[i]].unique())," classes)") else: print(df.columns[i]) """ #------------------------------------ f = open('one-hot-encoded.txt', 'w') #dump header header = "" for i in range(0, columns): if i == 0: seperator = "" else: seperator = "," if df[df.columns[i]].dtypes != "int64": for k in range(0, len(df[df.columns[i]].unique())): header += seperator + df[df.columns[i]].unique()[k] else: header += seperator + df.columns[i] header += "\n" #print(header) f.write(header) #------------------------------------ #iterate on rows for index, row in df.iterrows(): new_line = "" #iterate on columns for i in range(0, columns): if i == 0: seperator = "" else: seperator = "," column_name = df.columns[i] if df[df.columns[i]].dtypes == "int64": new_line = new_line + seperator + str(row[column_name]) else: #class num_hot_encoded_classes = len(df[df.columns[i]].unique()) for k in range(0, num_hot_encoded_classes): if df[df.columns[i]].unique()[k] == row[column_name]: new_line = new_line + seperator + "1" else: new_line = new_line + seperator + "0" new_line += "\n" #print(new_line) f.write(new_line) #------------------------------------ f.close() print("converted to one-hot-encoded dataset in ",time.time()-begin," seconds")
python
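The hand-rolled loop above is instructive, but pandas can produce an equivalent one-hot encoding directly. A minimal sketch: pd.get_dummies expands every non-numeric column and leaves the int64 columns untouched, though it prefixes the new columns with the source column name instead of using the raw class labels as headers.

import pandas as pd

columns = ["age", "workclass", "fnlwgt", "education", "education-num", "marital-status",
           "occupation", "relationship", "race", "sex", "capital-gain", "capital-loss",
           "hours-per-week", "native-country", "earning"]
df = pd.read_csv("adult.data", names=columns)

one_hot = pd.get_dummies(df)  # object columns -> indicator columns, integer columns kept as-is
one_hot.to_csv("one-hot-encoded.txt", index=False)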
#Author: Zhicheng Zhu #Email: zhicheng.zhu@ttu.edu, yisha.xiang@ttu.edu #copyright @ 2018: Zhicheng Zhu. All right reserved. #Info: #main file to solve multi-stage DEF of CBM model by using linearization and solver # #Last update: 10/18/2018 #!/usr/bin/python from __future__ import print_function import sys import cplex import itertools import time from scipy.stats import gamma ##################################### #class info ##################################### class component_info(): def transProb(self, stateFrom, stateTo, inspItvl): if stateFrom > stateTo: return 0; stepSize = self.failTsh/(self.nStates - 1); #step size for normal states degFrom = stateFrom * stepSize; #degradation lower bound of the state degToU = (stateTo + 1) * stepSize; #degradation upper bound of the state degToL = stateTo * stepSize; #degradation lower bound of the state if stateTo >= self.nStates - 1: deltaDeg = self.failTsh - degFrom; prob = 1 - gamma.cdf(deltaDeg, self.gammaAlpha*inspItvl, scale=self.gammaBeta); else: deltaDeg1 = degToU - degFrom; prob1 = gamma.cdf(deltaDeg1, self.gammaAlpha*inspItvl, scale=self.gammaBeta); deltaDeg2 = degToL - degFrom; prob2 = gamma.cdf(deltaDeg2, self.gammaAlpha*inspItvl, scale=self.gammaBeta); prob = prob1 - prob2; return prob; ''' def state2lv(): crtState = self.crtState; bound = []; bound.append(0);#put it here for now.. bound.append(1); return bound; ''' def __init__(self, idx, gam_a, gam_b, states, S, \ initState,cCM, cPM): self.idx = idx; self.gammaAlpha = gam_a; self.gammaBeta = gam_b; self.nStates = states; # 0 ... nStates - 1. nStates - 1 is failure states. self.failTsh = S; #failure threshold self.initState = initState; #self.crtState = initState; #self.crtDgLvRange = self.state2lv(); self.cCM = cCM; self.cPM = cPM; #system information #parameters class system_info(): def add_com(self, comInfo): self.comInfoAll.append(comInfo); def __init__(self, N, T, inspInterval, cS, cInsp): self.nComponents = N; self.nStages = T; self.inspItvl = inspInterval; self.cS = cS; self.cInsp = cInsp; self.comInfoAll = []; def get_child_nodes(node, sysInfo): #find/generate child nodes #(t, 0), (t, 1), ..., (t, m**tn) m = sysInfo.comInfoAll[0].nStates; n = sysInfo.nComponents; numOutcome = m**n; start = node*numOutcome; childNodes = list(range(start, start + numOutcome)); #we only return next stage nodes index. return childNodes; def node_2_outcome(node, sysInfo): #translate a node to outcome: m = sysInfo.comInfoAll[0].nStates; n = sysInfo.nComponents; numOutcome = m**n; outcome = node % numOutcome; return outcome; ####################################### #1. initialization, START FROM HERE!!!. ####################################### #init system parameter start_time = time.clock(); #init system parameter nComponents = 2; nStates = 4; #number of states for components, 0 - (m-1); nStages = 6; initState = [3,2]; inspInterval = 10; cS = 20; #setup cost cInsp = 1; sysInfo = system_info(nComponents, nStages, inspInterval, cS, cInsp); nOrder = 0; #order of approximation #init component parameter #gamma distribution is assumed. gam_a = [1]*nComponents; gam_b = [5]*nComponents; S = [60]*nComponents; #failure threshold cCM = [20]*nComponents; cPM = [5]*nComponents; for i in range(nComponents): comInfo = component_info(i, gam_a[i], gam_b[i], nStates,\ S[i], initState[i], cCM[i], cPM[i]); sysInfo.add_com(comInfo); ######################################## #2. 
build multi-stage DEF model and run ######################################## start_time = time.clock(); #2.1 # (1) get scenario combinations omega = []; for i in itertools.product(list(range(nStates)), repeat = sysInfo.nComponents): omega.append(list(i)); ''' #no set j when order = 0; # (2) get subsets which cardinality = j setS = []; #start from j = 2 for j in range(2, sysInfo.nComponents + 1): #sysInfo.nComponents >=2; setSj = []; for i in itertools.combinations(list(range(sysInfo.nComponents)), j): setSj.append(list(i)); setS.append(setSj); ''' # get coeA and coeB # no coeA when order = 0 #coeA = []; #scen*n*scen coeB = []; #scen*n*scen #coeAInit = []; #n*scen, store init coeA coeBInit = []; #n*scen, store init coeB for w1 in range(len(omega)): stateFrom = omega[w1]; #coeAW1 = []; coeBW1 = []; for i in range(sysInfo.nComponents): biw = []; #aiw = []; for w2 in range(len(omega)): comStatesTo = omega[w2]; comIFrom = stateFrom[i]; comITo = comStatesTo[i]; tmp = sysInfo.comInfoAll[i].transProb(comIFrom, comITo, sysInfo.inspItvl); biw.append(tmp); #aiw.append(sysInfo.comInfoAll[i].transProb(0, comITo, sysInfo.inspItvl) - tmp); #coeAW1.append(aiw); coeBW1.append(biw); if stateFrom == initState: #coeAInit = coeAW1; coeBInit = coeBW1; #coeA.append(coeAW1); coeB.append(coeBW1); cpx = cplex.Cplex(); #init solver cpx.objective.set_sense(cpx.objective.sense.minimize); #2.2 add decision variables #add X varX = []; dictX = {}; for stageIdx in range(nStages): nodeNum = sysInfo.comInfoAll[0].nStates ** (stageIdx * sysInfo.nComponents); for node in range(nodeNum): #nodes in current stage for i in range(sysInfo.nComponents): scripts = str(i) + str(stageIdx) + str(node); nameTmp = "x"+scripts; dictX[scripts] = nameTmp; varX.append(cpx.variables.get_num()); objCoe = 0; if stageIdx == 0: objCoe = sysInfo.comInfoAll[i].cPM; cpx.variables.add(obj = [objCoe], lb = [0.0], ub=[1.0], types=["B"], names=[nameTmp]); #add Y varY = []; dictY = {}; for stageIdx in range(nStages): nodeNum = sysInfo.comInfoAll[0].nStates ** (stageIdx * sysInfo.nComponents); for node in range(nodeNum): #nodes in current stage for i in range(sysInfo.nComponents): scripts = str(i)+str(stageIdx)+str(node); nameTmp = "y" + scripts; dictY[scripts] = nameTmp; varY.append(cpx.variables.get_num()); objCoe = 0; if stageIdx == 0: objCoe = sysInfo.comInfoAll[i].cCM - sysInfo.comInfoAll[i].cPM; cpx.variables.add(obj = [objCoe], lb = [0.0], ub=[1.0], types=["B"], names=[nameTmp]); #add Z varZ = []; dictZ = {}; for stageIdx in range(nStages): nodeNum = sysInfo.comInfoAll[0].nStates ** (stageIdx * sysInfo.nComponents); for node in range(nodeNum): #nodes in current stage scripts = str(stageIdx) + str(node); nameTmp = "z" + scripts; dictZ[scripts] = nameTmp; varZ.append(cpx.variables.get_num()); objCoe = 0; if stageIdx == 0: objCoe = sysInfo.cS; cpx.variables.add(obj = [objCoe], lb = [0.0], ub=[1.0], types=["B"], names=[nameTmp]); #add Theta varTheta = []; dictTheta = {}; for stageIdx in range(1, nStages): nodeNum = sysInfo.comInfoAll[0].nStates ** (stageIdx * sysInfo.nComponents); for node in range(nodeNum): #nodes in current stage coeTmp = 0; if stageIdx == 1: coeTmp = 1; for i in range(sysInfo.nComponents): coeTmp = coeTmp * coeBInit[i][node]; #print ("ThetacoeTmp=" + str(coeTmp)); scripts = str(stageIdx) + str(node); nameTmp = "th" + scripts; dictTheta[scripts] = nameTmp; varTheta.append(cpx.variables.get_num()); cpx.variables.add(obj = [coeTmp], lb = [0.0], ub=[cplex.infinity], types=["C"], names=[nameTmp]) ''' #no V & W & U when order = 0 #add V 
varV = []; dictV= {}; for stageIdx in range(nStages - 1): nodeNum = sysInfo.comInfoAll[0].nStates ** (stageIdx * sysInfo.nComponents); for curNode in range(nodeNum): childNodes = get_child_nodes(curNode, sysInfo); for chNode in childNodes: for i in range(sysInfo.nComponents): #v corresponds to cardinality set when cardinality j = 1. if stageIdx != 0: coeTmp = 0; else: coeTmp = coeAInit[i][chNode]; for r in range(sysInfo.nComponents): if r != i: coeTmp = coeTmp * coeBInit[r][chNode]; #print ("VcoeTmp=" + str(coeTmp)); scripts = str(i) + str(stageIdx) + str(curNode) + str(chNode); nameTmp = "v" + scripts; dictV[scripts] = nameTmp; varV.append(cpx.variables.get_num()); #continuous variable cpx.variables.add(obj = [coeTmp], lb = [0.0], ub=[cplex.infinity], types=["C"], names=[nameTmp]); #add W varW = []; dictW = {}; for stageIdx in range(nStages - 1): nodeNum = sysInfo.comInfoAll[0].nStates**(stageIdx*sysInfo.nComponents); for curNode in range(nodeNum): childNodes = get_child_nodes(curNode, sysInfo); for chNode in childNodes: for j in range(2, sysInfo.nComponents+1): #cardinality starts from 2 to n. setSj = setS[j-2]; for k in range(len(setSj)): if stageIdx != 0: coeTmp = 0; else: setSjk = setSj[k]; coeTmp = 1; for i in range(sysInfo.nComponents): if i in setSjk: coeTmp = coeTmp*coeAInit[i][chNode]; else: coeTmp = coeTmp*coeBInit[i][chNode]; #print ("WcoeTmp=" + str(coeTmp)); scripts = str(j) + str(k) + str(stageIdx) + str(curNode) + str(chNode); nameTmp = "w" + scripts; dictW[scripts] = nameTmp; varW.append(cpx.variables.get_num()); #continuous variable cpx.variables.add(obj = [coeTmp], lb = [0.0], ub=[cplex.infinity], types=["C"], names=[nameTmp]); #add U: auxilary variable that used in w varU = []; dictU = {}; for stageIdx in range(nStages - 1): nodeNum = sysInfo.comInfoAll[0].nStates**(stageIdx*sysInfo.nComponents); for node in range(nodeNum): #nodes in current stage for j in range(2, sysInfo.nComponents+1): #cardinality starts from 2 to n. setSj = setS[j-2]; for k in range(len(setSj)): scripts = str(j) + str(k) + str(stageIdx) + str(node); nameTmp = "u" + scripts; dictU[scripts] = nameTmp; varU.append(cpx.variables.get_num()); cpx.variables.add(obj = [0], lb = [0.0], ub=[1.0], types=["B"], names=[nameTmp]); ''' ## 2.2 add constraints # 1 for stageIdx in range(nStages): nodeNum = sysInfo.comInfoAll[0].nStates**(stageIdx*sysInfo.nComponents); for node in range(nodeNum): coefNameZ = dictZ[str(stageIdx) + str(node)]; for i in range(sysInfo.nComponents): coefNameX = dictX[str(i) + str(stageIdx) + str(node)]; cpx.linear_constraints.add(lin_expr=[cplex.SparsePair([coefNameX, coefNameZ], [1, -1])], senses=["L"], range_values=[0.0], rhs=[0]); # 2 & 3 for stageIdx in range(nStages): nodeNum = sysInfo.comInfoAll[0].nStates**(stageIdx*sysInfo.nComponents); curOutcome = 0; #distinct outcome index. 
for node in range(nodeNum): coefValueVec = []; coefNameVec = []; if stageIdx == 0: curStates = initState; else: curStates = omega[curOutcome]; curOutcome += 1; if curOutcome == len(omega): curOutcome = 0; for i in range(sysInfo.nComponents): # 2 curStatesI = curStates[i]; coefNameY = dictY[str(i) + str(stageIdx) + str(node)]; coefValueY = curStatesI; cpx.linear_constraints.add(lin_expr=[cplex.SparsePair([coefNameY],[-coefValueY])], senses=["L"], range_values=[0.0], rhs=[sysInfo.comInfoAll[i].nStates-2-curStatesI]); # 3 nameIdxScriptX = str(i) + str(stageIdx) + str(node); coefNameX = dictX[nameIdxScriptX]; coefValueX = -1; coefValueY = 1; #value changed here for 3rd constraint cpx.linear_constraints.add(lin_expr=[cplex.SparsePair([coefNameY, coefNameX],[coefValueY, coefValueX])], senses=["L"], range_values=[0.0], rhs=[0.0]); # 4: tooooo complex: # in 4, theta starts from stage 1 to nStages - 2. for stageIdx in range(1, nStages - 1): nodeNum = sysInfo.comInfoAll[0].nStates**(stageIdx*sysInfo.nComponents); for node in range(nodeNum): # do the first part coefNameVec = []; coefValueVec = []; nameTmp = dictTheta[str(stageIdx) + str(node)]; coefNameVec.append(nameTmp); coefValueVec.append(-1); for i in range(sysInfo.nComponents): #add x nameTmp = dictX[str(i) +str(stageIdx) + str(node)]; coefNameVec.append(nameTmp); coefValueVec.append(sysInfo.comInfoAll[i].cPM); #add y nameTmp = dictY[str(i) +str(stageIdx) + str(node)]; coefNameVec.append(nameTmp); coefValueVec.append(sysInfo.comInfoAll[i].cCM - sysInfo.comInfoAll[i].cPM); #add z nameTmp = dictZ[str(stageIdx) + str(node)]; coefNameVec.append(nameTmp); coefValueVec.append(sysInfo.cS); #do the second part childNodes = get_child_nodes(node, sysInfo); for chNode in childNodes: #within the second part... #part 1 nameTmp = dictTheta[str(stageIdx+1) + str(chNode)]; stateFromIdx = node_2_outcome(node, sysInfo); stateFrom = omega[stateFromIdx]; stateToIdx = node_2_outcome(chNode, sysInfo); stateTo = omega[stateToIdx]; valueTmp = 1; for i in range(sysInfo.nComponents): valueTmp = valueTmp * coeB[stateFromIdx][i][stateToIdx]; if valueTmp == 0: break; #make it faster; coefNameVec.append(nameTmp); coefValueVec.append(valueTmp); cpx.linear_constraints.add(lin_expr=[cplex.SparsePair(coefNameVec,coefValueVec)], senses=["E"], range_values=[0.0], rhs=[0.0]); ''' # only have the constant term in zero-order approximation #print (valueTmp); #part 2 for i in range(sysInfo.nComponents): nameTmp = dictV[str(i) + str(stageIdx) + str(node) + str(chNode)]; valueTmp = coeA[stateFromIdx][i][stateToIdx]; for r in range(sysInfo.nComponents): if r != i: valueTmp = valueTmp * coeB[stateFromIdx][r][stateToIdx]; if valueTmp == 0: break; #make it faster coefNameVec.append(nameTmp); coefValueVec.append(valueTmp); #part 3: for j in range(2, sysInfo.nComponents + 1): setSj = setS[j - 2]; #setS starts from 2 for k in range(len(setSj)): nameTmp = dictW[str(j) + str(k) + str(stageIdx) + str(node) + str(chNode)]; valueTmp = 1; setSjk = setSj[k]; for i in range(sysInfo.nComponents): if i in setSjk: valueTmp = valueTmp * coeA[stateFromIdx][i][stateToIdx]; else: valueTmp = valueTmp * coeB[stateFromIdx][i][stateToIdx]; if valueTmp == 0: break; #make it faster coefNameVec.append(nameTmp); coefValueVec.append(valueTmp); #theta is stage * node ''' # 5: theta at last stage stageIdx = nStages - 1; nodeNum = sysInfo.comInfoAll[0].nStates**(stageIdx*sysInfo.nComponents); for node in range(nodeNum): coefNameVec = []; coefValueVec = []; nameTmp = dictTheta[str(stageIdx) + str(node)]; 
coefNameVec.append(nameTmp); coefValueVec.append(-1); for i in range(sysInfo.nComponents): #add x nameTmp = dictX[str(i) +str(stageIdx) + str(node)]; coefNameVec.append(nameTmp); coefValueVec.append(sysInfo.comInfoAll[i].cPM); #add y nameTmp = dictY[str(i) +str(stageIdx) + str(node)]; coefNameVec.append(nameTmp); coefValueVec.append(sysInfo.comInfoAll[i].cCM - sysInfo.comInfoAll[i].cPM); #add z nameTmp = dictZ[str(stageIdx) + str(node)]; coefNameVec.append(nameTmp); coefValueVec.append(sysInfo.cS); cpx.linear_constraints.add(lin_expr=[cplex.SparsePair(coefNameVec,coefValueVec)], senses=["E"], range_values=[0.0], rhs=[0.0]); ''' # 6: add linearization of V: # There are 4 parts in this section: upperM = 10000; #upper bound of theta for stageIdx in range(0, nStages - 1): nodeNum = sysInfo.comInfoAll[0].nStates**(stageIdx*sysInfo.nComponents); for node in range(nodeNum): childNodes = get_child_nodes(node, sysInfo); for i in range(sysInfo.nComponents): nameTmpX = dictX[str(i) + str(stageIdx) + str(node)]; valueTmpX = -upperM; for chNode in childNodes: nameTmpV = dictV[str(i) + str(stageIdx) + str(node) + str(chNode)]; valueTmpV = 1; # part 1 cpx.linear_constraints.add(lin_expr=[cplex.SparsePair([nameTmpX, nameTmpV],[valueTmpX, valueTmpV])], senses=["L"], range_values=[0.0], rhs=[0.0]); # part 2 nameTmpTheta = dictTheta[str(stageIdx + 1) + str(chNode)]; valueTmpTheta = -1; cpx.linear_constraints.add(lin_expr=[cplex.SparsePair([nameTmpTheta, nameTmpV],[valueTmpTheta, valueTmpV])], senses=["L"], range_values=[0.0], rhs=[0.0]); #part 3 cpx.linear_constraints.add(lin_expr=[cplex.SparsePair([nameTmpV, nameTmpTheta, nameTmpX],[valueTmpV, valueTmpTheta, valueTmpX])], senses=["G"], range_values=[0.0], rhs=[valueTmpX]); # part 4 is added when adding variable V # 7: add linearization of W: # There are 4 parts of W for stageIdx in range(0, nStages - 1): nodeNum = sysInfo.comInfoAll[0].nStates**(stageIdx*sysInfo.nComponents); for node in range(nodeNum): childNodes = get_child_nodes(node, sysInfo); for chNode in childNodes: for j in range(2, sysInfo.nComponents + 1): setSj = setS[j - 2]; for k in range(len(setSj)): nameTmpW = dictW[str(j) + str(k) + str(stageIdx) + str(node) + str(chNode)]; valueTmpW = 1; nameTmpU = dictU[str(j) + str(k) + str(stageIdx) + str(node)]; valueTmpU = -upperM; # part 1 cpx.linear_constraints.add(lin_expr=[cplex.SparsePair([nameTmpW, nameTmpU],[valueTmpW, valueTmpU])], senses=["L"], range_values=[0.0], rhs=[0.0]); # part 2 nameTmpTheta = dictTheta[str(stageIdx + 1) + str(chNode)]; valueTmpTheta = -1; cpx.linear_constraints.add(lin_expr=[cplex.SparsePair([nameTmpW, nameTmpTheta],[valueTmpW, valueTmpTheta])], senses=["L"], range_values=[0.0], rhs=[0.0]); # part 3 cpx.linear_constraints.add(lin_expr=[cplex.SparsePair([nameTmpW, nameTmpTheta, nameTmpU],[valueTmpW, valueTmpTheta, valueTmpU])], senses=["G"], range_values=[0.0], rhs=[valueTmpU]); # part 4 is added when adding variable W # 8: add linearization of U: # There are 3 parts of U for stageIdx in range(nStages - 1): nodeNum = sysInfo.comInfoAll[0].nStates**(stageIdx*sysInfo.nComponents); for node in range(nodeNum): for j in range(2, sysInfo.nComponents + 1): setSj = setS[j - 2]; for k in range(len(setSj)): setSjk = setSj[k]; nameTmpU = dictU[str(j) + str(k) + str(stageIdx) + str(node)]; valueTmpU = 1; namePart2 = []; valuePart2 = []; namePart2.append(nameTmpU); valuePart2.append(valueTmpU); for i in setSjk: nameTmpX = dictX[str(i) + str(stageIdx) + str(node)]; valueTmpX = -1; #part 1: 
cpx.linear_constraints.add(lin_expr=[cplex.SparsePair([nameTmpU, nameTmpX],[valueTmpU, valueTmpX])], senses=["L"], range_values=[0.0], rhs=[0.0]); #prepare for part 2: namePart2.append(nameTmpX); valuePart2.append(valueTmpX); #part 2 cpx.linear_constraints.add(lin_expr=[cplex.SparsePair(namePart2, valuePart2)], senses=["G"], range_values=[0.0], rhs=[-j + 1]); # -(j - 1) # part 3 is added when adding variable U ''' ######################################## #3. solve and result handling ######################################## end_time = time.clock(); time_elapsed0 = end_time - start_time; start_time = time.clock(); cpx.solve(); solution = cpx.solution; #obj value objValues = solution.get_objective_value(); #get solutions solutionAll = solution.get_values(); #get X minTmp = varX[0]; maxTmp = varX[-1] + 1; solutionX = solutionAll[minTmp:maxTmp]; #get Y minTmp = varY[0]; maxTmp = varY[-1] + 1; solutionY = solutionAll[minTmp:maxTmp]; #get Z minTmp = varZ[0]; maxTmp = varZ[-1] + 1; solutionZ = solutionAll[minTmp:maxTmp]; #get theta minTmp = varTheta[0]; maxTmp = varTheta[-1] + 1; solutionTheta = solutionAll[minTmp:maxTmp]; ''' #get V minTmp = varV[0]; maxTmp = varV[-1] + 1; solutionV = solutionAll[minTmp:maxTmp]; #get W minTmp = varW[0]; maxTmp = varW[-1] + 1; solutionW = solutionAll[minTmp:maxTmp]; #get U minTmp = varU[0]; maxTmp = varU[-1] + 1; solutionU = solutionAll[minTmp:maxTmp]; ''' end_time = time.clock(); time_elapsed = end_time - start_time; f = open("log3.txt", "w"); old = sys.stdout; sys.stdout = f; print ("\n===============================main_multi_DEF_solver_0, (m, n, t)=(%d,%d,%d)============" %(nStates, sysInfo.nComponents, nStages)); print ("loading time is %f" %time_elapsed0); print ("calculation time is %f" %time_elapsed); print ("objValues:"); print (objValues); countX = 0; countY = 0; countZ = 0; countV = 0; countW = 0; countU = 0; countTheta = 0; for stageIdx in range(nStages): nodeNum = sysInfo.comInfoAll[0].nStates**(stageIdx*sysInfo.nComponents); for node in range(nodeNum): print ("=======(stage, scen) = (%d, %d)========" %(stageIdx,node)); #get X Y Z theta solX = []; solY = []; solZ = solutionZ[countZ]; countZ += 1; solTheta = []; if stageIdx != 0: solTheta = solutionTheta[countTheta]; countTheta += 1; for i in range(sysInfo.nComponents): solX.append(solutionX[countX]); countX += 1; solY.append(solutionY[countY]); countY += 1; print ("solutionX:"); print (solX); print ("solutionY:"); print (solY); print ("solutionZ:"); print (solZ); print ("solutionTheta:"); print (solTheta); ''' #get U if stageIdx == nStages - 1: #last stage, no U V W continue; solU = []; for j in range(2, sysInfo.nComponents + 1): setSj = setS[j - 2]; for k in range(len(setSj)): solU.append(solutionU[countU]); countU += 1; print ("solutionU:"); print (solU); #get v and w childNodes = get_child_nodes(node, sysInfo); solV = []; solW = []; for chNode in childNodes: #get V solVTmp = []; for i in range(sysInfo.nComponents): solVTmp.append(solutionV[countV]); countV += 1; solV.append(solVTmp); #get W solWTmp = []; for j in range(2, sysInfo.nComponents + 1): setSj = setS[j - 2]; for k in range(len(setSj)): solWTmp.append(solutionW[countW]); countW += 1; solW.append(solWTmp); print ("solutionV:"); print (solV); print ("solutionW:"); print (solW); print ("===================\n"); ''' ''' print ("=======coeA======"); print (coeA); print ("=======coeB======"); print (coeB); print ("=======coeU======"); print (coeU); print ("=======coeX======"); print (coeX); print ("=======costTerm======"); print (consTerm); ''' 
## 4. end of file sys.stdout = old; f.close();
python
""" ██████╗██╗██████╗ ██╗ ██╗███████╗██╗ ██╗ ██╔════╝██║██╔══██╗██║ ██║██╔════╝╚██╗ ██╔╝ ██║ ██║██████╔╝███████║█████╗ ╚████╔╝ ██║ ██║██╔═══╝ ██╔══██║██╔══╝ ╚██╔╝ ╚██████╗██║██║ ██║ ██║███████╗ ██║ © Brandon Skerritt Github: brandonskerritt """ from copy import copy from distutils import util from typing import Optional, Dict, Union, Set, List import re from loguru import logger import ciphey import cipheycore from ciphey.iface import ParamSpec, Cracker, CrackResult, T, CrackInfo, registry from ciphey.common import fix_case @registry.register class Vigenere(ciphey.iface.Cracker[str]): def getInfo(self, ctext: str) -> CrackInfo: if self.keysize is not None: analysis = self.cache.get_or_update( ctext, f"vigenere::{self.keysize}", lambda: cipheycore.analyse_string(ctext.lower(), self.keysize, self.group), ) val = cipheycore.vigenere_detect(analysis, self.expected) logger.debug(f"Vigenere has likelihood {val}") return CrackInfo( success_likelihood=val, # TODO: actually calculate runtimes success_runtime=1e-3, failure_runtime=1e-2, ) likely_lens = self.cache.get_or_update( ctext, f"vigenere::likely_lens", lambda: cipheycore.vigenere_likely_key_lens(ctext.lower(), self.expected, self.group, self.detect_p_value), ) likely_lens_cpy = likely_lens # Filter out the lens that make no sense likely_lens = [i for i in likely_lens if i.len <= self.MAX_KEY_LENGTH] for keysize in likely_lens: # Store the analysis analysis = self.cache.get_or_update( ctext, f"vigenere::{keysize.len}", lambda: keysize.tab ) if len(likely_lens) == 0: return CrackInfo( success_likelihood=0, # TODO: actually calculate runtimes success_runtime=2e-3, failure_runtime=2e-2, ) logger.debug(f"Vigenere has likelihood {likely_lens[0].p_value} with lens {[i.len for i in likely_lens]}") return CrackInfo( success_likelihood=likely_lens[0].p_value, # TODO: actually calculate runtimes success_runtime=2e-4, failure_runtime=2e-4, ) @staticmethod def getTarget() -> str: return "vigenere" def crackOne( self, ctext: str, analysis: cipheycore.windowed_analysis_res, real_ctext: str ) -> List[CrackResult]: possible_keys = cipheycore.vigenere_crack( analysis, self.expected, self.group, self.p_value ) if len(possible_keys) > self.clamp: possible_keys = possible_keys[:self.clamp] logger.trace( f"Vigenere crack got keys: {[[i for i in candidate.key] for candidate in possible_keys]}" ) return [ CrackResult( value=fix_case(cipheycore.vigenere_decrypt(ctext, candidate.key, self.group), real_ctext), key_info="".join([self.group[i] for i in candidate.key]), misc_info=f"p-value was {candidate.p_value}" ) for candidate in possible_keys[: min(len(possible_keys), 10)] ] def attemptCrack(self, ctext: str) -> List[CrackResult]: logger.debug("Trying vigenere cipher") # Convert it to lower case if self.lower: message = ctext.lower() else: message = ctext # Analysis must be done here, where we know the case for the cache if self.keysize is not None: return self.crackOne( message, self.cache.get_or_update( ctext, f"vigenere::{self.keysize}", lambda: cipheycore.analyse_string(message, self.keysize, self.group), ), ctext ) else: arrs = [] likely_lens = self.cache.get_or_update( ctext, f"vigenere::likely_lens", lambda: cipheycore.vigenere_likely_key_lens(message, self.expected, self.group), ) possible_lens = [i for i in likely_lens] possible_lens.sort(key=lambda i: i.p_value) logger.trace(f"Got possible lengths {[i.len for i in likely_lens]}") # TODO: work out length for i in possible_lens: arrs.extend( self.crackOne( message, self.cache.get_or_update( ctext, 
f"vigenere::{i.len}", lambda: cipheycore.analyse_string(message, i.len, self.group), ), ctext ) ) logger.debug(f"Vigenere returned {len(arrs)} candidates") return arrs @staticmethod def getParams() -> Optional[Dict[str, ParamSpec]]: return { "expected": ciphey.iface.ParamSpec( desc="The expected distribution of the plaintext", req=False, config_ref=["default_dist"], ), "group": ciphey.iface.ParamSpec( desc="An ordered sequence of chars that make up the caesar cipher alphabet", req=False, default="abcdefghijklmnopqrstuvwxyz", ), "lower": ciphey.iface.ParamSpec( desc="Whether or not the ciphertext should be converted to lowercase first", req=False, default=True, ), "keysize": ciphey.iface.ParamSpec( desc="A key size that should be used. If not given, will attempt to work it out", req=False, ), "p_value": ciphey.iface.ParamSpec( desc="The p-value to use for windowed frequency analysis", req=False, default=0.5, ), "detect_p_value": ciphey.iface.ParamSpec( desc="The p-value to use for the detection of Vigenere length", req=False, default=0.01, ), "clamp": ciphey.iface.ParamSpec( desc="The maximum number of candidates that can be returned per key len", req=False, default=10, ), } def __init__(self, config: ciphey.iface.Config): super().__init__(config) self.lower: Union[str, bool] = self._params()["lower"] if type(self.lower) != bool: self.lower = util.strtobool(self.lower) self.group = list(self._params()["group"]) self.expected = config.get_resource(self._params()["expected"]) self.cache = config.cache self.keysize = self._params().get("keysize") if self.keysize is not None: self.keysize = int(self.keysize) self.p_value = float(self._params()["p_value"]) self.detect_p_value = float(self._params()["detect_p_value"]) self.clamp = int(self._params()["clamp"]) self.MAX_KEY_LENGTH = 16
python
from django.contrib import admin

from .models import Coach, Comment


class CoachAdmin(admin.ModelAdmin):
    list_display = (
        'id',
        'first_name',
        'last_name',
        'email',
        'phone_number',
        'image',
    )
    ordering = ('first_name',)


class CommentAdmin(admin.ModelAdmin):
    list_display = (
        'coach',
        'stars',
        'comment',
        'author',
    )
    ordering = ('coach',)


admin.site.register(Coach, CoachAdmin)
admin.site.register(Comment, CommentAdmin)
python
# Generated by Django 3.0 on 2020-12-03 14:37

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('manager', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='activity',
            name='amount',
            field=models.PositiveIntegerField(),
        ),
    ]
python
from enum import Enum, IntEnum from pathlib import Path from typing import List, Literal, Optional, Union from pydantic import BaseModel import tomlkit # type: ignore (no stub) from .iec_62056_protocol.obis_data_set import ( ObisFloatDataSet, ObisId, ObisIntegerDataSet, ObisStringDataSet, ) def load_default_configuration(): return PyPowerMeterMonitorConfig() def load_configuration_from_file_path(config_file_path: Path): if not config_file_path.is_file(): return load_default_configuration() return load_configuration_from_text(config_file_path.read_text()) def load_configuration_from_text(config_file_text: str) -> "PyPowerMeterMonitorConfig": return PyPowerMeterMonitorConfig.parse_obj(dict(tomlkit.parse(config_file_text))) class LoggingLevel(IntEnum): critical = 50 error = 40 warning = 30 info = 20 debug = 10 class LoggingConfig(BaseModel): level: LoggingLevel = LoggingLevel.error class SerialPortParity(Enum): NONE = "N" EVEN = "E" ODD = "O" MARK = "M" SPACE = "S" class SerialPortStopBits(Enum): ONE = 1 ONE_POINT_FIVE = 1.5 TWO = 2 class SerialPortConfig(BaseModel): port_url: str = "/dev/ttyUSB0" baud_rate: int = 300 byte_size: int = 7 parity: SerialPortParity = SerialPortParity.EVEN stop_bits: SerialPortStopBits = SerialPortStopBits.ONE polling_delay: float = 30.0 response_delay: float = 0.3 read_timeout: float = 30.0 write_timeout: float = 10.0 class Config: allow_mutation = False class MqttBrokerConfig(BaseModel): hostname: str = "localhost" port: int = 1883 username: Optional[str] = None password: Optional[str] = None class MqttDeviceConfig(BaseModel): id: str = "power-meter-0" name: str = "Power Meter 0" manufacturer: str = "Unknown Manufacturer" model: str = "Unknown Model" class MqttConfig(BaseModel): enabled: bool = True configuration_topic_template: str = "homeassistant/sensor/{entity_id}/config" state_topic_template: str = "homeassistant/sensor/{entity_id}/state" broker: MqttBrokerConfig = MqttBrokerConfig() device: MqttDeviceConfig = MqttDeviceConfig() class ObisBaseDataSetConfig(BaseModel): id: ObisId name: str class ObisIntegerDataSetConfig(ObisBaseDataSetConfig): value_type: Literal["integer"] @property def obis_data_set_type(self): return ObisIntegerDataSet class ObisFloatDataSetConfig(ObisBaseDataSetConfig): value_type: Literal["float"] @property def obis_data_set_type(self): return ObisFloatDataSet class ObisStringDataSetConfig(ObisBaseDataSetConfig): value_type: Literal["string"] @property def obis_data_set_type(self): return ObisStringDataSet ObisDataSetConfig = Union[ ObisIntegerDataSetConfig, ObisFloatDataSetConfig, ObisStringDataSetConfig ] class ObisConfig(BaseModel): data_sets: List[ObisDataSetConfig] = [] class PyPowerMeterMonitorConfig(BaseModel): logging: LoggingConfig = LoggingConfig() serial_port: SerialPortConfig = SerialPortConfig() mqtt: MqttConfig = MqttConfig() obis: ObisConfig = ObisConfig() class Config: allow_mutation = False
python
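A short usage sketch for the configuration loader above (assuming load_configuration_from_text from the snippet is in scope; the TOML keys mirror the pydantic models it defines):

config_text = """
[logging]
level = 10

[serial_port]
port_url = "/dev/ttyUSB1"

[mqtt.broker]
hostname = "mqtt.local"
"""

config = load_configuration_from_text(config_text)
print(config.logging.level)          # LoggingLevel.debug (10)
print(config.serial_port.port_url)   # /dev/ttyUSB1
print(config.mqtt.broker.hostname)   # mqtt.local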
from telegram.ext import Dispatcher, CommandHandler, CallbackQueryHandler
from telegram import InlineKeyboardMarkup, InlineKeyboardButton, BotCommand
import random


def whoAreYou(update, context):
    msg = [
        """You can call me Operation Lune 9000, I'm actually just a random reply AI (not really)""",
        """Bro, I'm Operation Lune 9000, I am an emotional AI with a supercalifragilisticexpialidocious brain!""",
        """I'm gonna be your first personal AI, you can call me Operation Lune 9000!""",
        """I am a random sentence AI, Operation Lune 9000, you can ask me anything!""",
    ]
    update.message.reply_text("Hey, it seems you want to know who I am, let me tell you more :D \n%s" % (random.choice(msg)))


def add_handler(dp: Dispatcher):
    About_handler = CommandHandler('About', whoAreYou)
    dp.add_handler(About_handler)
python
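To see the /About command above in action it has to be attached to a running bot. A minimal wiring sketch, assuming the python-telegram-bot v13-style API that the imports above come from (the token string is a placeholder):

from telegram.ext import Updater

updater = Updater("YOUR_BOT_TOKEN", use_context=True)  # placeholder token
add_handler(updater.dispatcher)                        # registers the /About handler defined above
updater.start_polling()
updater.idle()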
import datetime, pytz
from dateutil.tz import tzlocal

log_dir = None
verbose = False


def log(message):
    # NOTE: this labels the local wall-clock time as UTC; the format drops the offset anyway.
    ts = pytz.utc.localize(datetime.datetime.now()).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
    if verbose:
        print(f'{ts} {message}')
    if log_dir is not None:
        # Use a context manager so the file handle is closed after every write.
        with open(log_dir, 'a') as log_file:
            print(f'{ts} {message}', file=log_file)
python
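A quick usage sketch for the logger above; the module name below is hypothetical, and note that log_dir is really a file path despite its name:

import simple_logger as logger  # hypothetical name for the module containing log()

logger.verbose = True
logger.log_dir = "/tmp/run.log"  # appended to on every call
logger.log("pipeline started")   # printed and written to /tmp/run.log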
from WMCore.WMException import WMException


class WMSpecFactoryException(WMException):
    """
    _WMSpecFactoryException_

    This exception will be raised by validation functions if
    the code fails validation. It will then be changed into
    a proper HTTPError in the ReqMgr, with the message you enter
    used as the message for farther up the line.
    """
    pass
python
#! python3
from __future__ import print_function

import SimpleITK as sitk
import numpy as np
import sys
import os


def LocalFusionWithLocalSimilarity(targetImage, registeredAtlases, outputPath, debug):
    """
    Fuses the labels from a set of registered atlases using local similarity metrics.
    Arguments:
        targetImage: image being segmented
        registeredAtlases: dictionary with a set of atlases having the fields intensityImage and labels
    """
    # Generate a new image:
    fusedLabels = sitk.Image(targetImage.GetSize(), sitk.sitkUInt8)
    fusedLabels.SetSpacing(targetImage.GetSpacing())
    fusedLabels.SetOrigin(targetImage.GetOrigin())
    fusedLabels.SetDirection(targetImage.GetDirection())

    kernelRadius = 2  # assumed patch radius; the original call left this argument out

    # We need to evaluate the similarity between the target image and each atlas for each voxel.
    # The atlas to be propagated depends on every voxel, so I need to go through them:
    for i in range(0, targetImage.GetWidth()):
        for j in range(0, targetImage.GetHeight()):
            for k in range(0, targetImage.GetDepth()):
                for atlas in registeredAtlases:
                    # Each atlas entry is assumed to expose its intensity image under "intensityImage"
                    # (see the docstring); only the similarity is computed here, the actual fusion
                    # into fusedLabels is still to be implemented.
                    LocalNormalizedCrossCorrelation(targetImage,
                                                    registeredAtlases[atlas]["intensityImage"],
                                                    i, j, k, kernelRadius)
    return fusedLabels


def LocalNormalizedCrossCorrelation(image1, image2, r, c, z, kernelRadius):
    # Extract the two local patches and flatten them to 1-D, so the correlation below is a scalar:
    patchImage1 = sitk.GetArrayFromImage(image1[r - kernelRadius:r + kernelRadius,
                                                c - kernelRadius:c + kernelRadius,
                                                z - kernelRadius:z + kernelRadius]).ravel()
    patchImage2 = sitk.GetArrayFromImage(image2[r - kernelRadius:r + kernelRadius,
                                                c - kernelRadius:c + kernelRadius,
                                                z - kernelRadius:z + kernelRadius]).ravel()
    # Pearson correlation of the two patches (covariance normalised by both standard deviations):
    lncc = np.corrcoef(patchImage1, patchImage2)[0, 1]
    return lncc
python
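A small sanity check for the patch correlation above, as a standalone sketch in pure numpy (no SimpleITK): the local normalized cross-correlation of a patch with an affinely rescaled copy of itself should be 1.

import numpy as np

rng = np.random.default_rng(0)
patch = rng.random((4, 4, 4)).ravel()
rescaled = 2.5 * patch + 1.0  # same structure, different intensity scale and offset

lncc = np.corrcoef(patch, rescaled)[0, 1]
print(lncc)  # ~1.0: correlation is invariant to affine intensity changes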
def create_adjacency_list(num_nodes, edges):
    """Build an undirected adjacency list (list of neighbour sets) from an edge list."""
    graph = [set() for _ in range(num_nodes)]
    for edge in edges:
        v_1, v_2 = edge[0], edge[1]
        graph[v_1].add(v_2)
        graph[v_2].add(v_1)
    return graph
python
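A quick usage example for the helper above (nodes are assumed to be 0-indexed integers, as the list indexing implies):

edges = [(0, 1), (1, 2), (2, 0), (2, 3)]
graph = create_adjacency_list(4, edges)
print(graph)  # [{1, 2}, {0, 2}, {0, 1, 3}, {2}] (set display order may vary)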
""" Copyright (c) 2015-2020 Raj Patel(raj454raj@gmail.com), StopStalk Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from gluon import current kind_mapping = { "just_count": ["total"], "success_failure": ["success", "failure"], "average": ["list"] } # ============================================================================== def get_redis_int_value(key_name): value = current.REDIS_CLIENT.get(key_name) return 0 if value is None else int(value) # ============================================================================== class MetricHandler(object): # -------------------------------------------------------------------------- def __init__(self, genre, kind, site, log_to_redis): """ Constructor for a specific MetricHandler @param genre (String): Metric identifier @param kind (String): Metric type ("just_count" or "success_failure") @param site (String): Metric handler is for which site @param log_to_redis (Boolean): If need to add it to redis """ self.redis_client = current.REDIS_CLIENT # Kind of tracking that we need to do self.genre = genre # The label to print in the health report self.label = " ".join([x.capitalize() for x in self.genre.split("_")]) # Just count or percentage self.kind = kind # Submission site self.site = site # If there metrics need to be persisted in redis self.log_to_redis = log_to_redis # The redis keys which will be used self.redis_keys = {} for type_of_key in kind_mapping[self.kind]: self.redis_keys[type_of_key] = "health_metrics:%s__%s__%s" % (self.genre, self.site, type_of_key) # -------------------------------------------------------------------------- def flush_keys(self): """ Remove all the keys for this MetricHandler from redis """ if self.log_to_redis is False: return [self.redis_client.delete(key) for key in self.redis_keys.values()] # -------------------------------------------------------------------------- def increment_count(self, type_of_key, increment_amount=1): """ Increment count of a metric given success key or failure key @param type_of_key (String): "success" or "failure" @param increment_amount (Number): Amount by which the redis key should be incremented """ if self.log_to_redis is False: return redis_key = self.redis_keys[type_of_key] value = self.redis_client.get(redis_key) if value is None: value = 0 else: value = int(value) self.redis_client.set(redis_key, value + increment_amount) # -------------------------------------------------------------------------- def add_to_list(self, type_of_key, value): """ Add a value to the list for computing average later @param value 
(Decimal): A decimal to be added to the list @param type_of_key (String): At present just "list" """ if self.log_to_redis is False: return self.redis_client.lpush(self.redis_keys[type_of_key], value) # -------------------------------------------------------------------------- def _get_average_string(self): all_values = self.redis_client.lrange(self.redis_keys["list"], 0, -1) return_str = None if len(all_values): all_values = [float(x) for x in all_values] average = sum(all_values) * 1.0 / len(all_values) return_str = str(average) else: return_str = "-" return return_str # -------------------------------------------------------------------------- def get_html(self): html_body = "<tr><td style='background-color: lavender;'><b>%s</b></td>" % self.label if self.kind == "just_count": html_body += "<td colspan='3'>Total: %d</td>" % get_redis_int_value(self.redis_keys["total"]) elif self.kind == "success_failure": success = get_redis_int_value(self.redis_keys["success"]) failure = get_redis_int_value(self.redis_keys["failure"]) if failure > 0: failure_percentage = str(failure * 100.0 / (failure + success)) else: failure_percentage = "-" html_body += """ <td>Success: %d</td><td>Failure: %d</td><td>Failure per: %s</td> """ % (success, failure, failure_percentage) elif self.kind == "average": html_body += "<td colspan='3'>Average: %s</td>" % self._get_average_string() else: html_body += "<td colspane='3'>Unknown kind</td>" html_body += "</tr>" return html_body # -------------------------------------------------------------------------- def __str__(self): """ Representation of the MetricHandler """ return_str = self.label + ": " if self.kind == "just_count": return_str += str(get_redis_int_value(self.redis_keys["total"])) elif self.kind == "success_failure": return_str += str(get_redis_int_value(self.redis_keys["success"])) + " " + \ str(get_redis_int_value(self.redis_keys["failure"])) elif self.kind == "average": return_str += self._get_average_string() return return_str
python
import glob import pandas as pd from pathlib import Path import re import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg import matplotlib.patches as patches import os def transformCordinates(coordinates, wmax, hmax): maxis = coordinates[0] minaxis = coordinates[1] angle = coordinates[2] xcoor = coordinates[3] ycoor = coordinates[4] maxis = float(maxis) minaxis = float(minaxis) angle = float(angle) xcoor = float(xcoor) ycoor = float(ycoor) w = 2*(np.sqrt((maxis*np.cos(angle))**2 + (minaxis*np.sin(angle))**2)) h = 2*(np.sqrt((maxis*np.sin(angle))**2 + (minaxis*np.cos(angle))**2)) xmax = xcoor-w/2 ymax = ycoor-h/2 return(xmax,ymax,w,h) def generateArray(file): with open(file, "r") as f: arr = f.read().splitlines() arr_len = len(arr) i = 0 rg = re.compile("(\d)*_(\d)*_(\d)*_big") arr_temp = [] while i != arr_len: val = arr[i] mtch = rg.match(val) if mtch: try: my_dict = dict() val = "{}.jpg".format(val) my_dict["name"] = val #matplotlib img = mpimg.imread(os.path.join("dataset", val)) fig, ax = plt.subplots(1) ax.imshow(img) (h, w, _) = img.shape s = int(arr[i+1]) for j in range(0, s): coord = arr[i + 2 + j] trans = transformCordinates(coord.split(" "),h,w) # print(trans) #print(trans) newf = patches.Rectangle( (trans[0], trans[1]), trans[2], trans[3], linewidth=1, edgecolor = 'b', facecolor ='none') ax.add_patch(newf) plt.show() my_dict["annotations"] = arr_temp i = i+1+s except: print("{}not found...".format(val)) i+=1 else: i+=1 def returnEllipseListFiles(path): return [ str(f) for f in Path(path).glob("**/*-ellipseList.txt") ] folder = glob.glob("dataset/*.jpg") folder = pd.Series(folder) files = returnEllipseListFiles("labels") print(folder) print(files) d = generateArray(files[0]) print(d)
python
""" Setup to install the 'factorymind' Python package """ import os from setuptools import find_packages, setup def read(file_name: str): """Utility function to read the README file. Used for the long_description. It's nice, because now 1) we have a top level README file and 2) it's easier to type in the README file than to put a raw string in belows 'setup()' config Args: file_name (str): Path to file """ return open(os.path.join(os.path.dirname(__file__), file_name)).read() install_requires = ["numpy", "pandas", "pytest"] setup_requirements = ["pytest-runner", "better-setuptools-git-version"] test_requirements = ["pytest", "nbformat"] setup( author="FactoryMind AS", author_email="enquiry@factorymind.ai", classifiers=[ "Development Status :: 3 - Alpha", "Topic :: Utilities", "License :: OSI Approved :: MIT License", ], name="factorymind", version="0.1.3", # version_config={"version_format": "{tag}.dev{sha}", "starting_version": "0.0.1"}, description="Python module `factorymind` for the FactoryMind platform", long_description=open("README.md").read(), packages=find_packages("src"), package_dir={"": "src"}, setup_requires=setup_requirements, test_suite="tests", tests_require=test_requirements, install_requires=install_requires, )
python
from dataclasses import dataclass, field from enum import Enum from typing import Optional __NAMESPACE__ = "NISTSchema-SV-IV-list-negativeInteger-enumeration-1-NS" class NistschemaSvIvListNegativeIntegerEnumeration1Type(Enum): VALUE_17702143_68213_73070785813457_55650_85440493680_6799621_74925_12_72537592001056039 = ( -17702143, -68213, -73070785813457, -55650, -85440493680, -6799621, -74925, -12, -72537592001056039, ) VALUE_26245_7189050820_38959743015554837_343346_3844467_100883_9141710_7583 = ( -26245, -7189050820, -38959743015554837, -343346, -3844467, -100883, -9141710, -7583, ) VALUE_98937535565323_54852263_56348773_97523843296749_777_588340914_5277957_838038027052 = ( -98937535565323, -54852263, -56348773, -97523843296749, -777, -588340914, -5277957, -838038027052, ) VALUE_81203437_48_202_57278_5095_786160081_93919465439172544_975282546950578033 = ( -81203437, -48, -202, -57278, -5095, -786160081, -93919465439172544, -975282546950578033, ) VALUE_208126785236_890121210854_63897214775493060_6698254859648_491278952624_90261_93114747005637_462457_91376823432390_68 = ( -208126785236, -890121210854, -63897214775493060, -6698254859648, -491278952624, -90261, -93114747005637, -462457, -91376823432390, -68, ) VALUE_355_80669246608_1445178596306_679353181481903_49652061562_533421508 = ( -355, -80669246608, -1445178596306, -679353181481903, -49652061562, -533421508, ) VALUE_58848_29022908056015_35829309187105862_25293146353_75728153211129700_70406362_42467387928552_2736381_8869532336 = ( -58848, -29022908056015, -35829309187105862, -25293146353, -75728153211129700, -70406362, -42467387928552, -2736381, -8869532336, ) VALUE_4044_23904266024445_16124907064250493_345_668380045472_7602241 = ( -4044, -23904266024445, -16124907064250493, -345, -668380045472, -7602241, ) VALUE_83357543849_27799953103921681_47075936_933435736058_81852_85553_37083595_212426303157_64 = ( -83357543849, -27799953103921681, -47075936, -933435736058, -81852, -85553, -37083595, -212426303157, -64, ) @dataclass class NistschemaSvIvListNegativeIntegerEnumeration1: class Meta: name = "NISTSchema-SV-IV-list-negativeInteger-enumeration-1" namespace = "NISTSchema-SV-IV-list-negativeInteger-enumeration-1-NS" value: Optional[NistschemaSvIvListNegativeIntegerEnumeration1Type] = field( default=None, metadata={ "required": True, } )
python
""" The go starter template. Author: Tom Fleet Created: 24/06/2021 """ import shutil import subprocess from pathlib import Path from typing import List, Optional from pytoil.exceptions import GoNotInstalledError from pytoil.starters.base import BaseStarter class GoStarter(BaseStarter): """ The go starter template class. """ def __init__(self, path: Path, name: str) -> None: """ The pytoil go starter template. Args: path (Path): Root path under which to generate the project from this template. name (str): The name of the project to be created. """ self._path = path self._name = name self._files = ["README.md", "main.go"] def __repr__(self) -> str: return self.__class__.__qualname__ + f"(path={self.path!r}, name={self.name!r})" @property def path(self) -> Path: return self._path @property def name(self) -> str: return self._name @property def root(self) -> Path: return self._path.joinpath(self._name) @property def files(self) -> List[Path]: return [self.root.joinpath(filename) for filename in self._files] def raise_for_go(self) -> None: """ Raises an error if the user doesn't have go installed. """ if not bool(shutil.which("go")): raise GoNotInstalledError("Go not found on $PATH.") def generate(self, username: Optional[str] = None) -> None: """ Generate a new go starter template. This is a mix of creating files in python, and invoking `go mod init` in a subprocess to initialise the go modules file. """ # Must have go installed to run go mod init self.raise_for_go() # Make the parent directory self.root.mkdir(parents=True) for file in self.files: file.touch() # Put the header in the readme readme = self.root.joinpath("README.md") readme.write_text(f"# {self.name}\n", encoding="utf-8") # Populate the main.go file go_file = self.root.joinpath("main.go") go_text = 'package main\n\nimport "fmt"\n\nfunc main() {\n\tfmt.Println("Hello World")\n}\n' # noqa: E501 go_file.write_text(go_text, encoding="utf-8") # Invoke go mod init _ = subprocess.run( ["go", "mod", "init", f"github.com/{username}/{self.name}"], check=True, cwd=self.root, capture_output=True, )
python
# Generated by Django 3.0.7 on 2020-10-30 16:41

from django.db import migrations
import inclusive_django_range_fields.fields


class Migration(migrations.Migration):

    dependencies = [
        ('jobsapp', '0011_auto_20201030_1636'),
    ]

    operations = [
        migrations.AddField(
            model_name='job',
            name='salary',
            field=inclusive_django_range_fields.fields.InclusiveIntegerRangeField(help_text='Minimum and maximum annual salary for this job.', null=True, verbose_name='Salary'),
        ),
    ]
python
from operator import eq, ge from functools import partial import pandas as pd from microsetta_public_api.resources import resources ops = { 'equal': eq, 'greater_or_equal': ge, } conditions = { "AND": partial(pd.DataFrame.all, axis=1), "OR": partial(pd.DataFrame.any, axis=1) } def _is_rule(node): rule_fields = ["id", "operator", "value"] for field in rule_fields: if field not in node: return False op = node["operator"] if op not in ops: raise ValueError(f"Only operators in {ops} are supported. " f"Got {op}") return True class MetadataRepo: def __init__(self, metadata=None): if metadata is not None: self._metadata = metadata else: self._metadata = resources.get('metadata', pd.DataFrame()) @property def metadata(self): return self._metadata @property def categories(self): return list(self._metadata.columns) @property def samples(self): return list(self._metadata.index) def category_values(self, category, exclude_na=True): """ Parameters ---------- category : str Metadata category to return the values of exclude_na : bool If True, not a number (na) values will be dropped from the category values Returns ------- list Contains the unique values in the metadata category Raises ------ ValueError If `category` is not an existing category in the metadata """ if category not in self._metadata.columns: raise ValueError(f'No category with name `{category}`') category_values = self._metadata[category].unique() if exclude_na: category_values = category_values[~pd.isnull(category_values)] return list(category_values) def has_category(self, category): if isinstance(category, str): return category in self._metadata.columns else: cols = set(self._metadata.columns) return [cat in cols for cat in category] def has_sample_id(self, sample_id): if isinstance(sample_id, str): return sample_id in self._metadata.index else: index = set(self._metadata.index) return [id_ in index for id_ in sample_id] def get_metadata(self, categories, sample_ids=None, fillna=None): md = self._metadata[categories] if sample_ids is not None: md = md.reindex(sample_ids, fill_value=None) md = md.astype('object') md[pd.isna(md)] = fillna return md def sample_id_matches(self, query): """ Parameters ---------- query : dict Expects a jquerybuilder formatted query Returns ------- list The sample IDs that match the given `query` """ slice_ = self._process_query(query) return list(self._metadata.index[slice_]) def _process_query(self, query): group_fields = ["condition", "rules"] if _is_rule(query): category, op, value = query['id'], query['operator'], \ query['value'] return ops[op](self._metadata[category], value) else: for field in group_fields: if field not in query: raise ValueError(f"query=`{query}` does not appear to be " f"a rule or a group.") if query['condition'] not in conditions: raise ValueError(f"Only conditions in {conditions} are " f"supported. Got {query['condition']}.") else: condition = conditions[query['condition']] return condition(self._safe_concat([self._process_query(rule) for rule in query['rules']], axis=1)) def _safe_concat(self, list_of_df, **concat_kwargs): if len(list_of_df) > 0: return pd.concat(list_of_df, **concat_kwargs) return pd.DataFrame(pd.Series(True, index=self._metadata.index))
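# A minimal sketch of the jquerybuilder-style query accepted by
# sample_id_matches() above. The column names, values and sample IDs are
# hypothetical: rules use "id"/"operator"/"value" (operators "equal" and
# "greater_or_equal"), and groups use "condition" ("AND"/"OR") plus "rules".
import pandas as pd

metadata = pd.DataFrame(
    {"age_years": [25, 40, 60], "bmi": [21.0, 30.5, 27.3]},
    index=["sample-a", "sample-b", "sample-c"],
)
repo = MetadataRepo(metadata)

query = {
    "condition": "AND",
    "rules": [
        {"id": "age_years", "operator": "greater_or_equal", "value": 40},
        {"id": "bmi", "operator": "equal", "value": 30.5},
    ],
}
print(repo.sample_id_matches(query))  # -> ['sample-b']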
python
# Copyright (C) 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Auto-generated file for MCP4725 v0.1.0.
# Generated from peripherals/MCP4725.yaml using Cyanobyte Codegen v0.1.0
from i2cdevice import Device, Register, BitField

I2C_ADDR = 98


def _byte_swap(value):
    # Assumed helper: the generated file references `_byte_swap` without
    # defining it; a 16-bit byte swap is assumed here.
    return ((value & 0xFF) << 8) | ((value >> 8) & 0xFF)


EEPROM = Register('EEPROM', 96, fields=(
    BitField('digitalOut', 0b0001111111111111,
             bitwidth=13,
             values_in=_byte_swap,
             values_out=_byte_swap,
             values_map={
                 'GND': 0,
                 'VCC': 4095
             }),
), read_only=False, bitwidth=12)

VOUT = Register('VOUT', 64, read_only=False, bitwidth=12)

mcp4725 = Device(I2C_ADDR, registers=(
    EEPROM,
    VOUT
))
python
from unittest.mock import patch

import pytest
from telegram.ext import CommandHandler

from autonomia.features import dublin_bike


@pytest.mark.vcr
def test_cmd_dublin_bike(update, context):
    with patch.object(update.message, "reply_text") as m:
        context.args = ["89"]
        dublin_bike.cmd_dublin_bike(update, context)
        m.assert_called_with(
            "Dublin bike station 89:\n"
            " Bikes 4\n"
            " Free spaces 36\n"
            " Location FITZWILLIAM SQUARE EAST\n"
        )


def test_get_bike_station_info_with_invalid_station():
    msg = dublin_bike._get_bike_station_info("80000")
    assert msg == "deu merda!"


def test_cmd_dublin_bike_without_bike_stop(update, context):
    with patch.object(update.message, "reply_text") as m:
        context.args = []
        dublin_bike.cmd_dublin_bike(update, context)
        m.assert_called_with("Use: /bike <bike station number>")


@patch("urllib.request.urlopen")
def test_cmd_dublin_bike_on_error(urlopen_mock, update, context):
    urlopen_mock.side_effect = ValueError()
    with patch.object(update.message, "reply_text") as m:
        context.args = ["200"]
        dublin_bike.cmd_dublin_bike(update, context)
        m.assert_called_with("Oops deu merda!")


def test_dublin_bike_factory():
    handler = dublin_bike.dublin_bike_factory()
    assert isinstance(handler, CommandHandler)
    assert handler.callback == dublin_bike.cmd_dublin_bike
    assert handler.command == ["bike"]
    assert handler.pass_args
python
# All content Copyright (C) 2018 Genomics plc from wecall.bamutils.read_sequence import HIGH_QUALITY from wecall.bamutils.sequence_builder import sequence_builder class SequenceBank(object): """ A container to hold annotated DNA sequences in relation to a reference sequence. """ def __init__(self, reference): self.reference = reference self._read_sequences_with_coverage = [] def __getitem__(self, item): return self._read_sequences_with_coverage[item] def __len__(self): return len(self._read_sequences_with_coverage) @property def chrom(self): return self.reference.chrom @property def variants(self): variants = set() for sequence in self._read_sequences_with_coverage: variants.update(sequence.read_sequence.variants) return variants def add_sequence( self, seq_string, quality_string=None, n_fwd=None, n_rev=None, mapping_quality=HIGH_QUALITY, insert_size=None, read_id=None, read_flags=None, cigar_string=None, read_start=None, read_mate_start=None ): self._read_sequences_with_coverage.extend( sequence_builder( self.reference, seq_string, quality_string, n_fwd, n_rev, mapping_quality, insert_size, read_id, read_flags, cigar_string, read_start, read_mate_start ) ) return self def build_reads(self, chrom_id, read_tags): for read_seq_with_coverage in self._read_sequences_with_coverage: for read in read_seq_with_coverage.build_reads( chrom_id, read_tags): yield read class AsciiVariantGenerator(object): def __init__(self, reference): self.reference = reference def get_variants(self, ascii_haplotypes): seq_bank = SequenceBank(self.reference) for candidate_ascii_haplotype in ascii_haplotypes: seq_bank.add_sequence(candidate_ascii_haplotype) return seq_bank.variants
python
# # PySNMP MIB module ZHONE-GEN-INTERFACE-CONFIG-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZHONE-GEN-INTERFACE-CONFIG-MIB # Produced by pysmi-0.3.4 at Wed May 1 15:47:34 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection") ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex") NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance") ModuleIdentity, iso, Bits, Counter32, NotificationType, Integer32, Unsigned32, TimeTicks, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, ObjectIdentity, Gauge32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "iso", "Bits", "Counter32", "NotificationType", "Integer32", "Unsigned32", "TimeTicks", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "ObjectIdentity", "Gauge32", "IpAddress") DisplayString, TruthValue, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "TextualConvention") zhoneModules, zhoneInterfaceConfig = mibBuilder.importSymbols("Zhone", "zhoneModules", "zhoneInterfaceConfig") ZhoneAlarmSeverity, ZhoneRowStatus = mibBuilder.importSymbols("Zhone-TC", "ZhoneAlarmSeverity", "ZhoneRowStatus") alarmConfigMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 13, 1)) alarmConfigMib.setRevisions(('2010-12-07 02:37', '2008-02-26 06:25',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: alarmConfigMib.setRevisionsDescriptions(('V01.00.02 - Added alarmSeverity', 'V01.00.01 - adding alarmConfigTraps',)) if mibBuilder.loadTexts: alarmConfigMib.setLastUpdated('201012071714Z') if mibBuilder.loadTexts: alarmConfigMib.setOrganization('Organization.') if mibBuilder.loadTexts: alarmConfigMib.setContactInfo('Contact-info.') if mibBuilder.loadTexts: alarmConfigMib.setDescription('Description.') alarmConfigTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 3, 13, 1, 1), ) if mibBuilder.loadTexts: alarmConfigTable.setStatus('current') if mibBuilder.loadTexts: alarmConfigTable.setDescription('The alarm configuration table') alarmConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 3, 13, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex")) if mibBuilder.loadTexts: alarmConfigEntry.setStatus('current') if mibBuilder.loadTexts: alarmConfigEntry.setDescription('An entry in the alarm configuration table.') alarmConfigBitRateThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 3, 13, 1, 1, 1, 1), TruthValue()).setMaxAccess("readcreate") if mibBuilder.loadTexts: alarmConfigBitRateThreshold.setStatus('current') if mibBuilder.loadTexts: alarmConfigBitRateThreshold.setDescription('This field describes the enable status of the Bit Rate Threshold Alarm. If this field is true (1) then the Bit Rate Threshold alarm is enabled. 
If this field is false (2) then the Bit Rate Threshold Alarm is disabled.') alarmConfigBitRateThresholdValue = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 3, 13, 1, 1, 1, 2), Integer32()).setMaxAccess("readcreate") if mibBuilder.loadTexts: alarmConfigBitRateThresholdValue.setStatus('current') if mibBuilder.loadTexts: alarmConfigBitRateThresholdValue.setDescription('This field indicates the Bit Rate Threshold Value which will generate an alarm if the Bit Rate Threshold Alarm is enabled and the Bit Rate of this ifIndex drops below this value.') alarmConfigBitRateThresholdHoldtime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 3, 13, 1, 1, 1, 3), Integer32()).setMaxAccess("readcreate") if mibBuilder.loadTexts: alarmConfigBitRateThresholdHoldtime.setStatus('current') if mibBuilder.loadTexts: alarmConfigBitRateThresholdHoldtime.setDescription('This field indicates the Bit Rate Threshold Holdtime in seconds for which the Bit Rate of the ifIndex must remain below the Bit Rate Threshold Value before an alarm will be generated if the Bit Rate Threshold Alarm is enabled. ') alarmConfigStatusTrap = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 3, 13, 1, 1, 1, 4), TruthValue()).setMaxAccess("readcreate") if mibBuilder.loadTexts: alarmConfigStatusTrap.setStatus('current') if mibBuilder.loadTexts: alarmConfigStatusTrap.setDescription('This field describes the enable status of the Status Trap Alarm. If this field is true (1) then the Status Trap alarm is enabled. If this field is false (2) then the Status Trap Alarm is disabled.') alarmConfigAdminUp = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 3, 13, 1, 1, 1, 5), TruthValue()).setMaxAccess("readcreate") if mibBuilder.loadTexts: alarmConfigAdminUp.setStatus('current') if mibBuilder.loadTexts: alarmConfigAdminUp.setDescription('This field describes the enable status of the Admin Up Alarm. If this field is true (1) then the Admin Up alarm is enabled. If this field is false (2) then the Admin Up Alarm is disabled.') alarmConfigAlarmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 3, 13, 1, 1, 1, 6), ZhoneAlarmSeverity()).setMaxAccess("readcreate") if mibBuilder.loadTexts: alarmConfigAlarmSeverity.setStatus('current') if mibBuilder.loadTexts: alarmConfigAlarmSeverity.setDescription("This object is used to override Trap, Central Alarm Manager and CLI 'LineAlarm' severity levels for the specified interface. ") alarmConfigRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 3, 13, 1, 1, 1, 7), ZhoneRowStatus()).setMaxAccess("readcreate") if mibBuilder.loadTexts: alarmConfigRowStatus.setStatus('current') if mibBuilder.loadTexts: alarmConfigRowStatus.setDescription('This object is used to create, delete or modify a row in this table. 
') alarmConfigTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 5504, 3, 13, 1, 2)) alarmConfigTrapPrefix = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 13, 1, 2, 0)) if mibBuilder.loadTexts: alarmConfigTrapPrefix.setStatus('current') if mibBuilder.loadTexts: alarmConfigTrapPrefix.setDescription('Description.') zhoneAlarmConfigThresholdTrap = NotificationType((1, 3, 6, 1, 4, 1, 5504, 3, 13, 1, 2, 0, 1)) if mibBuilder.loadTexts: zhoneAlarmConfigThresholdTrap.setStatus('current') if mibBuilder.loadTexts: zhoneAlarmConfigThresholdTrap.setDescription('This Trap is generated when this ifIndex bandwidth drops below the alarmConfigBitRateThresholdValue for the time specified in alarmConfigBitRateThesholdHoldtime.') zhoneAlarmConfigThresholdClearTrap = NotificationType((1, 3, 6, 1, 4, 1, 5504, 3, 13, 1, 2, 0, 2)) if mibBuilder.loadTexts: zhoneAlarmConfigThresholdClearTrap.setStatus('current') if mibBuilder.loadTexts: zhoneAlarmConfigThresholdClearTrap.setDescription('This trap is generated when this ifIndex bandwidth goes above the alarmConfigBitRateThresholdValue for the time specified in alarmConfigBitRateThresholdHoldtime.') mibBuilder.exportSymbols("ZHONE-GEN-INTERFACE-CONFIG-MIB", alarmConfigRowStatus=alarmConfigRowStatus, alarmConfigMib=alarmConfigMib, zhoneAlarmConfigThresholdTrap=zhoneAlarmConfigThresholdTrap, alarmConfigBitRateThresholdHoldtime=alarmConfigBitRateThresholdHoldtime, alarmConfigStatusTrap=alarmConfigStatusTrap, alarmConfigAdminUp=alarmConfigAdminUp, alarmConfigBitRateThreshold=alarmConfigBitRateThreshold, alarmConfigEntry=alarmConfigEntry, alarmConfigBitRateThresholdValue=alarmConfigBitRateThresholdValue, alarmConfigTraps=alarmConfigTraps, alarmConfigAlarmSeverity=alarmConfigAlarmSeverity, alarmConfigTrapPrefix=alarmConfigTrapPrefix, PYSNMP_MODULE_ID=alarmConfigMib, zhoneAlarmConfigThresholdClearTrap=zhoneAlarmConfigThresholdClearTrap, alarmConfigTable=alarmConfigTable)
python
from typing import Any

from rpg.items import Equippable


class Armor(Equippable):
    config_filename = "armor.yaml"

    __slots__ = ("type",)

    def __init__(self, **kwargs: Any):
        self.type: str = kwargs.pop("type")

        super().__init__(**kwargs)

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} id={self.id} name={self.name} type={self.type} modifiers={self.modifiers}>"
python
from .dual_network import DualNetBounds, robust_loss, robust_loss_parallel, DualNetwork
from .dual_layers import DualLinear, DualReLU
from .dual_inputs import select_input, InfBallBoxBounds
from .utils import DenseSequential, Dense, epsilon_from_model
python
import a1 #$ use=moduleImport("a1") x = a1.blah1 #$ use=moduleImport("a1").getMember("blah1") import a2 as m2 #$ use=moduleImport("a2") x2 = m2.blah2 #$ use=moduleImport("a2").getMember("blah2") import a3.b3 as m3 #$ use=moduleImport("a3").getMember("b3") x3 = m3.blah3 #$ use=moduleImport("a3").getMember("b3").getMember("blah3") from a4.b4 import c4 as m4 #$ use=moduleImport("a4").getMember("b4").getMember("c4") x4 = m4.blah4 #$ use=moduleImport("a4").getMember("b4").getMember("c4").getMember("blah4") import a.b.c.d #$ use=moduleImport("a") ab = a.b #$ use=moduleImport("a").getMember("b") abc = ab.c #$ use=moduleImport("a").getMember("b").getMember("c") abcd = abc.d #$ use=moduleImport("a").getMember("b").getMember("c").getMember("d") x5 = abcd.method() #$ use=moduleImport("a").getMember("b").getMember("c").getMember("d").getMember("method").getReturn() from a6 import m6 #$ use=moduleImport("a6").getMember("m6") x6 = m6().foo().bar() #$ use=moduleImport("a6").getMember("m6").getReturn().getMember("foo").getReturn().getMember("bar").getReturn() import foo.baz.baz as fbb #$ use=moduleImport("foo").getMember("baz").getMember("baz") from foo.bar.baz import quux as fbbq #$ use=moduleImport("foo").getMember("bar").getMember("baz").getMember("quux") from ham.bar.eggs import spam as hbes #$ use=moduleImport("ham").getMember("bar").getMember("eggs").getMember("spam") fbb.quux #$ use=moduleImport("foo").getMember("baz").getMember("baz").getMember("quux") fbbq #$ use=moduleImport("foo").getMember("bar").getMember("baz").getMember("quux") hbes #$ use=moduleImport("ham").getMember("bar").getMember("eggs").getMember("spam") import foo.bar.baz #$ use=moduleImport("foo") # Relative imports. These are ignored from .foo import bar from ..foobar import baz # Use of imports across scopes def use_m4(): x = m4.blah4 #$ use=moduleImport("a4").getMember("b4").getMember("c4").getMember("blah4") def local_import_use(): from foo import bar #$ use=moduleImport("foo").getMember("bar") x = bar() #$ use=moduleImport("foo").getMember("bar").getReturn() from eggs import ham as spam #$ use=moduleImport("eggs").getMember("ham") def bbb(): f = spam #$ use=moduleImport("eggs").getMember("ham") from danger import SOURCE #$ use=moduleImport("danger").getMember("SOURCE") foo = SOURCE #$ use=moduleImport("danger").getMember("SOURCE") def change_foo(): global foo foo = SOURCE #$ use=moduleImport("danger").getMember("SOURCE") def f(): global foo sink(foo) #$ use=moduleImport("danger").getMember("SOURCE") foo = NONSOURCE change_foo() sink(foo) #$ use=moduleImport("danger").getMember("SOURCE") # Built-ins def use_of_builtins(): for x in range(5): #$ use=moduleImport("builtins").getMember("range").getReturn() if x < len([]): #$ use=moduleImport("builtins").getMember("len").getReturn() print("Hello") #$ use=moduleImport("builtins").getMember("print").getReturn() raise Exception("Farewell") #$ use=moduleImport("builtins").getMember("Exception").getReturn() def imported_builtins(): import builtins #$ use=moduleImport("builtins") def open(f): return builtins.open(f) #$ MISSING: use=moduleImport("builtins").getMember("open").getReturn() def redefine_print(): def my_print(x): import builtins #$ use=moduleImport("builtins") builtins.print("I'm printing", x) #$ use=moduleImport("builtins").getMember("print").getReturn() print = my_print print("these words") def local_redefine_chr(): chr = 5 return chr def global_redefine_chr(): global chr chr = 6 return chr def what_is_chr_now(): # If global_redefine_chr has been run, then the following is 
_not_ a reference to the built-in chr return chr(123) #$ MISSING: use=moduleImport("builtins").getMember("chr").getReturn() def obscured_print(): p = print #$ use=moduleImport("builtins").getMember("print") p("Can you see me?") #$ use=moduleImport("builtins").getMember("print").getReturn() def python2_style(): # In Python 3, `__builtin__` has no special meaning. from __builtin__ import open #$ use=moduleImport("__builtin__").getMember("open") open("hello.txt") #$ use=moduleImport("__builtin__").getMember("open").getReturn()
python
from .users import *  # we import all the classes from the circle file.
python
# -*- coding: utf-8 -*- # Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import tempfile from oslo_concurrency import processutils from os_net_config import impl_eni from os_net_config import objects from os_net_config.tests import base from os_net_config import utils _AUTO = "auto eth0\n" _v4_IFACE_NO_IP = _AUTO + "iface eth0 inet manual\n" _V4_IFACE_STATIC_IP = _AUTO + """iface eth0 inet static address 192.168.1.2 netmask 255.255.255.0 """ _V6_IFACE_STATIC_IP = _AUTO + """iface eth0 inet6 static address fe80::2677:3ff:fe7d:4c netmask ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff """ _IFACE_DHCP = _AUTO + "iface eth0 inet dhcp\n" _OVS_PORT_BASE = _AUTO + "allow-br0 eth0\n" _OVS_PORT_IFACE = _OVS_PORT_BASE + """iface eth0 inet manual ovs_bridge br0 ovs_type OVSPort """ _OVS_BRIDGE_DHCP = """auto br0 allow-ovs br0 iface br0 inet dhcp ovs_type OVSBridge ovs_ports eth0 pre-up ip addr flush dev eth0 """ _OVS_BRIDGE_DHCP_PRIMARY_INTERFACE = _OVS_BRIDGE_DHCP + \ " ovs_extra set bridge br0 other-config:hwaddr=a1:b2:c3:d4:e5\n" _OVS_BRIDGE_DHCP_OVS_EXTRA = _OVS_BRIDGE_DHCP + \ " ovs_extra set bridge br0 other-config:hwaddr=a1:b2:c3:d4:e5" + \ " -- br-set-external-id br-ctlplane bridge-id br-ctlplane\n" _VLAN_NO_IP = """auto vlan5 iface vlan5 inet manual vlan-raw-device eth0 """ _VLAN_OVS_PORT = """auto vlan5 allow-br0 vlan5 iface vlan5 inet manual ovs_bridge br0 ovs_type OVSIntPort ovs_options tag=5 """ _RTS = """up route add -net 172.19.0.0 netmask 255.255.255.0 gw 192.168.1.1 down route del -net 172.19.0.0 netmask 255.255.255.0 gw 192.168.1.1 """ class TestENINetConfig(base.TestCase): def setUp(self): super(TestENINetConfig, self).setUp() self.provider = impl_eni.ENINetConfig() self.if_name = 'eth0' def tearDown(self): super(TestENINetConfig, self).tearDown() def get_interface_config(self, name="eth0"): return self.provider.interfaces[name] def get_route_config(self): return self.provider.routes[self.if_name] def _default_interface(self, addr=[], rts=[]): return objects.Interface(self.if_name, addresses=addr, routes=rts) def test_interface_no_ip(self): interface = self._default_interface() self.provider.add_interface(interface) self.assertEqual(_v4_IFACE_NO_IP, self.get_interface_config()) def test_add_interface_with_v4(self): v4_addr = objects.Address('192.168.1.2/24') interface = self._default_interface([v4_addr]) self.provider.add_interface(interface) self.assertEqual(_V4_IFACE_STATIC_IP, self.get_interface_config()) def test_add_interface_with_v6(self): v6_addr = objects.Address('fe80::2677:3ff:fe7d:4c') interface = self._default_interface([v6_addr]) self.provider.add_interface(interface) self.assertEqual(_V6_IFACE_STATIC_IP, self.get_interface_config()) def test_add_interface_dhcp(self): interface = self._default_interface() interface.use_dhcp = True self.provider.add_interface(interface) self.assertEqual(_IFACE_DHCP, self.get_interface_config()) def test_add_interface_with_both_v4_and_v6(self): v4_addr = objects.Address('192.168.1.2/24') v6_addr = 
objects.Address('fe80::2677:3ff:fe7d:4c') interface = self._default_interface([v4_addr, v6_addr]) self.provider.add_interface(interface) self.assertEqual(_V4_IFACE_STATIC_IP + _V6_IFACE_STATIC_IP, self.get_interface_config()) def test_add_ovs_port_interface(self): interface = self._default_interface() interface.ovs_port = True interface.bridge_name = 'br0' self.provider.add_interface(interface) self.assertEqual(_OVS_PORT_IFACE, self.get_interface_config()) def test_network_with_routes(self): route1 = objects.Route('192.168.1.1', '172.19.0.0/24') v4_addr = objects.Address('192.168.1.2/24') interface = self._default_interface([v4_addr], [route1]) self.provider.add_interface(interface) self.assertEqual(_V4_IFACE_STATIC_IP, self.get_interface_config()) self.assertEqual(_RTS, self.get_route_config()) def test_network_ovs_bridge_with_dhcp(self): interface = self._default_interface() bridge = objects.OvsBridge('br0', use_dhcp=True, members=[interface]) self.provider.add_bridge(bridge) self.provider.add_interface(interface) self.assertEqual(_OVS_PORT_IFACE, self.get_interface_config()) self.assertEqual(_OVS_BRIDGE_DHCP, self.provider.bridges['br0']) def test_network_ovs_bridge_with_dhcp_and_primary_interface(self): def test_interface_mac(name): return "a1:b2:c3:d4:e5" self.stubs.Set(utils, 'interface_mac', test_interface_mac) interface = objects.Interface(self.if_name, primary=True) bridge = objects.OvsBridge('br0', use_dhcp=True, members=[interface]) self.provider.add_bridge(bridge) self.provider.add_interface(interface) self.assertEqual(_OVS_PORT_IFACE, self.get_interface_config()) self.assertEqual(_OVS_BRIDGE_DHCP_PRIMARY_INTERFACE, self.provider.bridges['br0']) def test_network_ovs_bridge_with_dhcp_and_primary_with_ovs_extra(self): def test_interface_mac(name): return "a1:b2:c3:d4:e5" self.stubs.Set(utils, 'interface_mac', test_interface_mac) interface = objects.Interface(self.if_name, primary=True) ovs_extra = "br-set-external-id br-ctlplane bridge-id br-ctlplane" bridge = objects.OvsBridge('br0', use_dhcp=True, members=[interface], ovs_extra=[ovs_extra]) self.provider.add_bridge(bridge) self.provider.add_interface(interface) self.assertEqual(_OVS_PORT_IFACE, self.get_interface_config()) self.assertEqual(_OVS_BRIDGE_DHCP_OVS_EXTRA, self.provider.bridges['br0']) def test_vlan(self): vlan = objects.Vlan('eth0', 5) self.provider.add_vlan(vlan) self.assertEqual(_VLAN_NO_IP, self.get_interface_config('vlan5')) def test_vlan_ovs_bridge_int_port(self): vlan = objects.Vlan('eth0', 5) bridge = objects.OvsBridge('br0', use_dhcp=True, members=[vlan]) self.provider.add_bridge(bridge) self.provider.add_vlan(vlan) self.assertEqual(_VLAN_OVS_PORT, self.get_interface_config('vlan5')) class TestENINetConfigApply(base.TestCase): def setUp(self): super(TestENINetConfigApply, self).setUp() self.temp_config_file = tempfile.NamedTemporaryFile() self.ifup_interface_names = [] def test_config_path(): return self.temp_config_file.name self.stubs.Set(impl_eni, '_network_config_path', test_config_path) def test_execute(*args, **kwargs): if args[0] == '/sbin/ifup': self.ifup_interface_names.append(args[1]) pass self.stubs.Set(processutils, 'execute', test_execute) self.provider = impl_eni.ENINetConfig() def tearDown(self): self.temp_config_file.close() super(TestENINetConfigApply, self).tearDown() def test_network_apply(self): route = objects.Route('192.168.1.1', '172.19.0.0/24') v4_addr = objects.Address('192.168.1.2/24') interface = objects.Interface('eth0', addresses=[v4_addr], routes=[route]) 
self.provider.add_interface(interface) self.provider.apply() iface_data = utils.get_file_data(self.temp_config_file.name) self.assertEqual((_V4_IFACE_STATIC_IP + _RTS), iface_data) self.assertIn('eth0', self.ifup_interface_names) def test_apply_noactivate(self): route = objects.Route('192.168.1.1', '172.19.0.0/24') v4_addr = objects.Address('192.168.1.2/24') interface = objects.Interface('eth0', addresses=[v4_addr], routes=[route]) self.provider.add_interface(interface) self.provider.apply(activate=False) iface_data = utils.get_file_data(self.temp_config_file.name) self.assertEqual((_V4_IFACE_STATIC_IP + _RTS), iface_data) self.assertEqual([], self.ifup_interface_names) def test_dhcp_ovs_bridge_network_apply(self): interface = objects.Interface('eth0') bridge = objects.OvsBridge('br0', use_dhcp=True, members=[interface]) self.provider.add_interface(interface) self.provider.add_bridge(bridge) self.provider.apply() iface_data = utils.get_file_data(self.temp_config_file.name) self.assertEqual((_OVS_BRIDGE_DHCP + _OVS_PORT_IFACE), iface_data) self.assertIn('eth0', self.ifup_interface_names) self.assertIn('br0', self.ifup_interface_names)
python
# Copyright (c) OpenMMLab. All rights reserved. import copy import warnings from typing import Dict, Iterable, Optional import torch import torch.nn as nn from mmcv.parallel import MMDataParallel, MMDistributedDataParallel from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner, Fp16OptimizerHook, OptimizerHook, build_optimizer, build_runner) from mmcv.utils import ConfigDict, build_from_cfg from mmdet.core import DistEvalHook, EvalHook from mmfewshot.detection.core import (QuerySupportDistEvalHook, QuerySupportEvalHook) from mmfewshot.detection.datasets import (build_dataloader, build_dataset, get_copy_dataset_type) from mmfewshot.utils import compat_cfg, get_root_logger def train_detector(model: nn.Module, dataset: Iterable, cfg: ConfigDict, distributed: bool = False, validate: bool = False, timestamp: Optional[str] = None, meta: Optional[Dict] = None) -> None: cfg = compat_cfg(cfg) logger = get_root_logger(log_level=cfg.log_level) # prepare data loaders dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] train_dataloader_default_args = dict( samples_per_gpu=2, workers_per_gpu=2, # `num_gpus` will be ignored if distributed num_gpus=len(cfg.gpu_ids), dist=distributed, seed=cfg.seed, data_cfg=copy.deepcopy(cfg.data), use_infinite_sampler=cfg.use_infinite_sampler, persistent_workers=False) train_loader_cfg = { **train_dataloader_default_args, **cfg.data.get('train_dataloader', {}) } data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset] # put model on gpus if distributed: find_unused_parameters = cfg.get('find_unused_parameters', False) # Sets the `find_unused_parameters` parameter in # torch.nn.parallel.DistributedDataParallel model = MMDistributedDataParallel( model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters) else: # Please use MMCV >= 1.4.4 for CPU training! model = MMDataParallel(model, device_ids=cfg.gpu_ids) # build runner optimizer = build_optimizer(model, cfg.optimizer) # Infinite sampler will return a infinite stream of index. It can NOT # be used in `EpochBasedRunner`, because the `EpochBasedRunner` will # enumerate the dataloader forever. Thus, `InfiniteEpochBasedRunner` # is designed to handle dataloader with infinite sampler. 
if cfg.use_infinite_sampler and cfg.runner['type'] == 'EpochBasedRunner': cfg.runner['type'] = 'InfiniteEpochBasedRunner' runner = build_runner( cfg.runner, default_args=dict( model=model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta)) # an ugly workaround to make .log and .log.json filenames the same runner.timestamp = timestamp # fp16 setting fp16_cfg = cfg.get('fp16', None) if fp16_cfg is not None: optimizer_config = Fp16OptimizerHook( **cfg.optimizer_config, **fp16_cfg, distributed=distributed) elif distributed and 'type' not in cfg.optimizer_config: optimizer_config = OptimizerHook(**cfg.optimizer_config) else: optimizer_config = cfg.optimizer_config # register hooks runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None)) if distributed: if isinstance(runner, EpochBasedRunner): runner.register_hook(DistSamplerSeedHook()) # register eval hooks if validate: # currently only support single images testing val_dataloader_default_args = dict( samples_per_gpu=1, workers_per_gpu=2, dist=distributed, shuffle=False, persistent_workers=False) val_dataloader_args = { **val_dataloader_default_args, **cfg.data.get('val_dataloader', {}) } val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) assert val_dataloader_args['samples_per_gpu'] == 1, \ 'currently only support single images testing' val_dataloader = build_dataloader(val_dataset, **val_dataloader_args) eval_cfg = cfg.get('evaluation', {}) eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' # Prepare `model_init` dataset for model initialization. In most cases, # the `model_init` dataset contains support images and few shot # annotations. The meta-learning based detectors will extract the # features from images and save them as part of model parameters. # The `model_init` dataset can be mutually configured or # randomly selected during runtime. if cfg.data.get('model_init', None) is not None: # The randomly selected few shot support during runtime can not be # configured offline. In such case, the copy datasets are designed # to directly copy the randomly generated support set for model # initialization. The copy datasets copy the `data_infos` by # passing it as argument and other arguments can be different # from training dataset. if cfg.data.model_init.pop('copy_from_train_dataset', False): if cfg.data.model_init.ann_cfg is not None: warnings.warn( 'model_init dataset will copy support ' 'dataset used for training and original ' 'ann_cfg will be discarded', UserWarning) # modify dataset type to support copying data_infos operation cfg.data.model_init.type = \ get_copy_dataset_type(cfg.data.model_init.type) if not hasattr(dataset[0], 'get_support_data_infos'): raise NotImplementedError( f'`get_support_data_infos` is not implemented ' f'in {dataset[0].__class__.__name__}.') cfg.data.model_init.ann_cfg = [ dict(data_infos=dataset[0].get_support_data_infos()) ] # The `model_init` dataset will be saved into checkpoint, which # allows model to be initialized with these data as default, if # the config of data is not be overwritten during testing. 
cfg.checkpoint_config.meta['model_init_ann_cfg'] = \ cfg.data.model_init.ann_cfg samples_per_gpu = cfg.data.model_init.pop('samples_per_gpu', 1) workers_per_gpu = cfg.data.model_init.pop('workers_per_gpu', 1) model_init_dataset = build_dataset(cfg.data.model_init) # Noted that `dist` should be FALSE to make all the models on # different gpus get same data results in same initialized models. model_init_dataloader = build_dataloader( model_init_dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=workers_per_gpu, dist=False, shuffle=False) # eval hook for meta-learning based query-support detector, it # supports model initialization before regular evaluation. eval_hook = QuerySupportDistEvalHook \ if distributed else QuerySupportEvalHook runner.register_hook( eval_hook(model_init_dataloader, val_dataloader, **eval_cfg), priority='LOW') else: # for the fine-tuned based methods, the evaluation is the # same as mmdet. eval_hook = DistEvalHook if distributed else EvalHook runner.register_hook( eval_hook(val_dataloader, **eval_cfg), priority='LOW') # user-defined hooks if cfg.get('custom_hooks', None): custom_hooks = cfg.custom_hooks assert isinstance( custom_hooks, list ), f'custom_hooks expect list type, but got {type(custom_hooks)}' for hook_cfg in cfg.custom_hooks: assert isinstance( hook_cfg, dict ), f'Each item in custom_hooks expects dict type, but ' \ f'got {type(hook_cfg)}' hook_cfg = hook_cfg.copy() priority = hook_cfg.pop('priority', 'NORMAL') hook = build_from_cfg(hook_cfg, HOOKS) runner.register_hook(hook, priority=priority) if cfg.resume_from: runner.resume(cfg.resume_from) elif cfg.load_from: runner.load_checkpoint(cfg.load_from) runner.run(data_loaders, cfg.workflow)
python
import argparse import gym from gym import wrappers import os.path as osp import random import numpy as np import tensorflow as tf import tensorflow.contrib.layers as layers import dqn from dqn_utils import * from atari_wrappers import * def cartpole_model(img_in, num_actions, scope, reuse=False): # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf with tf.variable_scope(scope, reuse=reuse): # out = tf.ones(tf.shape(img_in)) out = img_in out = layers.flatten(out) with tf.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=16, activation_fn=tf.nn.relu, scope='fc_input') out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None, scope='fc_head') return out def cartpole_learn(env, session, num_timesteps): # This is just a rough estimate num_iterations = float(num_timesteps) / 4.0 # lr_multiplier = 1.0 # lr_multiplier = 0.1 # lr_schedule = PiecewiseSchedule([ # (0, 1e-4 * lr_multiplier), # (num_iterations / 2, 1e-5 * lr_multiplier), # ], # outside_value=5e-5 * lr_multiplier) lr_schedule = InverseSchedule(initial_p=0.1, gamma=0.6) optimizer = dqn.OptimizerSpec( constructor=tf.train.GradientDescentOptimizer, # constructor=tf.train.AdamOptimizer, # kwargs=dict(epsilon=1e-4), kwargs=dict(), # constructor=tf.train.RMSPropOptimizer, # kwargs=dict(epsilon=1e-1), lr_schedule=lr_schedule ) def stopping_criterion(env, t): # notice that here t is the number of steps of the wrapped env, # which is different from the number of steps in the underlying env return get_wrapper_by_name(env, "Monitor").get_total_steps() >= num_timesteps exploration_schedule = PiecewiseSchedule( [ (0, 1.0), # (0.2 * num_timesteps, 0.9), # (0.5 * num_timesteps, 0.5), (0.1 * num_timesteps, 0.1), ], outside_value=0.01 ) dqn.learn( env, q_func=cartpole_model, optimizer_spec=optimizer, session=session, exploration=exploration_schedule, stopping_criterion=stopping_criterion, replay_buffer_size=100000, batch_size=256, gamma=0.99, learning_starts=2000, learning_freq=1, frame_history_len=4, target_update_freq=1000, grad_norm_clipping=1000, ) env.close() def get_available_gpus(): from tensorflow.python.client import device_lib local_device_protos = device_lib.list_local_devices() return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU'] def set_global_seeds(i): try: import tensorflow as tf except ImportError: pass else: tf.set_random_seed(i) np.random.seed(i) random.seed(i) def get_session(): tf.reset_default_graph() tf_config = tf.ConfigProto( inter_op_parallelism_threads=1, intra_op_parallelism_threads=1) session = tf.Session(config=tf_config) print("AVAILABLE GPUS: ", get_available_gpus()) return session def get_env(task, seed): env_id = task.env_id env = gym.make(env_id) set_global_seeds(seed) env.seed(seed) expt_dir = '/tmp/hw3_vid_dir2/' env = wrappers.Monitor(env, osp.join(expt_dir, "gym"), force=True) env = wrap_deepmind(env) return env def main(): # Run training max_timesteps = 100000 seed = 0 # Use a seed of zero (you may want to randomize the seed!) env = gym.make("CartPole-v0") env.seed(seed) set_global_seeds(seed) env = wrappers.Monitor(env, '/tmp/cartpole-experiment-1', force=True) session = get_session() cartpole_learn(env, session, num_timesteps=max_timesteps) if __name__ == "__main__": main()
python
# This code is part of Qiskit. # # (C) Copyright IBM 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ Composite StateTomography and ProcessTomography experiment tests """ from test.base import QiskitExperimentsTestCase from qiskit import QuantumCircuit import qiskit.quantum_info as qi from qiskit.providers.aer import AerSimulator from qiskit_experiments.framework import BatchExperiment, ParallelExperiment from qiskit_experiments.library import StateTomography, ProcessTomography from .tomo_utils import filter_results class TestCompositeTomography(QiskitExperimentsTestCase): """Test composite tomography experiments""" def test_batch_qst_exp(self): """Test batch state tomography experiment with measurement_qubits kwarg""" # Subsystem unitaries seed = 1111 nq = 3 ops = [qi.random_unitary(2, seed=seed + i) for i in range(nq)] # Preparation circuit circuit = QuantumCircuit(nq) for i, op in enumerate(ops): circuit.append(op, [i]) # Component experiments exps = [] targets = [] for i in range(nq): targets.append(qi.Statevector(ops[i].to_instruction())) exps.append(StateTomography(circuit, measurement_qubits=[i])) # Run batch experiments backend = AerSimulator(seed_simulator=9000) batch_exp = BatchExperiment(exps) batch_data = batch_exp.run(backend) self.assertExperimentDone(batch_data) # Check target fidelity of component experiments f_threshold = 0.95 for i in range(batch_exp.num_experiments): results = batch_data.child_data(i).analysis_results() # Check state is density matrix state = filter_results(results, "state").value self.assertTrue( isinstance(state, qi.DensityMatrix), msg="fitted state is not density matrix" ) # Check fit state fidelity fid = filter_results(results, "state_fidelity").value self.assertGreater(fid, f_threshold, msg="fit fidelity is low") # Manually check fidelity target_fid = qi.state_fidelity(state, targets[i], validate=False) self.assertAlmostEqual(fid, target_fid, places=6, msg="result fidelity is incorrect") def test_parallel_qst_exp(self): """Test parallel state tomography experiment""" # Subsystem unitaries seed = 1221 nq = 4 ops = [qi.random_unitary(2, seed=seed + i) for i in range(nq)] # Component experiments exps = [] targets = [] for i in range(nq): exps.append(StateTomography(ops[i], qubits=[i])) targets.append(qi.Statevector(ops[i].to_instruction())) # Run batch experiments backend = AerSimulator(seed_simulator=9000) par_exp = ParallelExperiment(exps) par_data = par_exp.run(backend) self.assertExperimentDone(par_data) # Check target fidelity of component experiments f_threshold = 0.95 for i in range(par_exp.num_experiments): results = par_data.child_data(i).analysis_results() # Check state is density matrix state = filter_results(results, "state").value self.assertTrue( isinstance(state, qi.DensityMatrix), msg="fitted state is not density matrix" ) # Check fit state fidelity fid = filter_results(results, "state_fidelity").value self.assertGreater(fid, f_threshold, msg="fit fidelity is low") # Manually check fidelity target_fid = qi.state_fidelity(state, targets[i], validate=False) self.assertAlmostEqual(fid, target_fid, places=6, msg="result fidelity is incorrect") def 
test_batch_qpt_exp_with_measurement_qubits(self): """Test batch process tomography experiment with kwargs""" seed = 1111 nq = 3 ops = [qi.random_unitary(2, seed=seed + i) for i in range(nq)] # Preparation circuit circuit = QuantumCircuit(nq) for i, op in enumerate(ops): circuit.append(op, [i]) # Component experiments exps = [] targets = [] for i in range(nq): targets.append(ops[i]) exps.append(ProcessTomography(circuit, measurement_qubits=[i], preparation_qubits=[i])) # Run batch experiments backend = AerSimulator(seed_simulator=9000) batch_exp = BatchExperiment(exps) batch_data = batch_exp.run(backend) self.assertExperimentDone(batch_data) # Check target fidelity of component experiments f_threshold = 0.95 for i in range(batch_exp.num_experiments): results = batch_data.child_data(i).analysis_results() # Check state is density matrix state = filter_results(results, "state").value self.assertTrue(isinstance(state, qi.Choi), msg="fitted state is not a Choi matrix") # Check fit state fidelity fid = filter_results(results, "process_fidelity").value self.assertGreater(fid, f_threshold, msg="fit fidelity is low") # Manually check fidelity target_fid = qi.process_fidelity(state, targets[i], require_tp=False, require_cp=False) self.assertAlmostEqual(fid, target_fid, places=6, msg="result fidelity is incorrect") def test_parallel_qpt_exp(self): """Test parallel process tomography experiment""" # Subsystem unitaries seed = 1221 nq = 4 ops = [qi.random_unitary(2, seed=seed + i) for i in range(nq)] # Component experiments exps = [] targets = [] for i in range(nq): exps.append(ProcessTomography(ops[i], qubits=[i])) targets.append(ops[i]) # Run batch experiments backend = AerSimulator(seed_simulator=9000) par_exp = ParallelExperiment(exps) par_data = par_exp.run(backend) self.assertExperimentDone(par_data) # Check target fidelity of component experiments f_threshold = 0.95 for i in range(par_exp.num_experiments): results = par_data.child_data(i).analysis_results() # Check state is density matrix state = filter_results(results, "state").value self.assertTrue(isinstance(state, qi.Choi), msg="fitted state is not a Choi matrix") # Check fit state fidelity fid = filter_results(results, "process_fidelity").value self.assertGreater(fid, f_threshold, msg="fit fidelity is low") # Manually check fidelity target_fid = qi.process_fidelity(state, targets[i], require_tp=False, require_cp=False) self.assertAlmostEqual(fid, target_fid, places=6, msg="result fidelity is incorrect") def test_mixed_batch_exp(self): """Test batch state and process tomography experiment""" # Subsystem unitaries state_op = qi.random_unitary(2, seed=321) chan_op = qi.random_unitary(2, seed=123) state_target = qi.Statevector(state_op.to_instruction()) chan_target = qi.Choi(chan_op.to_instruction()) state_exp = StateTomography(state_op) chan_exp = ProcessTomography(chan_op) batch_exp = BatchExperiment([state_exp, chan_exp]) # Run batch experiments backend = AerSimulator(seed_simulator=9000) par_data = batch_exp.run(backend) self.assertExperimentDone(par_data) f_threshold = 0.95 # Check state tomo results state_results = par_data.child_data(0).analysis_results() state = filter_results(state_results, "state").value # Check fit state fidelity state_fid = filter_results(state_results, "state_fidelity").value self.assertGreater(state_fid, f_threshold, msg="fit fidelity is low") # Manually check fidelity target_fid = qi.state_fidelity(state, state_target, validate=False) self.assertAlmostEqual(state_fid, target_fid, places=6, msg="result fidelity is 
incorrect") # Check process tomo results chan_results = par_data.child_data(1).analysis_results() chan = filter_results(chan_results, "state").value # Check fit process fidelity chan_fid = filter_results(chan_results, "process_fidelity").value self.assertGreater(chan_fid, f_threshold, msg="fit fidelity is low") # Manually check fidelity target_fid = qi.process_fidelity(chan, chan_target, require_cp=False, require_tp=False) self.assertAlmostEqual(chan_fid, target_fid, places=6, msg="result fidelity is incorrect")
python
# %% consumer_key = "idBfc3mYzrfBPxRM1z5AhXxAA" consumer_secret = "K50925I1FObqf6LA8MwiUyCBWlOxtrXXpi0aUAFD0wNCFBPQ3j" access_token = "1245495541330579457-6EBT7O9j98LgAt3dXxzsTK5FFAA2Lg" access_secret = "jUP2N1nHeC6nzD30F4forjx7WxoOI603b4CqHdUnA6wqL" # %% import tweepy auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_secret) # %% api = tweepy.API(auth) # %% public_tweets = api.home_timeline() for tweet in public_tweets: print(tweet.text) # %% api.me().screen_name # %%
python
""" Copyright 2011 Lars Kruse <devel@sumpfralle.de> This file is part of PyCAM. PyCAM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. PyCAM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with PyCAM. If not, see <http://www.gnu.org/licenses/>. """ import imp import inspect import os import uuid from pycam.Utils import get_non_conflicting_name from pycam.Utils.events import get_event_handler import pycam.Utils.log import pycam.Utils.locations _log = pycam.Utils.log.get_logger() def _get_plugin_imports(): # We want to import all relevant GUI modules into the namespace of each plugin. # We do this once for all - in order to centralize and minimize error handling. result = {key: None for key in ("gtk", "gdk", "gdkpixbuf", "gdkobject", "gio", "glib", "GL", "GLU", "GLUT")} # By default, Gdk loads the OpenGL 3.2 Core profile. However, PyCAM's rendering # code uses the fixed function pipeline, which was removed in the Core profile. # So we have to resort to this semi-public API to ask Gdk to use a Compatibility # profile instead. os.environ['GDK_GL'] = 'legacy' try: import gi gi.require_version('Gtk', '3.0') from gi.repository import Gtk from gi.repository import Gdk from gi.repository import GdkPixbuf from gi.repository import Gio from gi.repository import GLib from gi.repository import GObject result["gtk"] = Gtk result["gdk"] = Gdk result["gdkpixbuf"] = GdkPixbuf result["gio"] = Gio result["glib"] = GLib result["gobject"] = GObject except ImportError: _log.warning("Failed to import GTK3 module. Maybe you want to install 'python3-gi' " "for pycam's graphical user interface.") if result["gtk"]: try: import OpenGL.GL import OpenGL.GLU import OpenGL.GLUT result["GL"] = OpenGL.GL result["GLU"] = OpenGL.GLU result["GLUT"] = OpenGL.GLUT except ImportError: # OpenGL-related plugins will complain later about the missing dependency _log.warning("Failed to import OpenGL module. Maybe you want to install " "'python3-opengl' for the 3D visualization.") return result class PluginBase: UI_FILE = None DEPENDS = [] CATEGORIES = [] ICONS = {} ICON_SIZE = 23 _imports = _get_plugin_imports() def __init__(self, core, name): self.enabled = True self.name = name self.core = core self.gui = None self.log = _log # convenience imports for GUI modules (self._gtk, self._gdk, self._GL, ...) for key, value in self._imports.items(): setattr(self, "_" + key, value) if self.UI_FILE and self._gtk: gtk_build_file = pycam.Utils.locations.get_ui_file_location(self.UI_FILE) if gtk_build_file: self.gui = self._gtk.Builder() try: self.gui.add_from_file(gtk_build_file) except RuntimeError as err_msg: self.log.info("Failed to import UI file (%s): %s", gtk_build_file, err_msg) self.gui = None else: # All windows should share the same accel group (for # keyboard shortcuts). 
try: common_accel_group = self.core["gtk-accel-group"] except KeyError: self.log.info("Failed to connect to a common GTK accelerator group") common_accel_group = None if common_accel_group: for obj in self.gui.get_objects(): if isinstance(obj, self._gtk.Window): obj.add_accel_group(common_accel_group) if self._gtk: for key in self.ICONS: icon_location = pycam.Utils.locations.get_ui_file_location(self.ICONS[key]) if icon_location: try: self.ICONS[key] = self._gdkpixbuf.Pixbuf.new_from_file_at_size( icon_location, self.ICON_SIZE, self.ICON_SIZE) except self._gobject.GError: self.log.info("Failed to load icon: %s", self.ICONS[key]) self.ICONS[key] = None else: self.log.debug("Failed to locate icon: %s", self.ICONS[key]) self.ICONS[key] = None self._func_cache = {} self._gtk_handler_id_cache = [] self.enabled = True self._state_items = [] def register_state_item(self, path, get_func, set_func=None): group = (path, get_func, set_func) if group in self._state_items: self.log.debug("Trying to register a state item twice: %s", path) else: self._state_items.append(group) def clear_state_items(self): self._state_items = [] def unregister_state_item(self, path, get_func, set_func=None): group = (path, get_func, set_func) if group in self._state_items: self._state_items.remove(group) else: self.log.debug("Trying to unregister an unknown state item: %s", path) def dump_state(self, result): for path, get_func, set_func in self._state_items: if callable(get_func): value = get_func() else: value = get_func result.append((path, value)) def __get_handler_func(self, func, params=None): if params is None: params = [] params = tuple(params) try: key = (hash(func), repr(params)) except TypeError: key = (id(func), repr(params)) if key not in self._func_cache: if callable(func): if not params: result = func else: result = lambda *args, **kwargs: func(*(args + params), **kwargs) else: # it is the name of a signal result = lambda *args: self.core.emit_event(func, *params) self._func_cache[key] = result return self._func_cache[key] def register_event_handlers(self, event_handlers): for name, target in event_handlers: self.core.register_event(name, self.__get_handler_func(target)) def register_gtk_handlers(self, gtk_widget_handlers): for data in gtk_widget_handlers: obj, signal, func = data[:3] params = data[3:] if len(data) > 3 else [] handler_id = obj.connect(signal, self.__get_handler_func(func, params)) self._gtk_handler_id_cache.append((obj, handler_id)) def unregister_event_handlers(self, event_handlers): for name, target in event_handlers: self.core.unregister_event(name, self.__get_handler_func(target)) def unregister_gtk_handlers(self, gtk_widget_handlers): while self._gtk_handler_id_cache: obj, handler_id = self._gtk_handler_id_cache.pop() obj.disconnect(handler_id) def setup(self): raise NotImplementedError("Module %s (%s) does not implement 'setup'" % (self.name, __file__)) def teardown(self): raise NotImplementedError("Module %s (%s) does not implement 'teardown'" % (self.name, __file__)) def _get_gtk_action_group_by_name(self, group_name, create_if_missing=False): ui_manager = self.core.get("gtk-uimanager") # find the action group of the given name or create a new one for action_group in ui_manager.get_action_groups(): if action_group.get_name() == group_name: return action_group else: if create_if_missing: action_group = self._gtk.ActionGroup(name=group_name) ui_manager.insert_action_group(action_group) return action_group else: return None def register_gtk_accelerator(self, groupname, action, 
accel_string, accel_name): actiongroup = self._get_gtk_action_group_by_name(groupname, create_if_missing=True) accel_path = "<pycam>/%s" % accel_name action.set_accel_path(accel_path) # it is a bit pointless, but we allow an empty accel_string anyway ... if accel_string: key, mod = self._gtk.accelerator_parse(accel_string) self._gtk.AccelMap.change_entry(accel_path, key, mod, True) actiongroup.add_action(action) def unregister_gtk_accelerator(self, groupname, action): actiongroup = self._get_gtk_action_group_by_name(groupname) if actiongroup is None: self.log.warning("Failed to unregister unknown GTK Action Group: %s", groupname) actiongroup.remove_action(action) # remove the connected action group, if it is empty (no more actions assigned) ui_manager = self.core.get("gtk-uimanager") if ui_manager and (len(actiongroup.list_actions()) == 0): ui_manager.remove_action_group(actiongroup) class PluginManager: def __init__(self, core): self.core = core self.modules = {} self.core.set("plugin-manager", self) def import_plugins(self, directory=None, ignore_names=None): if ignore_names is None: ignore_names = [] if directory is None: directory = os.path.dirname(__file__) try: files = os.listdir(directory) except OSError: return plugins = [] for filename in files: if (filename.endswith(".py") and (filename.lower() != "__init__.py") and os.path.isfile(os.path.join(directory, filename))): mod_name = filename[0:-(len(".py"))] if mod_name in ignore_names: _log.info("Skipping plugin %s (marked as 'ignore')", mod_name) continue try: mod_file, mod_filename, mod_desc = imp.find_module(mod_name, [directory]) full_mod_name = "pycam.Plugins.%s" % mod_name mod = imp.load_module(full_mod_name, mod_file, mod_filename, mod_desc) except ImportError as exc: _log.info("Skipping plugin %s: %s", os.path.join(directory, filename), exc) continue for attr in dir(mod): item = getattr(mod, attr) if inspect.isclass(item) and issubclass(item, PluginBase): plugins.append((item, mod_filename, attr)) try_again = True while try_again: try_again = False postponed_plugins = [] for plugin, filename, name in plugins: for dep in plugin.DEPENDS: if dep not in self.modules: # dependency not loaded, yet postponed_plugins.append((plugin, filename, name)) break else: self._load_plugin(plugin, filename, name) try_again = True plugins = postponed_plugins for plugin, filename, name in plugins: # module failed to load due to missing dependencies missing = [] for depend in plugin.DEPENDS: try: # check if this dependency is available self.get_plugin(depend) except KeyError: missing.append(depend) _log.info("Skipping plugin '%s' due to missing dependencies: %s", name, ", ".join(missing)) def _load_plugin(self, obj, filename, plugin_name): if plugin_name in self.modules: _log.debug("Cleaning up module %s", plugin_name) self.modules[plugin_name].teardown() _log.debug("Initializing module %s (%s)", plugin_name, filename) new_plugin = obj(self.core, plugin_name) try: if not new_plugin.setup(): _log.info("Failed to setup plugin '%s'", str(plugin_name)) else: self.modules[plugin_name] = new_plugin self.core.emit_event("plugin-list-changed") except NotImplementedError as err_msg: _log.info("Skipping incomplete plugin '%s': %s", plugin_name, err_msg) def disable_all_plugins(self): _log.info("Disabling all plugins") for plugin_name in self.modules: if self.get_plugin_state(plugin_name): self.disable_plugin(plugin_name, recursively=True) def get_plugin(self, name): if name in self.modules: return self.modules[name] else: raise KeyError("Plugin '%s' is not 
available" % name) def enable_plugin(self, name): plugin = self.get_plugin(name) if plugin.enabled: _log.debug("Refused to enable an active plugin: %s" % name) return else: plugin.enabled = plugin.setup() def disable_plugin(self, name, recursively=False): plugin = self.get_plugin(name) if not plugin.enabled: _log.debug("Refused to disable an disabled plugin: %s" % name) return else: if recursively and self.is_plugin_required(name): for dep_name in self.get_dependent_plugins(name): if self.get_plugin_state(dep_name): self.disable_plugin(dep_name, recursively=True) if self.is_plugin_required(name): _log.warning("Refusing to disable plugin: %s (dependent plugins: %s)", name, " ".join(self.get_dependent_plugins(name))) else: _log.debug("Disabling plugin: %s", name) plugin.teardown() plugin.enabled = False def get_plugin_state(self, name): plugin = self.get_plugin(name) return plugin.enabled def get_plugins(self): return list(self.modules.values()) def get_plugin_names(self): names = self.modules.keys() return sorted(names) def get_dependent_plugins(self, name): return {plugin.name for plugin in self.modules.values() if plugin.enabled and (name in plugin.DEPENDS)} def is_plugin_required(self, name): return len(self.get_dependent_plugins(name)) > 0 def get_plugin_missing_dependencies(self, name): plugin = self.get_plugin(name) missing = [] for depend in plugin.DEPENDS: if (depend in self.modules) and self.modules[depend].enabled: continue else: missing.append(depend) return missing class ListPluginBase(PluginBase): ACTION_UP, ACTION_DOWN, ACTION_DELETE, ACTION_CLEAR = range(4) COLLECTION_ITEM_TYPE = None def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._update_model_funcs = [] self._gtk_modelview = None get_event_handler().register_event(self.COLLECTION_ITEM_TYPE.list_changed_event, self._update_model) def __del__(self): try: unregister = get_event_handler().unregister_event except AttributeError: pass unregister(self.COLLECTION_ITEM_TYPE.list_changed_event, self._update_model) def get_all(self): return tuple(self.get_collection()) def clear(self): self.get_collection().clear() def get_selected(self, **kwargs): if self._gtk_modelview: return self._get_gtk_selected(**kwargs) else: return None def _get_gtk_selected(self, index=False, force_list=False): modelview = self._gtk_modelview if hasattr(modelview, "get_selection"): # a treeview selection selection = modelview.get_selection() if selection is None: # probably we are just shutting down right now selection_mode = None paths = [] else: selection_mode = selection.get_mode() paths = selection.get_selected_rows()[1] elif hasattr(modelview, "get_active"): # combobox selection_mode = self._gtk.SELECTION_SINGLE active = modelview.get_active() if active < 0: paths = [] else: paths = [[active]] else: # an iconview selection_mode = modelview.get_selection_mode() paths = modelview.get_selected_items() if index: get_result = lambda path: path[0] else: get_result = self.get_by_path if (selection_mode == self._gtk.SelectionMode.MULTIPLE) or force_list: result = [] for path in paths: result.append(get_result(path)) else: if not paths: return None else: result = get_result(paths[0]) return result def select(self, selected): if not isinstance(selected, (list, tuple)): selected = [selected] if self._gtk_modelview: self._select_gtk(selected) def _select_gtk(self, selected_objs): selection = self._gtk_modelview.get_selection() selected_uuids = [item.get_id() for item in selected_objs] for index, item in enumerate(self.get_collection()): 
path = self._gtk.TreePath.new_from_indices((index, )) if item.get_id() in selected_uuids: selection.select_path(path) else: selection.unselect_path(path) def set_gtk_modelview(self, modelview): self._gtk_modelview = modelview def force_gtk_modelview_refresh(self): # force a table update by simulating a change of the list store model = self._gtk_modelview.get_model() if model is not None: model.prepend(None) model.remove(model.get_iter_first()) def _update_gtk_treemodel(self): if not self._gtk_modelview: return treemodel = self._gtk_modelview.get_model() if treemodel is None: # this my happen during shutdown return previous_count = len(treemodel) current_uuids = [item.get_id() for item in self.get_collection()] # remove all superfluous rows from "treemodel" removal_indices = [index for index, item in enumerate(treemodel) if item[0] not in current_uuids] removal_indices.reverse() for index in removal_indices: treemodel.remove(treemodel.get_iter((index, ))) # add all missing items to "treemodel" model_uuids = [row[0] for row in treemodel] for this_uuid in current_uuids: if this_uuid not in model_uuids: treemodel.append((this_uuid, )) # reorder the treemodel according to the current list sorted_indices = [current_uuids.index(row[0]) for row in treemodel] if sorted_indices: treemodel.reorder(sorted_indices) # Explicitly select the first item - otherwise the pre-filled defaults do not cause a # selection. This would be annoying for the ExportSettings, since the Toolpath view uses # the first selected set of settings (but would fail by default). if (previous_count == 0) and current_uuids: self.select(self.get_collection()[0]) def get_by_path(self, path): if not self._gtk_modelview: return None this_uuid = self._gtk_modelview.get_model()[int(path[0])][0] return self.get_collection()[this_uuid] def _update_model(self): self._update_gtk_treemodel() for update_func in self._update_model_funcs: update_func() def register_model_update(self, func): self._update_model_funcs.append(func) def unregister_model_update(self, func): if func in self._update_model_funcs: self._update_model_funcs.remove(func) def _list_action(self, *args): # the second-to-last parameter should be the model view modelview = args[-2] # the last parameter should be the action (ACTION_UP|DOWN|DELETE|CLEAR) action = args[-1] if action not in (self.ACTION_UP, self.ACTION_DOWN, self.ACTION_DELETE, self.ACTION_CLEAR): self.log.info("Invalid action for ListPluginBase.list_action: %s", str(action)) return selected_items = self.get_selected(index=True, force_list=True) selected_items.sort() if action in (self.ACTION_DOWN, self.ACTION_DELETE): selected_items.sort(reverse=True) collection = self.get_collection() new_selection = [] if action == self.ACTION_CLEAR: collection.clear() else: for index in selected_items: if action == self.ACTION_UP: if index > 0: collection.swap_by_index(index, index - 1) new_selection.append(index - 1) elif action == self.ACTION_DOWN: if index < len(self.get_collection()) - 1: collection.swap_by_index(index, index + 1) new_selection.append(index + 1) elif action == self.ACTION_DELETE: del collection[index] if collection: new_selection.append(min(index, len(collection) - 1)) else: pass self._update_model() if hasattr(modelview, "get_selection"): selection = modelview.get_selection() else: selection = modelview selection.unselect_all() for index in new_selection: path = self._gtk.TreePath.new_from_indices((index, )) selection.select_path(path) def get_collection(self): return 
self.COLLECTION_ITEM_TYPE.get_collection() def _update_list_action_button_state(self, *args): modelview = args[-3] # noqa F841 - maybe we need it later action = args[-2] button = args[-1] paths = self.get_selected(index=True, force_list=True) if action == self.ACTION_CLEAR: button.set_sensitive(len(self.get_collection()) > 0) elif not paths: button.set_sensitive(False) else: if action == self.ACTION_UP: button.set_sensitive(0 not in paths) elif action == self.ACTION_DOWN: button.set_sensitive((len(self.get_collection()) - 1) not in paths) else: button.set_sensitive(True) def register_list_action_button(self, action, button): modelview = self._gtk_modelview if hasattr(modelview, "get_selection"): # a treeview selection = modelview.get_selection() selection.connect("changed", self._update_list_action_button_state, modelview, action, button) else: modelview.connect("selection-changed", self._update_list_action_button_state, modelview, action, button) model = modelview.get_model() for signal in ("row-changed", "row-deleted", "row-has-child-toggled", "row-inserted", "rows-reordered"): model.connect(signal, self._update_list_action_button_state, modelview, action, button) button.connect("clicked", self._list_action, modelview, action) # initialize the state of the button self._update_list_action_button_state(modelview, action, button) def get_visible(self): return [item for item in self.get_all() if item.get_application_value("visible", True)] def edit_item_name(self, cell, path, new_text): item = self.get_by_path(path) if item and (new_text != item.get_application_value("name")) and new_text: item.set_application_value("name", new_text) def render_item_name(self, column, cell, model, m_iter, data): item = self.get_by_path(model.get_path(m_iter)) if item: cell.set_property("text", item.get_application_value("name", "No Name")) def render_item_visible_state(self, column, cell, model, m_iter, data): item = self.get_by_path(model.get_path(m_iter)) if item.get_application_value("visible", True): cell.set_property("pixbuf", self.ICONS["visible"]) else: cell.set_property("pixbuf", self.ICONS["hidden"]) return item, cell def toggle_item_visibility(self, treeview, path, column): item = self.get_by_path(path) if item: item.set_application_value("visible", not item.get_application_value("visible")) self.core.emit_event("visual-item-updated") def get_non_conflicting_name(self, name_template): return get_non_conflicting_name( name_template, [item.get_application_value("name") for item in self.get_all()]) class ObjectWithAttributes(dict): def __init__(self, node_key=None, attributes=None, **kwargs): super().__init__(**kwargs) if attributes is not None: self.update(attributes) self["uuid"] = str(uuid.uuid4()) self.node_key = node_key def filter_list(items, *args, **kwargs): if len(args) > 1: _log.info("This filter accepts only a single unnamed parameter: index(es), but %d " "parameters were given", len(args)) return [] elif len(args) == 1: try: items = [items[index] for index in args[0]] except TypeError: # not iterable try: items = [items[args[0]]] except (IndexError, TypeError): _log.info("Invalid index requested in filter: %s", str(args[0])) return [] else: pass result = [] for item in items: for filter_key in kwargs: try: if not item[filter_key] == kwargs[filter_key]: break except KeyError: _log.info("Tried to filter an unknown attribute: %s", str(filter_key)) break else: # all keys are matching result.append(item) return result def get_filter(items): return lambda *args, **kwargs: filter_list(items, 
*args, **kwargs)
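# --- Editor's sketch (not part of PyCAM): a minimal plugin following the
# PluginBase contract defined above. setup() must return a truthy value for
# PluginManager._load_plugin() to keep the module, DEPENDS controls load order,
# and event handlers are registered/unregistered symmetrically. The class name,
# the dependency and the "toolpath-changed" event are illustrative assumptions.
class HypotheticalToolpathInfo(PluginBase):

    DEPENDS = ["Toolpaths"]
    CATEGORIES = ["Toolpath"]

    def setup(self):
        self._event_handlers = (("toolpath-changed", self._on_toolpath_changed),)
        self.register_event_handlers(self._event_handlers)
        return True

    def teardown(self):
        self.unregister_event_handlers(self._event_handlers)

    def _on_toolpath_changed(self):
        self.log.info("Plugin %s noticed a toolpath change", self.name)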
python
# SPDX-License-Identifier: BSD-3-Clause # Depthcharge: <https://github.com/nccgroup/depthcharge> """ U-Boot environment variable parsing and handling functionality """ import copy import os import re from zlib import crc32 from .. import log from ..arch import Architecture # This is a bit bonkers because U-Boot let's you run pretty wild with # your variable naming... # # Here's a few examples to ruin your day: # setenv ' ' true # setenv '' :) # setenv '\$ foo' 'bar ${ }' # setenv '\$\{bar\} ' 'run echo ${\$ foo}' # setenv '\$omg \$stahp\}' \#cursed # setenv \{test\$\{test 42 # # See U-Boot's lib/hashtable.c for name handling. _VAR_NAME_RE = re.compile(r""" \$\{(.*?)\}| # ${foo} \$([^\$\s\{][^\$\s]*) # $bar """, re.VERBOSE) # Representation of an environment variable stored in NV memory _ENV_VAR_PAT = b'(?P<name>[\x20-\x3c\x3d-\x7f]+)=(?P<value>[\x20-\x7f]+)\x00' def raw_regex(min_entries: int = 5, max_entries: int = None): """ Return a compiled regular expression for locating a U-Boot environment in a binary. This does not include ``env_t`` metadata, such as the environment's CRC32 word and optional flags byte. The *min_entries* and *max_entries* parameters can be used to bound the size (in number of entries) of the environment to be matched. If you haven't already, consider using :py:class:`~depthcharge.hunter.EnvironmentHunter` instead, as this may already do everything you're looking to implement. """ min_entries = min_entries or '' max_entries = max_entries or '' pattern = b'(' + _ENV_VAR_PAT + b'){' pattern += str(min_entries).encode('ascii') pattern += b',' pattern += str(max_entries).encode('ascii') pattern += b'}' return re.compile(pattern) def raw_var_regex(): """ Return a compiled regular expression that can be used to match an environment variable definition in a binary. If you haven't already, consider using :py:class:`~depthcharge.hunter.EnvironmentHunter` instead, as this may already do everything you're looking to implement. """ return re.compile(b'(?P<name>[\x20-\x3c\x3e-\x7f]+)=(?P<value>[\x09\x0a\x0d\x20-\x7f]+)\x00') def parse(text: str) -> dict: """ Parse the contents of the environment contained in the provided *text* (e.g. obtained through the console interface) and return the environment as a dictionary. A :py:exc:`ValueError` is raised if no environment variables are found. """ results = {} prev_name = None expect_continuation = False for line in text.splitlines(): if expect_continuation: results[prev_name] += os.linesep + line expect_continuation = line.endswith('\\') else: if not line or line.startswith('Environment size: '): continue try: delim_idx = line.index('=') except ValueError: # Try to be resilient and ignore bizzare or malformed lines... continue name = line[:delim_idx] value = line[delim_idx+1:] results[name] = value prev_name = name expect_continuation = value.endswith('\\') if not results: raise ValueError('No environment variables found') return results def expand_variable(env: dict, to_expand: str, **kwargs) -> str: """ Return the environment variable named *to_expand* with all variable definitions contained within it fully expanded. A :py:exc:`KeyError` is raised if *to_expand* is not present in the provided *env* dictionary. **Optional Keyword Arguments**: *limit* - Maximum expansion iterations to peform. Default: ``100`` *warn_only* - Print a warning, but do not raise an exception, if the variable definition cannot be fully expended due to an undefined environment variable. 
This situtaion is possibly indicative of an issue with the U-Boot environment itself, rather than Depthcharge or anything the user has done incorrectly; it may be the case that some incomplete development cruft or reference design vestiges are present in the environment. If this occurs and this setting is set to ``False``, a :py:exc:`ValueError` will be raised. Default: ``True`` *quiet* - Suppress the above warning. (Requires *warn_only=True*.) """ result = None limit = kwargs.get('limit', 100) warn_only = kwargs.get('warn_only', True) quiet = kwargs.get('quiet', False) value = env[to_expand] for _ in range(0, limit): prev = value for match in _VAR_NAME_RE.finditer(value): var_name = match.group(1) or match.group(2) if var_name in env: expansion = env[var_name] if match.group(1): value = value.replace('${' + var_name + '}', expansion) else: value = value.replace('$' + var_name, expansion) if prev == value: result = value break if result is None: raise ValueError('Expansion iteration limit reached') # Are there any unexpanded definitions remaining? match = _VAR_NAME_RE.search(value) if match: var_name = match.group(1) or match.group(2) msg = 'No definition for environment variable "{:s}" found when expanding "{:s}"' msg = msg.format(var_name, to_expand) if warn_only: if not quiet: log.warning(msg) else: raise ValueError(msg) return result def expand(env: dict, **kwargs) -> dict: """ Return a copy of the provided U-Boot environment variable dictionary with all variable definitions fully resolved. This function supports the same keyword arguments as :py:func:`expand_variable()`. """ ret = copy.deepcopy(env) for var in env: ret[var] = expand_variable(env, var, **kwargs) return ret def parse_raw(data: bytes) -> dict: """ Parse the contents of an environment retrieved from flash or memory and provide an equivalent dictionary. The provided *data* should being at the start of the variable definitions. It **must not** contain the ``env_t`` metadata, such as the CRC32 word and the ``flags`` value (only present when compiled with "``CONFIG_SYS_REDUNDAND_ENVIRONMENT``". A :py:exc:`ValueError` is raised if no environment variables are found. """ results = {} regex = raw_var_regex() for match in regex.finditer(data): name = match.group('name').decode('ascii') value = match.group('value').decode('ascii') results[name] = value if not results: raise ValueError('No environment variables found') return results def load(filename: str) -> dict: """ Load a U-Boot environment from a text file and return it as a dictionary. The text file is expected to be in the same format as that used by U-Boot's ``printenv`` command output. A :py:exc:`ValueError` is raised if no environment variables are found. """ with open(filename, 'r') as infile: text = infile.read() return parse(text) def load_raw(filename: str, arch: str, has_crc=True, has_flags=False) -> tuple: """ Load an environment previously carved from a binary or saved with :py:func:`save_raw()`. It is returned as a tuple: ``(env: dict, metadata: dict)`` This function expects the environment (metadata) to begin at offset 0 in the opened file. The name of the target architecture (*arch*) must be provided. The *has_crc* and *has_flags* boolean parameters should be used to specify whether the file contains a U-Boot env_t header. 
""" with open(filename, 'rb') as infile: data = infile.read() metadata = {} start = 0 if has_crc: arch = Architecture.get(arch) crc = int.from_bytes(data[0:4], arch.endianness) start += 4 metadata['crc'] = crc if has_flags: metadata['flags'] = data[start] start += 1 data = data[start:] metadata['actual_crc'] = crc32(data) metadata['size'] = len(data) env = parse_raw(data) return (env, metadata) def save(filename: str, env: dict): """ Write the contents of an environment to a text file that can later be loaded via :py:func:load()`. """ with open(filename, 'w') as outfile: for name in sorted(env.keys()): value = env[name] outfile.write(name + '=' + value + os.linesep) def save_raw(filename: str, env: dict, size: int, arch: str, flags: int = None, no_header=False): """ Convert the environment information stored in *env* and save it to *filename*. Refer to :py:func:`create_raw_environment` for more information about this function's arguments. """ env_data = create_raw(env, size, arch, flags, no_header) with open(filename, 'wb') as outfile: outfile.write(env_data) def create_raw(env: dict, size: int, arch: str, flags: int = None, no_header=False) -> bytes: """ Convert the environment contained the *env* dictionary to the binary format that can be used to replace an environment in non-volatile storage. The *size* parameter must match the target's compile-time ``CONFIG_ENV_SIZE`` definition. The environment is zero-padded to this length prior to the computation of its CRC32 checksum. If you don't know this value and can extract flash contents, you can use :py:class:`~depthcharge.hunter.EnvironmentHunter` to locate environment instances. The ``src_size`` entry in the results for of :py:meth:`~depthcharge.hunter.EnvironmentHunter.find()` and :py:meth:`~depthcharge.hunter.EnvironmentHunter.finditer()` correspond to this size. The *arch* parameter must name the target architecture that will be processing the environment. Finally, an optional *flags* value can be provided. This is an ``env_t`` structure field present only when U-Boot is compiled with the `CONFIG_SYS_REDUNDAND_ENV <https://gitlab.denx.de/u-boot/u-boot/-/blob/v2020.04/env/Kconfig#L394>`_ (sic) option. This option enables the use of two environment copies, should one become corrupted during the programming operation (e.g. via unexpected power-loss). Although called "flags", it's basically a monotonic modulo-256 counter that's incremented by one at each write to denote the freshest copy. (See `env/common.c <https://gitlab.denx.de/u-boot/u-boot/-/blob/v2020.04/env/common.c#L181>`_) If you are replacing an environment that uses this feature, be sure to provide either the same *flags* value or a greater value. Setting *no_header=True* will create the environment contents without any header metadata (i.e., no CRC word, no flags). """ ret = bytearray() endianness = Architecture.get(arch).endianness env_bin = b'' for name in sorted(env.keys()): env_bin += name.encode('ascii') env_bin += b'=' env_bin += env[name].encode('ascii') env_bin += b'\x00' padding = size - len(env_bin) if no_header is False: padding -= 4 # CRC word if flags is not None: padding -= 1 if padding < 0: msg = 'Environment contents ({:d} bytes) exceed storage size ({:d} bytes)' raise ValueError(msg.format(len(env_bin) - padding, size)) env_bin += b'\x00' * padding crc_bytes = crc32(env_bin).to_bytes(4, endianness) if no_header is not True: ret += crc_bytes if flags is not None: ret += flags.to_bytes(1, 'big') ret += env_bin return bytes(ret)
python
# Function to add looted inventory to player inventory def addToInventory(inventory, addedItems): for loot in addedItems: if loot in inventory: inventory[loot] = inventory[loot] + 1 else: inventory.setdefault(loot, 1) return inventory # Function to display inventory def displayInventory(inventory): print('Inventory:') itemTotal = 0 for item in inventory: itemTotal = itemTotal + inventory[item] print(inventory[item], end=' ') print(item) print('Total number of items: ' + str(itemTotal)) # Player's inventory playerInventory = {'gold coin' : 42, 'rope' : 1} # Dragon's inventory dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby'] # Call functions playerInventory = addToInventory(playerInventory, dragonLoot) displayInventory(playerInventory)
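# Editor's note: an equivalent (hedged) sketch using collections.Counter, which
# collapses the add/count logic above into library calls. The total matches
# displayInventory() for the same inputs.
from collections import Counter

counterInventory = Counter({'gold coin': 42, 'rope': 1})
counterInventory.update(dragonLoot)
print('Total number of items (Counter): ' + str(sum(counterInventory.values())))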
python
import requests

url = 'https://images-api.nasa.gov/search?q=Ilan%20Ramon'
image_metadata_url = 'https://images-assets.nasa.gov/image/{0}/metadata.json'  # e.g. KSC-03pd2975/metadata.json

# params = dict(
#     origin='Chicago,IL',
#     destination='Los+Angeles,CA',
#     waypoints='Joplin,MO|Oklahoma+City,OK',
#     sensor='false'
# )

resp = requests.get(url=url)
data = resp.json()

for item in data['collection']['items']:
    item_nasa_id = item['data'][0]['nasa_id']
    item_href = item['links'][0]['href']
    # Parse the per-image metadata JSON once; 'File:FileSize' looks like "<size> <unit>".
    image_metadata = requests.get(url=image_metadata_url.format(item_nasa_id)).json()
    file_size_field = str(image_metadata['File:FileSize'])
    file_unit = file_size_field.split(' ')[1]
    if file_unit != "MB":  # size reported in kB
        file_size = int(file_size_field.split(' ')[0])
        if file_size > 1000:
            print(file_size)
            print(item_nasa_id)
            print(item_href)
python
import multiprocessing import os from argparse import ArgumentParser from pathlib import Path import torch from nflows import distributions, transforms from pyprojroot import here from pytorch_lightning import Trainer, seed_everything from pytorch_lightning.loggers import WandbLogger from torch.utils.data import DataLoader, TensorDataset from src.data.toy import get_bivariate_data from src.models.dists import get_base_dist from src.models.flows import Gaussianization2D from src.models.gaussianization import (get_marginalization_transform, get_rotation) from src.viz.bivariate import plot_2d_joint, plot_2d_joint_probs device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") root = here(project_files=[".here"]) home = str(Path.home()) save_path = Path(root).joinpath("reports/figures/experiments/bivariate") def main(args): # ======================= # Initialize Logger # ======================= wandb_logger = WandbLogger(project=args.wandb_project, entity=args.wandb_entity) wandb_logger.experiment.config.update(args) seed_everything(args.seed) X_data = get_bivariate_data( dataset=args.dataset, n_samples=args.n_train, noise=args.noise, seed=args.seed ) X_val = get_bivariate_data( dataset=args.dataset, n_samples=args.n_valid, noise=args.noise, seed=args.seed + 1, ) n_features = 2 # plot data samples plot_2d_joint( X_data, color="blue", label="Real Data", wandb_logger=wandb_logger.experiment, log_name="samples_real", # save=str(save_path.joinpath(f"{args.dataset}_samples_real.png")), ) # get number of layers layers = [] if args.init_rot: # initialize with rotation layer layers.append( get_rotation( n_features=n_features, num_householder=args.num_householder, identity_init=args.identity, rotation=args.rotation, ) ) # loop through layers for _ in range(args.n_layers): # marginal transform layers.append( get_marginalization_transform( n_features=n_features, squash=args.squash, num_bins=args.n_bins, tails=args.tails, tail_bound=args.tail_bound, identity_init=args.identity, ) ) # rotation layers.append( get_rotation( n_features=n_features, num_householder=args.num_householder, identity_init=args.identity, rotation=args.rotation, ) ) # get marginal transformation gauss_flows = transforms.CompositeTransform(layers) # createval_loader # initialize NF trainer gf_model = Gaussianization2D( gauss_flows, base_distribution=get_base_dist(n_features), hparams=args ) # plot initial latent space with torch.no_grad(): z = gf_model.model.transform_to_noise(torch.Tensor(X_data)) plot_2d_joint( z.numpy(), color="green", label="Latent Space", wandb_logger=wandb_logger.experiment, log_name="latent_init", # save=str(save_path.joinpath(f"{args.dataset}_samples_real.png")), ) # ==================================== # DATA # ==================================== X_data, X_val = torch.FloatTensor(X_data), torch.FloatTensor(X_val) train_dataset, val_dataset = TensorDataset(X_data), TensorDataset(X_val) train_loader = DataLoader( train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=multiprocessing.cpu_count(), pin_memory=True, ) val_loader = DataLoader( val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=multiprocessing.cpu_count(), pin_memory=True, ) # ==================================== # TRAINING # ==================================== trainer = Trainer(max_epochs=args.n_epochs, gpus=1, logger=wandb_logger) trainer.fit(gf_model, train_loader, val_loader) # ==================================== # PLOTS # ==================================== with torch.no_grad(): # 
LATENT SPACE z = gf_model.model.transform_to_noise(X_data) plot_2d_joint( z.detach().numpy(), color="green", label="Latent Space", wandb_logger=wandb_logger.experiment, log_name="latent_trained", # save=str(save_path.joinpath("latent_trained.png")), ) # PROBABILITIES X_logprob = gf_model.model.log_prob(X_data) plot_2d_joint_probs( X_data.detach().numpy(), probs=X_logprob.numpy(), wandb_logger=wandb_logger.experiment, log_name="log_probs", # save=str(save_path.joinpath("latent_trained.png")), ) plot_2d_joint_probs( X_data.detach().numpy(), probs=X_logprob.exp().numpy(), wandb_logger=wandb_logger.experiment, log_name="probs", # save=str(save_path.joinpath("latent_trained.png")), ) # SAMPLING with torch.no_grad(): X_approx = gf_model.model.sample(args.n_samples) plot_2d_joint( X_approx.numpy(), color="red", label="Gen. Samples", wandb_logger=wandb_logger.experiment, log_name="samples_gen", # save=str(save_path.joinpath("samples_gen.png")), ) if __name__ == "__main__": parser = ArgumentParser(add_help=False) # ====================== # Data parameters # ====================== parser.add_argument( "--dataset", type=str, default="rbig", help="2D Dataset", ) parser.add_argument( "--n-train", type=int, default=5_000, help="Number of training samples", ) parser.add_argument( "--n-valid", type=int, default=500, help="Number of validation samples", ) parser.add_argument( "--noise", type=float, default=0.10, help="Noise level", ) # ====================== # Transform Params # ====================== parser.add_argument( "--init-rot", type=int, default=1, help="Init rotation", ) parser.add_argument( "--n-layers", type=int, default=3, help="Number of layers", ) parser.add_argument( "--squash", type=int, default=0, help="Number of bins for spline transformation", ) parser.add_argument( "--n-bins", type=int, default=10, help="Number of bins for spline transformation", ) parser.add_argument( "--tail-bound", type=float, default=10.0, help="Number of bins for spline transformation", ) parser.add_argument( "--tails", type=str, default="linear", help="tails", ) parser.add_argument( "--identity", type=int, default=1, help="Initialize with identity", ) parser.add_argument( "--rotation", type=str, default="pca", help="Rotation layer", ) parser.add_argument( "--num-householder", type=int, default=2, help="Number of householder matrices", ) # ====================== # Training Params # ====================== parser.add_argument( "--lr", type=float, default=1e-2, help="Learning Rate", ) parser.add_argument( "--batch-size", type=int, default=50, help="Batch size", ) parser.add_argument( "--n-epochs", type=int, default=100, help="Number of epochs for training", ) # ====================== # VIZ Params # ====================== parser.add_argument( "--n-samples", type=int, default=5_000, help="Number of samples", ) # ====================== # Testing # ====================== parser.add_argument( "-sm", "--smoke-test", action="store_true", help="to do a smoke test without logging", ) parser.add_argument( "--seed", type=int, default=123, help="Seed for project", ) # ====================== # Logger Parameters # ====================== parser.add_argument("--wandb-entity", type=str, default="emanjohnson91") parser.add_argument("--wandb-project", type=str, default="rbig20-2d") args = parser.parse_args() if args.smoke_test: os.environ["WANDB_MODE"] = "dryrun" args.n_epochs = 5 args.n_train = 100 main(args)
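# --- Editor's sketch: the transform stack assembled inside main() above,
# extracted into a small helper for clarity. It repeats the same
# marginal-spline + rotation block n_layers times (main() optionally prepends
# an extra rotation when --init-rot is set). The keyword values are the
# argparse defaults from this script.
def build_gaussianization_transform(n_features=2, n_layers=3):
    layers = []
    for _ in range(n_layers):
        layers.append(get_marginalization_transform(
            n_features=n_features, squash=0, num_bins=10,
            tails="linear", tail_bound=10.0, identity_init=1))
        layers.append(get_rotation(
            n_features=n_features, num_householder=2,
            identity_init=1, rotation="pca"))
    return transforms.CompositeTransform(layers)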
python
# Generated by Django 2.2.5 on 2019-09-07 04:47 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('bhs', '0002_group_pos'), ] operations = [ migrations.AlterField( model_name='group', name='chapters', field=models.CharField(blank=True, editable=False, help_text='\n The denormalized chapter group.', max_length=255), ), migrations.AlterField( model_name='group', name='participants', field=models.CharField(blank=True, default='', editable=False, help_text='Director(s) or Members (listed TLBB)', max_length=255), ), migrations.AlterField( model_name='group', name='pos', field=models.IntegerField(blank=True, editable=False, help_text='\n The number of active performers.', null=True), ), ]
python
# Authors: # Loic Gouarin <loic.gouarin@polytechnique.edu> # Benjamin Graille <benjamin.graille@universite-paris-saclay.fr> # Thibaut Van Hoof <thibaut.vanhoof@cenaero.be> # # License: BSD 3 clause from .model import ModelWidget from .test_case import TestCaseWidget from .lb_scheme import LBSchemeWidget from .stability import StabilityWidget from .simulation import SimulationWidget from .parametric_study import ParametricStudyWidget from .post_treatment import PostTreatmentWidget from .pylbmwidget import out from .debug import debug, debug_func from . import responses
python
""" Пул воркеров. Полное управление и контроль воркерами. """ import logging import threading from functools import partial from multiprocessing import Pool, cpu_count, Queue, Process logger = logging.getLogger(__name__) class Worker(Process): """ Свой процесс. Тут мы вызываем команду. """ def __init__(self, queue, pk, *args, **kwargs): super(Worker, self).__init__(*args, **kwargs) self.queue = queue self.pk = pk def run(self): logger.info('Worker `{}` started.'.format(self.pk)) for data in iter(self.queue.get, None): try: command, args, kwargs = data.get('command', None), data.get('args', []), data.get('kwargs', {}) if command: logger.info('Worker `{}`. Start task:`{}`, args: `{}`, kwargs: `{}`.'.format( self.pk, command, args, kwargs )) result = command(*args, **kwargs) logger.info('Worker `{}`. End task:`{}`, args: `{}`, kwargs: `{}, result: `{}`.'.format( self.pk, command, args, kwargs, result )) else: logger.error('Worker `{}` error. Command not found in `{}`.'.format(self.pk, data)) except: logger.error( 'Exception for worker `{}` command: `{}`. More information: '.format(self.pk, data), exc_info=True ) logger.info('Worker `{}` finished.'.format(self.pk)) class NewPoolWorkers(object): """ Ручной пул воркеров. Простая общая очередь на несколько воркеров. """ def __init__(self, count=None): self.__count_workers = count if count else cpu_count() - 1 self.__queue = Queue() self.__workers = { key + 1: Worker(self.__queue, key + 1) for key in range(count) } for key, val in self.__workers.items(): val.start() def apple_async(self, target, *args, **kwargs): """ Добавление задачи для асинхронного выполнения. :param target: Зачада. :type target: function """ logger.info('Add task for pool. Task: `{}`, args: `{}`, kwargs: `{}`.'.format(target, args, kwargs)) self.__queue.put({"command": target, "args": args, "kwargs": kwargs}) def close(self): """ Убиваем все воркеры и сам пул. Предварительно ждем завершения задачи. """ logger.info('Poll workers shutdown started.') for _ in self.__workers.keys(): self.__queue.put(None) for key, val in self.__workers.items(): val.join() val.terminate() logger.info('Pool workers shutdown finished.') class RenewableWorker(Process): """ Одноразовый воркер. """ def __init__(self, pk, end_callback=None, error_callback=None, *args, **kwargs): """ Одноразовый воркер. :param int pk: ID воркера. :param function end_callback: Функция, выполняющаяся после успешного выполнения задачи. :param function error_callback: Функция, выполняющаяся после ошибки в задаче. """ super(RenewableWorker, self).__init__(*args, **kwargs) self.pk = pk self.end_callback = end_callback self.error_callback = error_callback def run(self): logger.info('Worker `{}` start.'.format(self.pk)) try: result = super(RenewableWorker, self).run() except Exception as e: logger.error('Worker `{}` exception.'.format(self.pk), exc_info=True) return self.error_callback(e) if self.error_callback else None logger.info('Worker `{}` end.'.format(self.pk)) return self.end_callback(result) if self.end_callback else result class PollRenewableWorkers(object): """ Пул возобновляемых воркеров. На каждую задачу создается процесс, после выполнения задачи процесс грохается. """ def __init__(self, count=None): self.__count_workers = count if count else cpu_count() - 1 self.__workers = {} def apple_async(self, target, end_callback=None, error_callback=None, *args, **kwargs): """ Добавление задачи для асинхронного выполнения. :param target: Зачада. 
:param end_callback: Функция, которая выполнится после успешного завершения задачи. :param error_callback: Функция, которая выполнится после ошибки во время задачи. :type target: function :type end_callback: function :type error_callback: function """ process = RenewableWorker( self.__create_pk(), end_callback=end_callback, error_callback=error_callback, target=target, args=args, kwargs=kwargs ) self.__workers[process.pk] = process self.__workers[process.pk].start() def close(self): """ Завершает все процессы безопасно. """ for key, val in self.__workers.items(): logger.info("Worker `{}` served his own. It's time to retire.".format(key)) val.terminate() val.join() logger.info("Worker `{}` retired. Bye Bye.".format(key)) def __create_pk(self): """ Формирует и возвращает PK воркера. :return: PK для нового воркера. :rtype: int """ pks = sorted(list(self.__workers.keys()), reverse=True) return pks[0] + 1 if pks else 1 class PoolWorkers(object): """ Пул воркеров, с которым работаем. Стандартный пул, без наработок. """ def __init__(self, count=None): self.__count_workers = count if count else cpu_count() - 1 self.__pool = Pool(self.__count_workers) @property def state(self): try: return self.__pool._state except Exception as e: logger.error(e, exc_info=True) return None def apple_async(self, target, end_callback=None, error_callback=None, *args, **kwargs): """ Добавление задачи для асинхронного выполнения. :param target: Зачада. :param end_callback: Функция, которая выполнится после успешного завершения задачи. :param error_callback: Функция, которая выполнится после ошибки во время задачи. :type target: function :type end_callback: function :type error_callback: function """ self.__pool.apply_async(target, args=args, kwds=kwargs, callback=end_callback, error_callback=error_callback) def join(self): """ Ждем выполнения всех воркеров. """ return self.__pool.join() def close(self): """ Убиваем все воркеры и сам пул. Предварительно ждем завершения задачи. """ self.__pool.close() class Timer(threading.Thread): """ Свой таймер, отдельным потоком. Несмотря на GIL, должно работать. """ def __init__(self, handler, args=None, kwargs=None, interval=20 * 60, deffer=False): """ Делаем атрибуты для таймера. :param handler: Функция, которую надо вызывать каждые interval секунд. :param args: Позиционные аргументы для функции. :param kwargs: Именованные аргументы для функции. :param interval: Интервал, через который надо вызывать handler. :param deffer: Отложенный запуск. :type handler: func :type args: tuple :type kwargs: dict :type interval: int :type deffer: bool """ threading.Thread.__init__(self) self.__finished = threading.Event() self.__interval = float(interval) args, kwargs = args if args else (), kwargs if kwargs else {} self.__handler = partial(handler, *args, **kwargs) self.__deffer = deffer def set_interval(self, interval): """ Изменить интервал, на который будет засыпать поток. """ self.__interval = interval def shutdown(self): """ Останавливаем поток. """ self.__finished.set() def __run_deffer(self): """ Запускает отложенный таймер. Т.е. первый раз функция выполнится через interval секунд. """ while True: if self.__finished.isSet(): return # Спим, пока не пройдет интервал или сдохнет процесс. self.__finished.wait(self.__interval) self.__handler() def __run_normal(self): """ Запускает нормальный таймер. Т.е. первый раз функция выполнится сразу. """ while True: if self.__finished.isSet(): return self.__handler() # Спим, пока не пройдет интервал или сдохнет процесс. 
self.__finished.wait(self.__interval) def run(self): """ Сам запуск задачи. """ logger.info('Start timer target `{}` interval `{}`'.format(self.__handler, self.__interval)) while True: try: if self.__deffer: self.__run_deffer() else: self.__run_normal() except: logger.error( 'In timer exception target `{}` interval `{}`.'.format(self.__handler, self.__interval), exc_info=True ) if self.__finished.isSet(): break if self.__finished.isSet(): break logger.info('Timer target `{}` interval `{}` rerun.'.format(self.__handler, self.__interval)) logger.info('End timer target `{}` interval `{}`.'.format(self.__handler, self.__interval))
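# --- Editor's sketch (in English, not part of the module): minimal usage of the
# pools and the Timer defined above. `_demo_task` is made up; the sketch assumes
# a platform where multiprocessing can pickle module-level functions (e.g. the
# default fork start method on Linux).
def _demo_task(n, label='job'):
    logger.info('%s #%s done', label, n)


if __name__ == '__main__':
    import time

    # Fixed set of long-lived workers fed from one shared queue.
    pool = NewPoolWorkers(count=2)
    for i in range(5):
        pool.apple_async(_demo_task, i, label='demo')  # note the module's spelling: apple_async
    time.sleep(1)
    pool.close()  # sends one `None` sentinel per worker, then joins them

    # Re-run a callable every `interval` seconds in a separate thread.
    timer = Timer(_demo_task, args=(0,), interval=5)
    timer.start()
    time.sleep(11)   # runs immediately, then roughly every 5 seconds
    timer.shutdown()
    timer.join()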
python
import sys import tempfile from textwrap import dedent import _pytest import pytest import yaml from mock import Mock from mock import patch from tavern.core import run from tavern.schemas.extensions import validate_file_spec from tavern.testutils.helpers import validate_pykwalify from tavern.testutils.helpers import validate_regex, validate_content from tavern.testutils.pytesthook.item import YamlItem from tavern.util import exceptions from tavern.util.dict_util import _check_parsed_values, format_keys class FakeResponse: def __init__(self, text): self.text = text self.headers = dict(test_header=text) class TestRegex: def test_regex_match(self): response = FakeResponse("abchelloabc") matched = validate_regex(response, "(?P<greeting>hello)") assert "greeting" in matched["regex"] def test_regex_no_match(self): response = FakeResponse("abchelloabc") with pytest.raises(AssertionError): validate_regex(response, "(?P<greeting>hola)") def test_regex_match_header(self): response = FakeResponse("abchelloabc") matched = validate_regex(response, "(?P<greeting>hello)", "test_header") assert "greeting" in matched["regex"] def test_regex_no_match_header(self): response = FakeResponse("abchelloabc") with pytest.raises(AssertionError): validate_regex(response, "(?P<greeting>hola)", "test_header") class TestRunAlone: def test_run_calls_pytest(self): """This should just return from pytest.main()""" with patch("tavern.core.pytest.main") as pmock: run("abc") assert pmock.called def test_normal_args(self): with patch("tavern.core.pytest.main") as pmock: run( **{ "tavern_global_cfg": None, "in_file": "kfdoskdof", "tavern_http_backend": "requests", "tavern_mqtt_backend": "paho-mqtt", "tavern_strict": True, } ) assert pmock.called def test_extra_args(self): with pytest.warns(FutureWarning): with patch("tavern.core.pytest.main") as pmock: run( **{ "tavern_global_cfg": None, "in_file": "kfdoskdof", "tavern_http_backend": "requests", "tavern_mqtt_backend": "paho-mqtt", "tavern_strict": True, "gfg": "2efsf", } ) assert pmock.called class TestTavernRepr: @pytest.fixture(name="fake_item") def fix_fake_item(self, request): item = YamlItem( name="Fake Test Item", parent=request.node, spec={}, path="/tmp/hello" ) return item def _make_fake_exc_info(self, exc_type): # Copied from pytest tests class FakeExcinfo(_pytest._code.ExceptionInfo): pass try: raise exc_type except exc_type: excinfo = FakeExcinfo(sys.exc_info()) return excinfo def test_not_called_for_normal_exception(self, fake_item): """Should call normal pytest repr_info""" fake_info = self._make_fake_exc_info(RuntimeError) with patch("tavern.testutils.pytesthook.item.ReprdError") as rmock: fake_item.repr_failure(fake_info) assert not rmock.called def test_not_called_if_flag_not_enabled(self, fake_item): """Not called by default for tavern exceptions""" fake_info = self._make_fake_exc_info(exceptions.BadSchemaError) with patch("tavern.testutils.pytesthook.item.ReprdError") as rmock: fake_item.repr_failure(fake_info) assert not rmock.called def test_not_called_for_badschema_tavern_exception_(self, fake_item): """Enable ini flag, should be called""" fake_info = self._make_fake_exc_info(exceptions.BadSchemaError) with patch.object(fake_item.config, "getini", return_value=True): with patch("tavern.testutils.pytesthook.item.ReprdError") as rmock: fake_item.repr_failure(fake_info) assert not rmock.called def test_called_for_tavern_exception_ini(self, fake_item): """Enable ini flag, should be called""" fake_info = self._make_fake_exc_info(exceptions.InvalidSettingsError) 
with patch.object(fake_item.config, "getini", return_value=True): with patch("tavern.testutils.pytesthook.item.ReprdError") as rmock: fake_item.repr_failure(fake_info) assert rmock.called def test_called_for_tavern_exception_cli(self, fake_item): """Enable cli flag, should be called""" fake_info = self._make_fake_exc_info(exceptions.InvalidSettingsError) with patch.object(fake_item.config, "getoption", return_value=True): with patch("tavern.testutils.pytesthook.item.ReprdError") as rmock: fake_item.repr_failure(fake_info) assert rmock.called @pytest.fixture(name="nested_response") def fix_nested_response(): class response_content(object): content = { "top": { "Thing": "value", "float": 0.1, "nested": {"doubly": {"inner_value": "value", "inner_list": [1, 2, 3]}}, }, "an_integer": 123, "a_string": "abc", "a_bool": True, } def json(self): return self.content return response_content() class TestContent: def test_correct_jmes_path(self, nested_response): comparisons = [ {"jmespath": "top.Thing", "operator": "eq", "expected": "value"}, {"jmespath": "an_integer", "operator": "eq", "expected": 123}, { "jmespath": "top.nested.doubly.inner_list", "operator": "type", "expected": "list", }, ] validate_content(nested_response, comparisons) assert True def test_incorrect_jmes_path(self, nested_response): comparisons = [{"jmespath": "userId", "operator": "eq", "expected": 1}] with pytest.raises(exceptions.JMESError): validate_content(nested_response, comparisons) def test_incorrect_value(self, nested_response): comparisons = [{"jmespath": "a_bool", "operator": "eq", "expected": False}] with pytest.raises(exceptions.JMESError): validate_content(nested_response, comparisons) class TestPykwalifyExtension: def test_validate_schema_correct(self, nested_response): correct_schema = dedent( """ type: map required: true mapping: top: type: map required: true mapping: Thing: type: str float: type: float nested: type: any an_integer: type: int a_string: type: str a_bool: type: bool """ ) validate_pykwalify( nested_response, yaml.load(correct_schema, Loader=yaml.SafeLoader) ) def test_validate_schema_incorrect(self, nested_response): correct_schema = dedent( """ type: seq required: true sequence: - type: str """ ) with pytest.raises(exceptions.BadSchemaError): validate_pykwalify( nested_response, yaml.load(correct_schema, Loader=yaml.SafeLoader) ) class TestCheckParseValues(object): @pytest.mark.parametrize( "item", [[134], {"a": 2}, yaml, yaml.load, yaml.SafeLoader] ) def test_warns_bad_type(self, item): with patch("tavern.util.dict_util.logger.warning") as wmock: _check_parsed_values("{fd}", {"fd": item}) assert wmock.called_with( "Formatting 'fd' will result in it being coerced to a string (it is a {})".format( type(item) ) ) @pytest.mark.parametrize("item", [1, "a", 1.3, format_keys("{s}", dict(s=2))]) def test_no_warn_good_type(self, item): with patch("tavern.util.dict_util.logger.warning") as wmock: _check_parsed_values("{fd}", {"fd": item}) assert not wmock.called class TestCheckFileSpec(object): def _wrap_test_block(self, dowith): validate_file_spec({"files": dowith}, Mock(), Mock()) def test_string_valid(self): with tempfile.NamedTemporaryFile() as tfile: self._wrap_test_block(tfile.name) def test_dict_valid(self): with tempfile.NamedTemporaryFile() as tfile: self._wrap_test_block({"file_path": tfile.name}) def test_nonexistsnt_string(self): with pytest.raises(exceptions.BadSchemaError): self._wrap_test_block("kdsfofs") def nonexistent_dict(self): with pytest.raises(exceptions.BadSchemaError): 
self._wrap_test_block({"file_path": "gogfgl"}) def extra_keys_dict(self): with pytest.raises(exceptions.BadSchemaError): self._wrap_test_block({"file_path": "gogfgl", "blop": 123})
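# Editor's sketch: one more (hypothetical) regex test in the same style as
# TestRegex above, showing that every named group ends up under the "regex" key.
class TestRegexMultipleGroups:
    def test_two_named_groups(self):
        response = FakeResponse("id=42;name=bob")
        matched = validate_regex(response, r"id=(?P<ident>\d+);name=(?P<name>\w+)")
        assert "ident" in matched["regex"]
        assert "name" in matched["regex"]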
python
import copy import os import json from hpbandster.core.base_iteration import Datum class Run(object): """ Not a proper class, more a 'struct' to bundle important information about a particular run """ def __init__(self, config_id, budget, loss, info, time_stamps, error_logs): self.config_id = config_id self.budget = budget self.error_logs = error_logs self.loss = loss self.info = info self.time_stamps = time_stamps def __repr__(self): return(\ "config_id: %s\t"%(self.config_id,) + \ "budget: %f\t"%self.budget + \ "loss: %s\n"%self.loss + \ "time_stamps: {submitted} (submitted), {started} (started), {finished} (finished)\n".format(**self.time_stamps) + \ "info: %s\n"%self.info ) def __getitem__ (self, k): """ in case somebody wants to use it like a dictionary """ return(getattr(self, k)) def extract_HB_learning_curves(runs): """ function to get the hyperband learning curves This is an example function showing the interface to use the HB_result.get_learning_curves method. Parameters: ----------- runs: list of HB_result.run objects the performed runs for an unspecified config Returns: -------- list of learning curves: list of lists of tuples An individual learning curve is a list of (t, x_t) tuples. This function must return a list of these. One could think of cases where one could extract multiple learning curves from these runs, e.g. if each run is an independent training run of a neural network on the data. """ sr = sorted(runs, key=lambda r: r.budget) return([[(r.budget, r.loss) for r in sr],]) class json_result_logger(object): """ convenience logger for 'semi-live-results' Logger that writes job results into two files (configs.json and results.json). Both files contain propper json objects in each line. This version (v1) opens and closes the files for each result. This might be very slow if individual runs are fast and the filesystem is rather slow (e.g. a NFS). """ def __init__(self, directory, overwrite=False): """ Parameters: ----------- directory: string the directory where the two files 'configs.json' and 'results.json' are stored overwrite: bool In case the files already exist, this flag controls the behavior: > True: The existing files will be overwritten. Potential risk of deleting previous results > False: A FileEvistsError is raised and the files are not modified. """ os.makedirs(directory, exist_ok=True) self.config_fn = os.path.join(directory, 'configs.json') self.results_fn = os.path.join(directory, 'results.json') try: with open(self.config_fn, 'x') as fh: pass except FileExistsError: if overwrite: with open(self.config_fn, 'w') as fh: pass else: raise FileExistsError('The file %s already exists.'%self.config_fn) except: raise try: with open(self.results_fn, 'x') as fh: pass except FileExistsError: if overwrite: with open(self.results_fn, 'w') as fh: pass else: raise FileExistsError('The file %s already exists.'%self.config_fn) except: raise self.config_ids = set() def new_config(self, config_id, config, config_info): if not config_id in self.config_ids: self.config_ids.add(config_id) with open(self.config_fn, 'a') as fh: fh.write(json.dumps([config_id, config, config_info])) fh.write('\n') def __call__(self, job): if not job.id in self.config_ids: #should never happen! TODO: log warning here! 
self.config_ids.add(job.id) with open(self.config_fn, 'a') as fh: fh.write(json.dumps([job.id, job.kwargs['config'], {}])) fh.write('\n') with open(self.results_fn, 'a') as fh: fh.write(json.dumps([job.id, job.kwargs['budget'], job.timestamps, job.result, job.exception])) fh.write("\n") def logged_results_to_HB_result(directory): """ function to import logged 'live-results' and return a HB_result object You can load live run results with this function and the returned HB_result object gives you access to the results the same way a finished run would. """ data = {} time_ref = float('inf') budget_set = set() with open(os.path.join(directory, 'configs.json')) as fh: for line in fh: line = json.loads(line) if len(line) == 3: config_id, config, config_info = line if len(line) == 2: config_id, config, = line config_info = 'N/A' data[tuple(config_id)] = Datum(config=config, config_info=config_info) with open(os.path.join(directory, 'results.json')) as fh: for line in fh: config_id, budget,time_stamps, result, exception = json.loads(line) id = tuple(config_id) data[id].time_stamps[budget] = time_stamps data[id].results[budget] = result data[id].exceptions[budget] = exception budget_set.add(budget) time_ref = min(time_ref, time_stamps['submitted']) # infer the hyperband configuration from the data budget_list = sorted(list(budget_set)) HB_config = { 'eta' : None if len(budget_list) < 2 else budget_list[1]/budget_list[0], 'min_budget' : min(budget_set), 'max_budget' : max(budget_set), 'budgets' : budget_list, 'max_SH_iter': len(budget_set), 'time_ref' : time_ref } return(Result([data], HB_config)) class Result(object): """ Object returned by the HB_master.run function This class offers a simple API to access the information from a Hyperband run. """ def __init__ (self, HB_iteration_data, HB_config): self.data = HB_iteration_data self.HB_config = HB_config self._merge_results() def __getitem__(self, k): return(self.data[k]) def get_incumbent_id(self): """ Find the config_id of the incumbent. The incumbent here is the configuration with the smallest loss among all runs on the maximum budget! If no run finishes on the maximum budget, None is returned! """ tmp_list = [] for k,v in self.data.items(): try: # only things run for the max budget are considered res = v.results[self.HB_config['max_budget']] if not res is None: tmp_list.append((res['loss'], k)) except KeyError as e: pass except: raise if len(tmp_list) > 0: return(min(tmp_list)[1]) return(None) def get_incumbent_trajectory(self, all_budgets=True): """ Returns the best configurations over time Parameters: ----------- all_budgets: bool If set to true all runs (even those not with the largest budget) can be the incumbent. 
Otherwise, only full budget runs are considered Returns: -------- dict: dictionary with all the config IDs, the times the runs finished, their respective budgets, and corresponding losses """ all_runs = self.get_all_runs(only_largest_budget = not all_budgets) if not all_budgets: all_runs = list(filter(lambda r: r.budget==res.HB_config['max_budget'], all_runs)) all_runs.sort(key=lambda r: r.time_stamps['finished']) return_dict = { 'config_ids' : [], 'times_finished': [], 'budgets' : [], 'losses' : [], } current_incumbent = float('inf') incumbent_budget = -float('inf') for r in all_runs: if r.loss is None: continue if ((r.budget == incumbent_budget and r.loss < current_incumbent) or \ (r.budget > incumbent_budget)): current_incumbent = r.loss incumbent_budget = r.budget return_dict['config_ids'].append(r.config_id) return_dict['times_finished'].append(r.time_stamps['finished']) return_dict['budgets'].append(r.budget) return_dict['losses'].append(r.info['test_error'][0]) if current_incumbent != r.loss: r = all_runs[-1] return_dict['config_ids'].append(return_dict['config_ids'][-1]) return_dict['times_finished'].append(r.time_stamps['finished']) return_dict['budgets'].append(return_dict['budgets'][-1]) return_dict['losses'].append(return_dict['losses'][-1]) return (return_dict) def get_runs_by_id(self, config_id): """ returns a list of runs for a given config id The runs are sorted by ascending budget, so '-1' will give the longest run for this config. """ d = self.data[config_id] runs = [] for b in d.results.keys(): try: err_logs = d.exceptions.get(b, None) if d.results[b] is None: r = Run(config_id, b, None, None , d.time_stamps[b], err_logs) else: r = Run(config_id, b, d.results[b]['loss'], d.results[b]['info'] , d.time_stamps[b], err_logs) runs.append(r) except: raise runs.sort(key=lambda r: r.budget) return(runs) def get_learning_curves(self, lc_extractor=extract_HB_learning_curves, config_ids=None): """ extracts all learning curves from all run configurations Parameters: ----------- lc_extractor: callable a function to return a list of learning_curves. defaults to hpbanster.HB_result.extract_HP_learning_curves config_ids: list of valid config ids if only a subset of the config ids is wanted Returns: -------- dict a dictionary with the config_ids as keys and the learning curves as values """ config_ids = self.data.keys() if config_ids is None else config_ids lc_dict = {} for id in config_ids: runs = self.get_runs_by_id(id) lc_dict[id] = lc_extractor(runs) return(lc_dict) def get_all_runs(self, only_largest_budget=False): """ returns all runs performed Parameters: ----------- only_largest_budget: boolean if True, only the largest budget for each configuration is returned. This makes sense if the runs are continued across budgets and the info field contains the information you care about. 
If False, all runs of a configuration are returned """ all_runs = [] for k in self.data.keys(): runs = self.get_runs_by_id(k) if len(runs) > 0: if only_largest_budget: all_runs.append(runs[-1]) else: all_runs.extend(runs) return(all_runs) def get_id2config_mapping(self): """ returns a dict where the keys are the config_ids and the values are the actual configurations """ new_dict = {} for k, v in self.data.items(): new_dict[k] = {} new_dict[k]['config'] = copy.deepcopy(v.config) try: new_dict[k]['config_info'] = copy.deepcopy(v.config_info) except: pass return(new_dict) def _merge_results(self): """ hidden function to merge the list of results into one dictionary and 'normalize' the time stamps """ new_dict = {} for it in self.data: new_dict.update(it) for k,v in new_dict.items(): for kk, vv in v.time_stamps.items(): for kkk,vvv in vv.items(): new_dict[k].time_stamps[kk][kkk] = vvv - self.HB_config['time_ref'] self.data = new_dict def num_iterations(self): return(max([k[0] for k in self.data.keys()]) + 1) def get_fANOVA_data(self, config_space, budgets=None): import numpy as np import ConfigSpace as CS id2conf = self.get_id2config_mapping() if budgets is None: budgets = self.HB_config['budgets'] if len(budgets)>1: config_space.add_hyperparameter(CS.UniformFloatHyperparameter('budget', min(budgets), max(budgets), log=True)) hp_names = list(map( lambda hp: hp.name, config_space.get_hyperparameters())) all_runs = self.get_all_runs(only_largest_budget=False) all_runs=list(filter( lambda r: r.budget in budgets, all_runs)) X = [] y = [] for r in all_runs: if r.loss is None: continue config = id2conf[r.config_id]['config'] if len(budgets)>1: config['budget'] = r.budget config = CS.Configuration(config_space, config) X.append([config[n] for n in hp_names]) y.append(r.loss) return(np.array(X), np.array(y), config_space)
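A brief usage sketch for the result-reading helpers above; the directory path is a placeholder and is assumed to contain the configs.json and results.json files written by the live-results logger.

res = logged_results_to_HB_result('path/to/live_run_dir')

inc_id = res.get_incumbent_id()            # config_id of the best run on the maximum budget, or None
id2conf = res.get_id2config_mapping()
if inc_id is not None:
    print('incumbent config:', id2conf[inc_id]['config'])

traj = res.get_incumbent_trajectory(all_budgets=True)
for t, loss in zip(traj['times_finished'], traj['losses']):
    print('t=%.1fs  loss=%s' % (t, loss))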
python
userColors = []
python
class AbstractRequest(object):
    opcode = -1


class AbstractRequestCodec(object):
    @staticmethod
    def decode(payload):
        raise NotImplementedError

    @staticmethod
    def encode(request):
        raise NotImplementedError
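A hypothetical concrete request/codec pair to illustrate how these base classes are meant to be subclassed; the opcode value and the payload layout are made up for the example.

import struct

class PingRequest(AbstractRequest):
    opcode = 1

    def __init__(self, token):
        self.token = token

class PingRequestCodec(AbstractRequestCodec):
    @staticmethod
    def decode(payload):
        (token,) = struct.unpack('!I', payload)   # 4-byte big-endian token
        return PingRequest(token)

    @staticmethod
    def encode(request):
        return struct.pack('!I', request.token)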
python
import cairo import math import random import sys import os sys.path.append(os.path.abspath('..')) from lib import palettes from lib import colors # Final image dimensions IMG_HEIGHT = 2000 IMG_WIDTH = int(IMG_HEIGHT * (16/9)) SPACING = 2 def line(ctx, y, line_interval, color, x_increment=(IMG_WIDTH // 40)): line_width = line_interval // 20 x = 0 ctx.move_to(x, y) nodes = [] while x < IMG_WIDTH: x += random.randint(x_increment // 2, x_increment) y_offset = random.randint(0, line_interval // 2 - SPACING) y_offset = y_offset if random.random() < 0.5 else -1 * y_offset nodes.append((x, y + y_offset)) ctx.line_to(x, y + y_offset) ctx.set_source_rgb(*color) ctx.set_line_width(line_width) ctx.stroke() for node in nodes: (node_x, node_y) = node r = random.randint(line_width * 2, line_width * 4) ctx.arc(node_x, node_y, r, 0, 2 * math.pi) ctx.set_source_rgb(*color) ctx.fill() # Ring around the node ctx.arc(node_x, node_y, r, 0, 2 * math.pi) ctx.set_source_rgb(*random.choice(colors.shades(color, 5))) ctx.set_line_width(line_width) ctx.stroke() def main(filename="output.png", palette=random.choice(palettes.PALETTES), lines=20): ims = cairo.ImageSurface(cairo.FORMAT_ARGB32, IMG_WIDTH, IMG_HEIGHT) ims.set_fallback_resolution(300.0, 300.0) ctx = cairo.Context(ims) # Background ctx.rectangle(0, 0, IMG_WIDTH, IMG_HEIGHT) ctx.set_source_rgb(*palettes.hex_to_tuple(palette['background'])) ctx.fill() line_interval = IMG_HEIGHT // lines for y in range(line_interval, IMG_HEIGHT, line_interval): color = palettes.hex_to_tuple(random.choice(palette['colors'])) line(ctx, y, line_interval, color) ims.write_to_png(filename) if __name__ == "__main__": for idx, l in enumerate([5, 10, 15, 20, 40]): main(filename="output-{}.png".format(idx), palette=random.choice(palettes.PALETTES), lines=l)
python
from typing import Callable, Sequence, Union, TYPE_CHECKING import io from enum import Enum if TYPE_CHECKING: from .expressions import ( ReadSubstitute, WriteSubstitute, ) from .arguments import UncompiledArgument PublicArgument = Union[ str, int, float, 'ReadSubstitute', 'WriteSubstitute', ] PublicKeywordArgument = Union[ bool, str, int, float, 'ReadSubstitute', 'WriteSubstitute', ] InternalArgument = Union[str, 'UncompiledArgument'] ShalchemyFile = Union[ str, io.IOBase, ] ShalchemyOutputStream = Union[ io.IOBase, int, ] KeywordArgumentRenderer = Callable[[str, PublicKeywordArgument], Sequence[str]] class ParenthesisKind(Enum): NEVER = 1 ALWAYS = 2 COMPOUND_ONLY = 3
python
from main.game.ConvertStringArray import historyToArray
from main.game.verifyCheck import verificarCheck

def especialMove(allpieces,piece,history):
    history = historyToArray(history)
    if history != ['']:
        if piece[0] == 'p':
            return EnPassant(piece,history)
        elif piece[0] == 'k':
            return Castles(allpieces,piece,history)

def Castles(allPieces,piece,history):
    rookRightMoved = False
    rookLeftMoved = False
    isMoved = False
    # check whether the king has already moved
    for moved in history:
        if moved:
            if piece[0] == moved[0]:
                if piece[1] == moved[1]:
                    isMoved = True
    # check whether the rooks have already moved
    if isMoved == False:
        for rookMoved in history:
            if rookMoved:
                if rookMoved[0] == 'r':
                    if rookMoved[1] == piece[1]:
                        if rookMoved[3]=='7':
                            rookRightMoved = True
                        elif rookMoved[3] == '0':
                            if rookMoved[1] == piece[1]:
                                rookLeftMoved = True
    myPossibleCastles = ''
    if isMoved == False:
        if rookRightMoved == False:
            if allPieces[int(piece[2])][int(piece[3])+1] == '----':
                if allPieces[int(piece[2])][int(piece[3])+2] == '----':
                    moves = [piece]
                    moves.append(piece[0]+piece[1]+piece[2]+str(int(piece[3])+1))
                    moves.append(piece[0]+piece[1]+piece[2]+str(int(piece[3])+2))
                    realMoves = [piece,moves]
                    movimentosSemCheck = verificarCheck(allPieces,realMoves)
                    if realMoves[1] == movimentosSemCheck[slice(1,4)]:
                        myPossibleCastles = piece[0]+piece[1]+piece[2]+str(int(piece[3])+2)
        if rookLeftMoved == False:
            if allPieces[int(piece[2])][int(piece[3])-1] == '----':
                if allPieces[int(piece[2])][int(piece[3])-2] == '----':
                    if allPieces[int(piece[2])][int(piece[3])-3] == '----':
                        moves = [piece]
                        moves.append(piece[0]+piece[1]+piece[2]+str(int(piece[3])-1))
                        moves.append(piece[0]+piece[1]+piece[2]+str(int(piece[3])-2))
                        realMoves = [piece,moves]
                        movimentosSemCheck = verificarCheck(allPieces,realMoves)
                        if realMoves[1] == movimentosSemCheck[slice(1,4)]:
                            myPossibleCastles += piece[0]+piece[1]+piece[2]+str(int(piece[3])-2)
    return myPossibleCastles

def EnPassant(piece,history):
    lastMove = history[len(history) -2]  # example result: pb64pb44
    # check whether the last move was made by a pawn
    if lastMove[4] == 'p':
        # check whether the pawn that was on row 6 is now on row 4, i.e. whether it moved
        # two squares, which means it was that pawn's first move of the game. Example: pb66 pb46
        if int(lastMove[2]) == int(lastMove[6])+2 or int(lastMove[2]) == int(lastMove[6])-2:
            color = piece[1]
            if color == 'w':
                if piece[2] == '4':
                    return(piece[0]+piece[1]+str(int(lastMove[6])+1)+lastMove[7])
            if color =='b':
                if piece[2] == '3':
                    return(piece[0]+piece[1]+str(int(lastMove[6])-1)+lastMove[7])
python
eps = 10e-7
python
from ..geometry import np
import math

class Quaternion(object):
    def __init__(self, coeffs=[0., 0., 0., 1.]):
        self._coeffs = np.array(coeffs)

    def vec(self):
        return self._coeffs[0:3]

    def coeffs(self):
        return self._coeffs

    def normalize(self):
        norm = np.linalg.norm(self._coeffs)
        self._coeffs = self._coeffs/norm

    def normalized(self):
        norm = np.linalg.norm(self._coeffs)
        coeffs = self._coeffs/norm
        return Quaternion(coeffs)

    @property
    def w(self):
        return self._coeffs[3]

    @w.setter
    def w(self, value):
        self._coeffs[3] = value

    @property
    def x(self):
        return self._coeffs[0]

    @x.setter
    def x(self, value):
        self._coeffs[0] = value

    @property
    def y(self):
        return self._coeffs[1]

    @y.setter
    def y(self, value):
        self._coeffs[1] = value

    @property
    def z(self):
        return self._coeffs[2]

    @z.setter
    def z(self, value):
        self._coeffs[2] = value

    def conjugate(self):
        # x, y, z and w are properties, so they are accessed without parentheses
        return Quaternion([-self.x, -self.y, -self.z, self.w])

    def to_rotation_matrix(self):
        qx, qy, qz, qw = self._coeffs
        sqw = qw * qw
        sqx = qx * qx
        sqy = qy * qy
        sqz = qz * qz
        invs = 1. / (sqx + sqy + sqz + sqw)
        m00 = ( sqx - sqy - sqz + sqw) * invs
        m11 = (-sqx + sqy - sqz + sqw) * invs
        m22 = (-sqx - sqy + sqz + sqw) * invs
        qxy = qx * qy
        qzw = qw * qz
        m10 = 2. * (qxy + qzw) * invs
        m01 = 2. * (qxy - qzw) * invs
        qxz = qx * qz
        qyw = qy * qw
        m20 = 2.0 * (qxz - qyw) * invs
        m02 = 2.0 * (qxz + qyw) * invs
        qyz = qy * qz
        qxw = qx * qw
        m21 = 2. * (qyz + qxw) * invs
        m12 = 2. * (qyz - qxw) * invs
        return np.array([[m00, m01, m02],
                         [m10, m11, m12],
                         [m20, m21, m22]])

    @staticmethod
    def from_rotation_matrix(rotmat):
        m00, m01, m02 = rotmat[0]
        m10, m11, m12 = rotmat[1]
        m20, m21, m22 = rotmat[2]
        trace = m00 + m11 + m22
        if (trace > 0.):
            # S = 4 * qw
            S = math.sqrt(trace + 1.0) * 2.
            qw = 0.25 * S
            qx = (m21 - m12) / S
            qy = (m02 - m20) / S
            qz = (m10 - m01) / S
            return Quaternion([qx, qy, qz, qw])
        elif (m00 > m11 and m00 > m22):
            # S = 4 * qx
            S = math.sqrt(1. + m00 - m11 - m22) * 2
            qw = (m21 - m12) / S
            qx = 0.25 * S
            qy = (m01 + m10) / S
            qz = (m02 + m20) / S
            return Quaternion([qx, qy, qz, qw])
        elif (m11 > m22):
            # S = 4 * qy, so qw = (m02 - m20) / S to match the trace > 0 branch
            S = math.sqrt(1.0 + m11 - m00 - m22) * 2
            qw = (m02 - m20) / S
            qx = (m01 + m10) / S
            qy = 0.25 * S
            qz = (m12 + m21) / S
            return Quaternion([qx, qy, qz, qw])
        else:
            # S = 4 * qz
            S = math.sqrt(1.0 + m22 - m00 - m11) * 2
            qw = (m10 - m01) / S
            qx = (m02 + m20) / S
            qy = (m12 + m21) / S
            qz = 0.25 * S
            return Quaternion([qx, qy, qz, qw])
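A quick round-trip check of the two conversion helpers above (pure numpy/math, nothing beyond what the class already imports); the quaternion is roughly a 45-degree rotation about the y axis.

q = Quaternion([0., 0.3826834, 0., 0.9238795])
q.normalize()
R = q.to_rotation_matrix()
q2 = Quaternion.from_rotation_matrix(R)
# Both coefficient vectors should describe the same rotation (possibly up to an overall sign).
print(R)
print(q.coeffs(), q2.coeffs())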
python
from django.http import HttpResponse from django.shortcuts import render from webcam_manager import * import time webcam_manager = WebcamManager() encryption_manager = EncryptionManager() webcam_manager.start() def make_aes_response(response_data): response = encryption_manager.get_aes_packet(response_data) if response == None: return HttpResponse(status=500) else: return HttpResponse(response, content_type='application/octet-stream') def index(request): try: image_data = webcam_manager.get(0) # get most recent image return make_aes_response(image_data) except: return HttpResponse(status=500) def get_zip(request): try: zip_data, zip_name = webcam_manager.get_zip_of_all_files() return make_aes_response(zip_data) except: return HttpResponse(status=500)
python
import json import numpy as np import os from env_rl import EnvRL from pathlib import Path def score_rl_solution(submission_filepath='example_output_rl.json', final_submission=False): base_path = Path(__file__).parent.absolute() test_data_instance_path = base_path.joinpath('data/valid/instances') test_data_adj_path = base_path.joinpath('data/valid/adjs') f = open(submission_filepath) submission = json.load(f) scores = [] rewardss = [] pens = [] n_feas_sols = 0 for instance_name in submission.keys(): x_path = os.path.join(test_data_instance_path, instance_name + '.csv') adj_path = os.path.join(test_data_adj_path, 'adj-' + instance_name + '.csv') seed = submission[instance_name]['seed'] env = EnvRL(from_file=True, seed=seed, x_path=x_path, adj_path=adj_path) instance = submission[instance_name] if final_submission: n_tours = len(instance['tours'].keys()) assert n_tours == 100, f'each instance must have 100 tours, but found {n_tours} in {instance_name}' for tour_name in instance['tours'].keys(): sol = instance['tours'][tour_name] for node in sol[1:]: env.step(node) rewards = env.get_collected_rewards() pen = env.get_incurred_penalties() feas = env.get_feasibility() assert tour_name == env.get_sim_name(), f'submission {tour_name} in {instance_name} is in the wrong order.' score = rewards + pen n_feas_sols += float(feas) scores.append(score) rewardss.append(rewards) pens.append(pen) env.reset() avg_score = np.mean(scores) avg_rewards = np.mean(rewardss) avg_pen = np.mean(pens) print (avg_score, avg_rewards, avg_pen) return np.round(avg_score, 5) if __name__ == '__main__': print(f'Your submission scored {score_rl_solution():.05f}')
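The shape of the submission file expected by the scorer above, inferred from how it is read; instance names, tour names, the seed and the node ids are placeholders, and note that the first node of each tour is skipped when stepping the environment.

example_submission = {
    "instance_0001": {
        "seed": 12345,
        "tours": {
            "tour_0": [0, 5, 12, 3],
            "tour_1": [0, 7, 2],
        },
    },
}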
python
# Reference: https://leetcode.com/problems/number-of-islands/ # Approach: # 1. Get a list of all locations that have 1 # 2. Iterate through this list and call DFS for every unmarked / unvisited 1 and mark all it's reachable locations with the current_island_count # 3. The final value of current_island_count is the answer # Status: basic test case working. # Issue with: # obj.numIslands([["1","1","0","0","0"],["1","1","0","0","0"],["0","0","1","0","0"],["0","0","0","1","1"]]) # o: 3 # When submitting, leetcode shows my solution as 2, whereas execution in jupyter or even leetcode interactive shows answer as 3? # Strange! post on forums and understand # optimization: land_locations can be a dictionary instead of a list and then you use the sane for visited_dict functionality and find and remove keys (land_locations) whenever they are marked. So you do not have to go through the whole thing of retrieving next land_location and checking whether it's visited. You can be sure that land_locations at all times maintains the unvisited locations due to the removal procedure of newighbours class Solution(object): grid = [] visited_dict = {} rowLen = 0 colLen = 0 islandCount = 0 def numIslands(self, grid): """ :type grid: List[List[str]] :rtype: int """ self.grid = grid # islandCount = 0 land_locations = [] self.rowLen = len(grid) self.colLen = len(grid[0]) # considering symmetric matrix. Also add defensive condition on index 0 access # TODO: can be wrapped in a separate function called get land locations for row in range(self.rowLen): for col in range(self.colLen): if grid[row][col ] == "1": land_locations.append((row, col)) for (row, col) in land_locations: # if already visited, do not recurse on marking if (row, col) not in self.visited_dict: self.islandCount += 1 # mark self and neighbours self.markNeighbours(row, col, self.islandCount) return self.islandCount def markNeighbours(self, row, col, islandCountMarker): # first mark self self.visited_dict[(row, col)] = islandCountMarker # check whether neighbours exist and mark if it's a piece of connected land # TODO: shouldn't this include top & left too? -- mostly it should. try to add and submit on leetcode # bottom if row + 1 < self.rowLen and self.grid[row + 1][col] == "1": self.markNeighbours(row + 1, col, islandCountMarker) # right if col + 1 < self.colLen and self.grid[row][col + 1] == "1": self.markNeighbours(row, col + 1, islandCountMarker) obj = Solution() # obj.numIslands([["1","1","1","1","0"],["1","1","0","1","0"],["1","1","0","0","0"],["0","0","0","0","0"]]) # Output: 1 # obj.numIslands([["1","1","0","0","0"],["1","1","0","0","0"],["0","0","1","0","0"],["0","0","0","1","1"]]) # Output: 3 # obj.numIslands([["1","0"],["0", "1"]]) # o: 2 # obj.numIslands([["1","1"],["1", "1"]]) # o: 1 # obj.numIslands([["1","0","0"],["1","0","0"],["1","1","1"]]) # o: 1 # obj.numIslands([["1","0","1"],["1","0","0"],["0","1","1"]]) # o: 3 # obj.numIslands([["1","0","1"],["1","0","1"],["1","1","0"]]) # o: 2 obj.numIslands([["1","1","0","0","0"],["1","1","0","0","0"],["0","0","1","0","0"],["0","0","0","1","1"]]) # o: 3 # Interesting Pointer: # When local variable islandCount was used, Test Case 2 had issues. Possibly because the local parameter was colliding. Strange but true. # Whereas, when class variable used, it worked perfectly. REM: When you have one value being updated by multiple objects or function calls (here recursive calls), use class variable # (switching between 2 & 3. 
The error only happened when the string "1" was used.) # Possibly due to the way the for loop and recursive calls are interlocked.
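A compact variant that addresses the TODOs in the comments above: it checks all four neighbours and keeps the visited set local to the call instead of on the class.

def num_islands(grid):
    if not grid:
        return 0
    rows, cols = len(grid), len(grid[0])
    visited = set()

    def mark(r, c):
        # iterative DFS over the whole connected component
        stack = [(r, c)]
        while stack:
            i, j = stack.pop()
            if (i, j) in visited:
                continue
            visited.add((i, j))
            for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                ni, nj = i + di, j + dj
                if 0 <= ni < rows and 0 <= nj < cols and grid[ni][nj] == "1":
                    stack.append((ni, nj))

    count = 0
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] == "1" and (r, c) not in visited:
                count += 1
                mark(r, c)
    return count

print(num_islands([["1","1","0","0","0"],["1","1","0","0","0"],["0","0","1","0","0"],["0","0","0","1","1"]]))  # 3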
python
from socket import *
python
from __future__ import unicode_literals import frappe import json from toolz.curried import compose, merge, map, filter @frappe.whitelist() def query(doctype, txt, searchfield, start, page_len, filters): station = filters.get("station") cond = ( " OR ".join( [ "so.initial_station = %(station)s", "so.final_station = %(station)s", "sots.station = %(station)s", ] ) if station else "1 = 1" ) return frappe.db.sql( """ SELECT DISTINCT so.name, so.vehicle, so.driver_name, so.driver FROM `tabShipping Order` AS so LEFT JOIN `tabShipping Order Transit Station` AS sots ON sots.parent = so.name WHERE ({cond}) AND ( so.docstatus = 1 AND so.name LIKE %(txt)s ) LIMIT %(start)s, %(page_len)s """.format( cond=cond, ), values={ "station": station, "txt": "%%%s%%" % txt, "start": start, "page_len": page_len, }, ) @frappe.whitelist() def get_history(name): logs = frappe.db.sql( """ SELECT sl.posting_datetime, sl.station, sl.activity, lo.on_load_no_of_packages, lo.off_load_no_of_packages FROM `tabShipping Log` AS sl LEFT JOIN `tabLoading Operation` AS lo ON lo.name = sl.loading_operation WHERE sl.shipping_order = %(shipping_order)s ORDER BY sl.posting_datetime """, values={"shipping_order": name}, as_dict=1, ) def get_message(log): activity = log.get("activity") if activity == "Operation": on_load = log.get("on_load_no_of_packages") off_load = log.get("off_load_no_of_packages") msg = ( " and ".join( filter( None, [ on_load and "Loaded {} packages".format(on_load), off_load and "Unloaded {} packages".format(off_load), ], ) ) or "Operation" ) return "{} at {}".format(msg, log.get("station"),) if activity == "Stopped": return "Stopped at {}".format(log.get("station")) if activity == "Moving": return "Moving to {}".format(log.get("station")) return activity def get_link(log): if log.get("loading_operation"): "#Form/Loading Operation/{}".format(log.get("loading_operation")) return "" def get_event(log): return { "datetime": log.get("posting_datetime"), "status": log.get("activity"), "message": get_message(log), "link": get_link(log), } return [get_event(x) for x in logs] def get_manifest_rows(shipping_order): return frappe.db.sql( """ SELECT lobo.booking_order, lobo.loading_unit, lobo.qty, SUM(lobo.no_of_packages) AS cur_no_of_packages, SUM(lobo.weight_actual) AS cur_weight_actual, GROUP_CONCAT(bofd.item_description SEPARATOR ', ') AS item_description, bo.destination_station, bo.consignor_name, bo.consignee_name, bo.no_of_packages, bo.weight_actual FROM `tabLoading Operation Booking Order` AS lobo LEFT JOIN `tabLoading Operation` AS lo ON lo.name = lobo.parent LEFT JOIN `tabBooking Order` AS bo ON bo.name = lobo.booking_order LEFT JOIN `tabBooking Order Freight Detail` AS bofd ON bofd.name = lobo.bo_detail WHERE lo.docstatus = 1 AND lobo.parentfield = 'on_loads' AND lo.shipping_order = %(shipping_order)s GROUP BY lobo.booking_order ORDER BY lo.name, lobo.idx """, values={"shipping_order": shipping_order}, as_dict=1, ) def get_freight_summary_rows(shipping_order): def get_amount(row): rate = row.get("rate") or 0 if row.get("based_on") == "Packages": return (row.get("cur_no_of_packages") or 0) * rate if row.get("based_on") == "Weight": return (row.get("cur_weight_actual") or 0) * rate return row.get("amount") or 0 freight_rows = frappe.db.sql( """ SELECT bo.name AS booking_order, bo.consignor_name, bo.consignee_name, bofd.item_description, SUM(lobo.no_of_packages) AS cur_no_of_packages, SUM(lobo.weight_actual) AS cur_weight_actual, bofd.based_on, bofd.rate FROM `tabLoading Operation Booking Order` AS lobo LEFT JOIN 
`tabLoading Operation` AS lo ON lo.name = lobo.parent LEFT JOIN `tabBooking Order` AS bo ON bo.name = lobo.booking_order LEFT JOIN `tabBooking Order Freight Detail` AS bofd ON bofd.name = lobo.bo_detail WHERE lo.docstatus = 1 AND lobo.parentfield = 'on_loads' AND lo.shipping_order = %(shipping_order)s GROUP BY lobo.name ORDER BY lo.name, lobo.idx """, values={"shipping_order": shipping_order}, as_dict=1, ) booking_orders = set([x.get("booking_order") for x in freight_rows]) get_first_loaded_booking_orders = compose( list, map(lambda x: x.get("booking_order")), frappe.db.sql, ) first_loaded_booking_orders = ( get_first_loaded_booking_orders( """ SELECT lobo.booking_order, lo.shipping_order FROM `tabLoading Operation Booking Order` AS lobo LEFT JOIN `tabLoading Operation` AS lo ON lo.name = lobo.parent LEFT JOIN `tabBooking Order Charge` AS boc ON boc.parent = lobo.booking_order WHERE lo.docstatus = 1 AND lobo.parentfield = 'on_loads' AND lobo.booking_order IN %(booking_orders)s GROUP by lobo.booking_order HAVING lo.shipping_order = %(shipping_order)s ORDER BY lo.posting_datetime """, values={"booking_orders": booking_orders, "shipping_order": shipping_order}, as_dict=1, ) if booking_orders else [] ) charges_rows = ( frappe.db.sql( """ SELECT bo.name AS booking_order, bo.consignor_name, bo.consignee_name, GROUP_CONCAT(boc.charge_type SEPARATOR ', ') AS item_description, 0 AS cur_no_of_packages, 0 AS cur_weight_actual, '' AS based_on, 0 AS rate, SUM(boc.charge_amount) AS amount FROM `tabBooking Order` AS bo LEFT JOIN `tabBooking Order Charge` AS boc ON boc.parent = bo.name WHERE bo.name IN %(booking_orders)s AND boc.charge_amount > 0 GROUP BY bo.name """, values={"booking_orders": first_loaded_booking_orders}, as_dict=1, ) if first_loaded_booking_orders else [] ) return sorted( [merge(x, {"amount": get_amount(x)}) for x in freight_rows + charges_rows], key=lambda x: x.get("booking_order"), )
python
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE.txt file in the root directory of this source tree. import argparse import json import os import os.path import random from typing import Counter, DefaultDict, Dict, List, Optional, Tuple import h5py import numpy as np from torchbiggraph.config import ( ConfigSchema, EntitySchema, RelationSchema, get_config_dict_from_module, ) from torchbiggraph.converters.dictionary import Dictionary def collect_relation_types( relation_configs: List[RelationSchema], edge_paths: List[str], dynamic_relations: bool, rel_col: Optional[int], relation_type_min_count: int, ) -> Dictionary: if dynamic_relations: if rel_col is None: raise RuntimeError("Need to specify rel_col in dynamic mode.") print("Looking up relation types in the edge files...") counter: Counter[str] = Counter() for edgepath in edge_paths: with open(edgepath, "rt") as tf: for line_num, line in enumerate(tf, start=1): words = line.split() try: rel_word = words[rel_col] except IndexError: raise RuntimeError( "Line %d of %s has only %d words" % (line_num, edgepath, len(words))) from None counter[rel_word] += 1 print("- Found %d relation types" % len(counter)) if relation_type_min_count > 0: print("- Removing the ones with fewer than %d occurrences..." % relation_type_min_count) counter = Counter({k: c for k, c in counter.items() if c >= relation_type_min_count}) print("- Left with %d relation types" % len(counter)) print("- Shuffling them...") names = list(counter.keys()) random.shuffle(names) else: names = [rconfig.name for rconfig in relation_configs] print("Using the %d relation types given in the config" % len(names)) return Dictionary(names) def collect_entities_by_type( relation_types: Dictionary, entity_configs: Dict[str, EntitySchema], relation_configs: List[RelationSchema], edge_paths: List[str], dynamic_relations: bool, lhs_col: int, rhs_col: int, rel_col: Optional[int], entity_min_count: int, ) -> Dict[str, Dictionary]: counters: Dict[str, Counter[str]] = {} for entity_name in entity_configs.keys(): counters[entity_name] = Counter() print("Searching for the entities in the edge files...") for edgepath in edge_paths: with open(edgepath, "rt") as tf: for line_num, line in enumerate(tf, start=1): words = line.split() try: lhs_word = words[lhs_col] rhs_word = words[rhs_col] rel_word = words[rel_col] if rel_col is not None else None except IndexError: raise RuntimeError( "Line %d of %s has only %d words" % (line_num, edgepath, len(words))) from None if dynamic_relations or rel_col is None: rel_id = 0 else: try: rel_id = relation_types.get_id(rel_word) except KeyError: raise RuntimeError("Could not find relation type in config") counters[relation_configs[rel_id].lhs][lhs_word] += 1 counters[relation_configs[rel_id].rhs][rhs_word] += 1 entities_by_type: Dict[str, Dictionary] = {} for entity_name, counter in counters.items(): print("Entity type %s:" % entity_name) print("- Found %d entities" % len(counter)) if entity_min_count > 0: print("- Removing the ones with fewer than %d occurrences..." 
% entity_min_count) counter = Counter({k: c for k, c in counter.items() if c >= entity_min_count}) print("- Left with %d entities" % len(counter)) print("- Shuffling them...") names = list(counter.keys()) random.shuffle(names) entities_by_type[entity_name] = Dictionary( names, num_parts=entity_configs[entity_name].num_partitions) return entities_by_type def generate_entity_path_files( entity_path: str, entities_by_type: Dict[str, Dictionary], relation_types: Dictionary, dynamic_relations: bool, ) -> None: print("Preparing entity path %s:" % entity_path) for entity_name, entities in entities_by_type.items(): for part in range(entities.num_parts): print("- Writing count of entity type %s and partition %d" % (entity_name, part)) with open(os.path.join( entity_path, "entity_count_%s_%d.txt" % (entity_name, part) ), "wt") as tf: tf.write("%d" % entities.part_size(part)) if dynamic_relations: print("- Writing count of dynamic relations") with open(os.path.join(entity_path, "dynamic_rel_count.txt"), "wt") as tf: tf.write("%d" % relation_types.size()) def generate_edge_path_files( edge_file_in: str, entities_by_type: Dict[str, Dictionary], relation_types: Dictionary, relation_configs: List[RelationSchema], dynamic_relations: bool, lhs_col: int, rhs_col: int, rel_col: Optional[int], ) -> None: basename, _ = os.path.splitext(edge_file_in) edge_path_out = basename + '_partitioned' print("Preparing edge path %s, out of the edges found in %s" % (edge_path_out, edge_file_in)) os.makedirs(edge_path_out, exist_ok=True) num_lhs_parts = max(entities_by_type[rconfig.lhs].num_parts for rconfig in relation_configs) num_rhs_parts = max(entities_by_type[rconfig.rhs].num_parts for rconfig in relation_configs) print("- Edges will be partitioned in %d x %d buckets." % (num_lhs_parts, num_rhs_parts)) buckets: DefaultDict[Tuple[int, int], List[Tuple[int, int, int]]] = \ DefaultDict(list) processed = 0 skipped = 0 with open(edge_file_in, "rt") as tf: for line_num, line in enumerate(tf, start=1): words = line.split() try: lhs_word = words[lhs_col] rhs_word = words[rhs_col] rel_word = words[rel_col] if rel_col is not None else None except IndexError: raise RuntimeError( "Line %d of %s has only %d words" % (line_num, edge_file_in, len(words))) from None if rel_col is None: rel_id = 0 else: try: rel_id = relation_types.get_id(rel_word) except KeyError: # Ignore edges whose relation type is not known. skipped += 1 continue if dynamic_relations: lhs_type = relation_configs[0].lhs rhs_type = relation_configs[0].rhs else: lhs_type = relation_configs[rel_id].lhs rhs_type = relation_configs[rel_id].rhs try: lhs_part, lhs_offset = \ entities_by_type[lhs_type].get_partition(lhs_word) rhs_part, rhs_offset = \ entities_by_type[rhs_type].get_partition(rhs_word) except KeyError: # Ignore edges whose entities are not known. skipped += 1 continue buckets[lhs_part, rhs_part].append((lhs_offset, rhs_offset, rel_id)) processed = processed + 1 if processed % 100000 == 0: print("- Processed %d edges so far..." % processed) print("- Processed %d edges in total" % processed) if skipped > 0: print("- Skipped %d edges because their relation type or entities were " "unknown (either not given in the config or filtered out as too " "rare)." % skipped) for i in range(num_lhs_parts): for j in range(num_rhs_parts): print("- Writing bucket (%d, %d), containing %d edges..." 
% (i, j, len(buckets[i, j]))) edges = np.asarray(buckets[i, j]) with h5py.File(os.path.join( edge_path_out, "edges_%d_%d.h5" % (i, j) ), "w") as hf: hf.attrs["format_version"] = 1 hf.create_dataset("lhs", data=edges[:, 0]) hf.create_dataset("rhs", data=edges[:, 1]) hf.create_dataset("rel", data=edges[:, 2]) def convert_input_data( config: str, edge_paths: List[str], lhs_col: int, rhs_col: int, rel_col: Optional[int] = None, entity_min_count: int = 1, relation_type_min_count: int = 1, ) -> None: entity_configs, relation_configs, entity_path, dynamic_relations = \ validate_config(config) some_output_paths = [] some_output_paths.append(os.path.join(entity_path, "dictionary.json")) some_output_paths.extend( os.path.join(entity_path, "entity_count_%s_0.txt" % entity_name) for entity_name in entity_configs.keys()) if dynamic_relations: some_output_paths.append(os.path.join(entity_path, "dynamic_rel_count.txt")) some_output_paths.extend( os.path.join(os.path.splitext(edge_file)[0] + "_partitioned", "edges_0_0.h5") for edge_file in edge_paths) if all(os.path.exists(path) for path in some_output_paths): print("Found some files that indicate that the input data " "has already been preprocessed, not doing it again.") print("These files are: %s" % ", ".join(some_output_paths)) return os.makedirs(entity_path, exist_ok=True) relation_types = collect_relation_types( relation_configs, edge_paths, dynamic_relations, rel_col, relation_type_min_count, ) entities_by_type = collect_entities_by_type( relation_types, entity_configs, relation_configs, edge_paths, dynamic_relations, lhs_col, rhs_col, rel_col, entity_min_count, ) dump = { "relations": relation_types.get_list(), "entities": {k: v.get_list() for k, v in entities_by_type.items()}, } with open(os.path.join(entity_path, "dictionary.json"), "wt") as tf: json.dump(dump, tf, indent=4) generate_entity_path_files( entity_path, entities_by_type, relation_types, dynamic_relations, ) for edge_path in edge_paths: generate_edge_path_files( edge_path, entities_by_type, relation_types, relation_configs, dynamic_relations, lhs_col, rhs_col, rel_col, ) def validate_config( config: str, ) -> Tuple[Dict[str, EntitySchema], List[RelationSchema], str, bool]: user_config = get_config_dict_from_module(config) # validate entites and relations config entities_config = user_config.get("entities") relations_config = user_config.get("relations") entity_path = user_config.get("entity_path") dynamic_relations = user_config.get("dynamic_relations", False) if not isinstance(entities_config, dict): raise TypeError("Config entities is not of type dict") if not isinstance(relations_config, list): raise TypeError("Config relations is not of type list") if not isinstance(entity_path, str): raise TypeError("Config entity_path is not of type str") if not isinstance(dynamic_relations, bool): raise TypeError("Config dynamic_relations is not of type bool") entities = {} relations = [] for entity, entity_config in entities_config.items(): entities[entity] = EntitySchema.from_dict(entity_config) for relation in relations_config: relations.append(RelationSchema.from_dict(relation)) return entities, relations, entity_path, dynamic_relations def main(): config_help = '\n\nConfig parameters:\n\n' + '\n'.join(ConfigSchema.help()) parser = argparse.ArgumentParser( epilog=config_help, # Needed to preserve line wraps in epilog. 
formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument('config', help='Path to config file') parser.add_argument('edge_paths', nargs='*', help='Input file paths') parser.add_argument('-l', '--lhs-col', type=int, required=True, help='Column index for source entity') parser.add_argument('-r', '--rhs-col', type=int, required=True, help='Column index for target entity') parser.add_argument('--rel-col', type=int, help='Column index for relation entity') parser.add_argument('--relation-type-min-count', type=int, default=1, help='Min count for relation types') parser.add_argument('--entity-min-count', type=int, default=1, help='Min count for entities') opt = parser.parse_args() convert_input_data( opt.config, opt.edge_paths, opt.lhs_col, opt.rhs_col, opt.rel_col, opt.entity_min_count, opt.relation_type_min_count, ) if __name__ == "__main__": main()
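An illustrative call of the converter defined above; the config module and edge file paths are placeholders, and the column indices assume whitespace-separated "lhs rel rhs" rows.

convert_input_data(
    "my_config.py",                        # hypothetical config module defining entities/relations
    ["data/train.tsv", "data/valid.tsv"],  # hypothetical edge files
    lhs_col=0,
    rhs_col=2,
    rel_col=1,
    entity_min_count=1,
    relation_type_min_count=1,
)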
python
__author__ = 'rogerjiang' ''' Purpose: 1. Data augmentation, including: 1.1 random translation in horizontal and vertical directions 1.2 horizontal and vertical flipping 1.3 random rotation ''' ''' Class blancing: Each class is trained using a different model, weights should be applied to the true and false labels if imbalanced. Cross validation can be performed at angles different from the training images. Loss options: 1. Jaccard loss 2. Cross entropy Optimizer options: 1. Adam (learning rate drop at around 0.2 of the initial rate for every 30 epochs) 2. NAdam (no improvement over Adam) (50 epochs with a learning rate of 1e-3 and additional 50 epochs with a learning rate of 1e-4. Each epoch was trained on 400 batches, each batch containing 128 image patches (112x112).) Ensembling: 1. Arithmetic averaging over different angles Special treatment: 1. Waterways using NDWI and CCCI). ''' import pandas as pd import os import utils.data_utils as data_utils import numpy as np import cv2 import sys import gc # data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..') data_dir = "E:\\workspace\\mystoreroom\\dstl_unet-master" CLASSES = { 1: 'Bldg', 2: 'Struct', 3: 'Road', 4: 'Track', 5: 'Trees', 6: 'Crops', 7: 'Fast H20', 8: 'Slow H20', 9: 'Truck', 10: 'Car', } # train_wkt_v4 = pd.read_csv(os.path.join(data_dir, 'data/train_wkt_v4.csv')) train_wkt_v4 = pd.read_csv(os.path.join(data_dir, 'data\\train_wkt_v4.csv')) # grid_sizes = pd.read_csv(os.path.join(data_dir, 'data/grid_sizes.csv'), # skiprows = 1, names = ['ImageId', 'Xmax', 'Ymin']) grid_sizes = pd.read_csv(os.path.join(data_dir, 'data\\grid_sizes.csv'), skiprows = 1, names = ['ImageId', 'Xmax', 'Ymin']) x_crop = 3345 y_crop = 3338 test_names = ['6110_1_2', '6110_3_1', '6100_1_3', '6120_2_2'] #train_names = list(set(data_utils.all_train_names) - set(test_names)) train_names = data_utils.all_train_names test_ids = [data_utils.train_IDs_dict_r[name] for name in test_names] train_ids = [data_utils.train_IDs_dict_r[name] for name in train_names] # no_train_img = len(train_names) # no_test_img = len(test_names) def generate_train_ids(cl): ''' Create train ids, and exclude the images with no true labels :param cl: :return: ''' df = data_utils.collect_stats() df = df.pivot(index = 'ImageId', columns = 'Class', values = 'TotalArea') df = df.fillna(0) df = df[df[data_utils.CLASSES[cl + 1]] != 0] train_names = sorted(list(df.index.get_values())) return [data_utils.train_IDs_dict_r[name] for name in train_names] def get_all_data(img_ids, train = True): ''' Load all the training feature and label into memory. This requires 35 GB memory on Mac and takes a few minutes to finish. 
:return: ''' image_feature = [] image_label = [] no_img = len(img_ids) phase = ['validation', 'training'][train] for i in range(no_img): id = img_ids[i] image_data = data_utils.ImageData(id) image_data.create_train_feature() image_data.create_label() image_feature.append(image_data.train_feature[: x_crop, : y_crop, :]) image_label.append(image_data.label[: x_crop, : y_crop, :]) sys.stdout.write('\rLoading {} data: [{}{}] {}%\n'.\ format(phase, '=' * i, ' ' * (no_img - i - 1), 100 * i / (no_img - 1))) sys.stdout.flush() # del image_data # gc.collect() sys.stdout.write('\n') image_feature = np.stack(image_feature, -1) image_label = np.stack(image_label, -1) sys.stdout.write('Labels are{}valid.\n'.format( ' ' if np.isfinite(image_label).all() and \ (image_label >= 0).all() and (image_label <= 1).all() else ' not ')) sys.stdout.write('Image features are{}valid.\n'.format( ' ' if np.isfinite(image_feature).all() and \ (image_feature >= -5000).all() and (image_feature <= 5000).all() \ else ' not ')) sys.stdout.write('\n') sys.stdout.flush() return np.rollaxis(image_feature, 3, 0), np.rollaxis(image_label, 3, 0) def input_data(crop_size, class_id = 0, crop_per_img = 1, reflection = True, rotation = 8, train = True, verbose = False): ''' Returns the training images (feature) and the corresponding labels :param crop_size: :param class_id: :param crop_per_img: :param reflection: :param rotation: :param train: :return: ''' # img_ids = generate_train_ids(class_id) if train else test_ids img_ids = train_ids if train else test_ids no_img = len(img_ids) image_feature, image_label = get_all_data(img_ids, train = train) while True: images = [] labels = [] # Rotation angle is assumed to be the same, so that the # transformation only needs to be calculated once. if not rotation or rotation == 1: crop_diff = 0 crop_size_new = crop_size else: angle = 360. * np.random.randint(0, rotation) / rotation radian = 2. * np.pi * angle / 360. if verbose: print ('Rotation angle : {0}(degree), {1: 0.2f}(radian)'.\ format(int(angle), radian)) crop_size_new = int( np.ceil(float(crop_size) * (abs(np.sin(radian)) + abs(np.cos(radian))))) rot_mat = cv2.getRotationMatrix2D((float(crop_size_new) / 2., float(crop_size_new) / 2.), angle, 1.) crop_diff = int((crop_size_new - crop_size) / 2.) np.random.shuffle(img_ids) for i in range(no_img): id = img_ids[i] for _ in range(crop_per_img): x_base = np.random.randint(0, x_crop - crop_size_new) y_base = np.random.randint(0, y_crop - crop_size_new) if verbose: print ('x_base {} for No. {} image'.format(x_base, id)) print ('y_base {} for No. {} image'.format(y_base, id)) img_crop = np.squeeze(image_feature[i, x_base: x_base + crop_size_new, y_base: y_base + crop_size_new, :]) label_crop = np.squeeze(image_label[i, x_base: x_base + crop_size_new, y_base: y_base + crop_size_new, class_id]) if not rotation or rotation == 1: img_rot = img_crop label_rot = label_crop else: img_rot = cv2.warpAffine(img_crop, rot_mat, (crop_size_new, crop_size_new)) label_rot = cv2.warpAffine(label_crop, rot_mat, (crop_size_new, crop_size_new)) x_step = 1 if not reflection else \ [-1, 1][np.random.randint(0, 2)] y_step = 1 if not reflection else \ [-1, 1][np.random.randint(0, 2)] images.append(img_rot[crop_diff: crop_diff + crop_size:, crop_diff: crop_diff + crop_size, :]\ [:: x_step, :: y_step, :]) labels.append(label_rot[crop_diff: crop_diff + crop_size, crop_diff: crop_diff + crop_size]\ [:: x_step, :: y_step]) yield np.stack(images, 0), np.stack(labels, 0)
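A small consumption sketch for the generator above; it assumes the DSTL imagery and the data_utils setup are in place, and the parameter values are only illustrative.

gen = input_data(crop_size=112, class_id=0, crop_per_img=4, rotation=8, train=True)
images, labels = next(gen)
# images: (n_train_images * crop_per_img, 112, 112, channels); labels: (n_train_images * crop_per_img, 112, 112)
print(images.shape, labels.shape)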
python
# Tests should generate (and then clean up) any files they need for testing. No # binary files should be included in the repository. import json import event_model from suitcase.mongo_embedded import Serializer import pytest def test_export(db_factory, example_data): """ Test suitcase-mongo-embedded serializer with default parameters. """ permanent_db = db_factory() serializer = Serializer(permanent_db) run(example_data, serializer, permanent_db) if not serializer._frozen: serializer.close() def test_multithread(db_factory, example_data): """ Test suitcase-mongo-embedded serializer with multiple worker threads. """ permanent_db = db_factory() serializer = Serializer(permanent_db, num_threads=5) run(example_data, serializer, permanent_db) if not serializer._frozen: serializer.close() def test_smallbuffer(db_factory, example_data): """ Test suitcase-mongo-embedded serializer with a small buffer. """ permanent_db = db_factory() serializer = Serializer(permanent_db, embedder_size=3000) run(example_data, serializer, permanent_db) if not serializer._frozen: serializer.close() def test_smallqueue(db_factory, example_data): """ Test suitcase-mongo-embedded serializer with a small buffer. """ permanent_db = db_factory() serializer = Serializer(permanent_db, queue_size=1) run(example_data, serializer, permanent_db) if not serializer._frozen: serializer.close() def test_smallpage(db_factory, example_data): """ Test suitcase-mongo-embedded serializer with a small mongo page saize. """ permanent_db = db_factory() serializer = Serializer(permanent_db, page_size=10000) run(example_data, serializer, permanent_db) if not serializer._frozen: serializer.close() def test_evil_db(db_factory, example_data): """ Test suitcase-mongo-embedded serializer with a db that raises an exception on bulk_write. """ def evil_func(*args, **kwargs): raise RuntimeError permanent_db = db_factory() serializer = Serializer(permanent_db) serializer._bulkwrite_event = evil_func serializer._bulkwrite_datum = evil_func with pytest.raises(RuntimeError): run(example_data, serializer, permanent_db) if not serializer._frozen: serializer.close() def run(example_data, serializer, permanent_db): """ Testbench for suitcase-mongo-embedded serializer. This stores all documents that are going to the serializer into a dictionary. After the run completes, it then queries the permanent mongo database, and reads the documents to a separate dictionary. The two dictionaries are checked to see if they match. """ run_dict = {'start': {}, 'stop': {}, 'descriptor': [], 'resource': [], 'event': [], 'datum': []} documents = example_data() mongo_serializer = serializer for item in documents: # Fix formatting for JSON. item = event_model.sanitize_doc(item) # Send the bluesky doc to the serializer mongo_serializer(*item) # Bulk_event/datum need to be converted to a list of events/datum # before inserting in the run_dict. if item[0] in {'bulk_events', 'bulk_datum'}: pages = bulk_to_pages(*item) doc_list = pages_to_list(pages) for doc in doc_list: run_dict[doc[0]].append(doc[1]) else: if item[0] in {'event_page', 'datum_page'}: doc_list = page_to_list(*item) for doc in doc_list: run_dict[doc[0]].append(doc[1]) else: if type(run_dict.get(item[0])) == list: run_dict[item[0]].append(item[1]) else: run_dict[item[0]] = item[1] # Read the run from the mongo database and store in a dict. frozen_run_dict = run_list_to_dict(get_embedded_run( permanent_db, run_dict['start']['uid'])) # Sort the event field of each dictionary. 
With multiple streams, the # documents that don't go through the serializer don't appear to be sorted # correctly. if len(run_dict['event']): run_dict['event'] = sorted(run_dict['event'], key=lambda x: x['descriptor']) frozen_run_dict['event'] = sorted(frozen_run_dict['event'], key=lambda x: x['descriptor']) # Compare the two dictionaries. assert (json.loads(json.dumps(run_dict, sort_keys=True)) == json.loads(json.dumps(frozen_run_dict, sort_keys=True))) def run_list_to_dict(embedded_run_list): """ Converts a run from the mongo database to a dictionary. """ run_dict = {'start': {}, 'stop': {}, 'descriptor': [], 'resource': [], 'event': [], 'datum': []} header = embedded_run_list[0][1] run_dict['start'] = header['start'][0] run_dict['stop'] = header['stop'][0] run_dict['descriptor'] = header.get('descriptors', []) run_dict['resource'] = header.get('resources', []) for name, doc in embedded_run_list[1:]: if name == 'event': run_dict['event'] += list(event_model.unpack_event_page(doc)) elif name == 'datum': run_dict['datum'] += list(event_model.unpack_datum_page(doc)) return run_dict def get_embedded_run(db, run_uid): """ Gets a run from a database. Returns a list of the run's documents. """ run = list() # Get the header. header = db.header.find_one({'run_id': run_uid}, {'_id': False}) if header is None: raise RuntimeError(f"Run not found {run_uid}") run.append(('header', header)) # Get the events. if 'descriptors' in header.keys(): for descriptor in header['descriptors']: run += [('event', doc) for doc in db.event.find({'descriptor': descriptor['uid']}, {'_id': False})] # Get the datum. if 'resources' in header.keys(): for resource in header['resources']: run += [('datum', doc) for doc in db.datum.find({'resource': resource['uid']}, {'_id': False})] return run def bulk_to_pages(name, doc): """ Converts bulk_events/datum to event/datum_page. """ key_map = {'bulk_events': 'event_page', 'bulk_datum': 'datum_page'} if name == 'bulk_events': doc = event_model.bulk_events_to_event_pages(doc) elif name == 'bulk_datum': doc = event_model.bulk_datum_to_datum_pages(doc) page_list = [[key_map[name], item] for item in doc] return page_list def pages_to_list(pages): """ Converts event/datum_page to event/datum lists. """ doc_list = [] for page in pages: if page[0] == 'event_page': doc_list.extend([['event', event] for event in event_model.unpack_event_page(page[1])]) if page[0] == 'datum_page': doc_list.extend([['datum', datum] for datum in event_model.unpack_datum_page(page[1])]) return doc_list def page_to_list(name, page): """ Converts event/datum_page to event/datum lists. """ doc_list = [] if name == 'event_page': doc_list.extend([['event', event] for event in event_model.unpack_event_page(page)]) if name == 'datum_page': doc_list.extend([['datum', datum] for datum in event_model.unpack_datum_page(page)]) return doc_list
python
#!/usr/bin/env python import json import os import logging from ruv_dl.constants import CACHE_LOCATION, CACHE_VERSION, CACHE_VERSION_KEY logger = logging.getLogger(__name__) class CacheVersionException(Exception): pass class DiskCache: def __init__(self, program_id): self.location = os.path.join(CACHE_LOCATION, f'{program_id}.json') try: with open(self.location, 'r') as f: self._data = json.loads(f.read()) SAVED_CACHE_VERSION = self._data.get(CACHE_VERSION_KEY) if SAVED_CACHE_VERSION != CACHE_VERSION: logger.info( f'Have cache version "{SAVED_CACHE_VERSION}" but ' f'want {CACHE_VERSION}. Starting with empty cache.' ) raise CacheVersionException() logger.debug('Cache version OK.') except (FileNotFoundError, CacheVersionException): self._data = { CACHE_VERSION_KEY: CACHE_VERSION, } def get(self, key): return self._data[key] def set(self, key, data): self._data[key] = data def has(self, key): return key in self._data def remove(self, key): del self._data[key] def write(self): with open(self.location, 'w') as f: f.write(json.dumps(self._data))
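A minimal usage sketch; the program id and the cached keys are made up, and CACHE_LOCATION / CACHE_VERSION come from ruv_dl.constants as imported above.

cache = DiskCache('some-program-id')
if not cache.has('episodes'):
    cache.set('episodes', [{'id': 1, 'title': 'pilot'}])
print(cache.get('episodes'))
cache.write()   # persist the data (including the cache-version key) back to disk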
python
import socket, time, signal

def resolves(domain, timeout):
    try:
        socket.gethostbyname(domain)
        return True
    except socket.gaierror:
        return False
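A short usage sketch. Note that resolves() currently ignores its timeout argument, since socket.gethostbyname() is governed by the system resolver's own timeouts.

for name in ("example.com", "definitely-not-a-real-host.invalid"):
    print(name, resolves(name, timeout=3.0))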
python
from __future__ import annotations from typing import Union, List, Set, FrozenSet, Optional, Dict, IO, Callable from pathlib import Path from gd2c.project import Project from gd2c.target import Target from gd2c.gdscriptclass import GDScriptClass, GDScriptFunction, GDScriptMember, GDScriptGlobal from gd2c.targets._gdnative.context import GlobalContext, ClassContext, FunctionContext from gd2c.variant import VariantType from gd2c import controlflow import gd2c.targets._gdnative.transform as transform import gd2c.targets._gdnative.class_codegen as class_codegen import gd2c.targets._gdnative.function_codegen as function_codegen class CPPNativeTarget(Target): project: Project def __init__(self, project: Project): self.project = project def transform(self) -> None: for cls in self.project.classes(): for func in cls.functions(): func.cfg = controlflow.build_control_flow_graph(func) func.cfg.live_variable_analysis() transform.insert_initializers_transformation(func) transform.insert_parameter_copies(func) #transform.replace_init_calls_with_noop_transformation(func) transform.insert_destructors_transformation(func) def emit(self, output_path: str) -> None: gen = CPPNativeCodeGen(self.project, output_path) gen.transpile() class CPPNativeCodeGen: def __init__(self, project: Project, output_path: Union[str, Path]): self.project = project self.global_context = GlobalContext() self.class_contexts: Dict[int, ClassContext] = {} self.output_path = Path(output_path) @property def output_path(self) -> Path: return self._output_path @output_path.setter def output_path(self, value: str): p = Path(value) assert p.is_dir(), "output_path must be a directory" assert not str(p.resolve()).startswith(str(Path(self.project.root).resolve())) self._output_path = p def transpile(self): self._initialize_contexts() self._transpile_header_file() self._transpile_c_file() def _initialize_contexts(self): self.class_contexts = {} for cls in self.project.iter_classes_in_dependency_order(): context = ClassContext(cls, self.global_context, self.class_contexts.get(cls.base.type_id, None) if cls.base else None) self.class_contexts[cls.type_id] = context self.global_context.initialize_globals(next(iter(self.class_contexts.values())).cls.globals) def _transpile_header_file(self): p = Path(self._output_path, "godotproject.h") with p.open(mode="w") as header: header.write(f"""\ #ifndef __GD2C_GODOTPROJECT__ #define __GD2C_GODOTPROJECT__ #include "gd2c.h" """) for cls in self.project.iter_classes_in_dependency_order(): class_context = self.class_contexts[cls.type_id] class_codegen.transpile_struct(class_context, header) class_codegen.transpile_constant_declarations(class_context, header) for func in cls.functions(): if func.has_constants: func_context = class_context.get_function_context(func) if len(func.global_names) > 0: header.write(f"""\ godot_string_name {func_context.global_names_identifier}[{len(func.global_names)}]; godot_string {func_context.global_strings_identifier}[{len(func.global_names)}]; """) if func.len_constants: header.write(f"""godot_variant {func_context.local_constants_array_identifier}[{func.len_constants}];\n""") header.write(f"""int {func_context.initialized_local_constants_array_identifier} = 0;\n""") for cls in self.project.iter_classes_in_dependency_order(): class_context = self.class_contexts[cls.type_id] class_codegen.transpile_property_signatures(class_context, header) for cls in self.project.iter_classes_in_dependency_order(): class_context = self.class_contexts[cls.type_id] 
class_codegen.transpile_ctor_signature(class_context, header) header.write(";\n") class_codegen.transpile_dtor_signature(class_context, header) header.write(";\n") for func_context in class_context.function_contexts.values(): function_codegen.transpile_signature(func_context, header) header.write(f"""\ #endif """) def _transpile_c_file(self): p = Path(self._output_path, "godotproject.cpp") with p.open(mode="w") as writer: writer.write(f"""\ #include "gd2c.h" #include "godotproject.h" #include "math.h" {self.global_context.define()} """) for cls in self.project.iter_classes_in_dependency_order(): class_context = self.class_contexts[cls.type_id] class_codegen.transpile_ctor(class_context, writer) class_codegen.transpile_dtor(class_context, writer) class_codegen.transpile_property_implementations(class_context, writer) for func_context in class_context.function_contexts.values(): function_codegen.transpile_function(func_context, writer) class_codegen.transpile_vtable(class_context, writer) self._transpile_gdnative_init(writer) self._transpile_gdnative_terminate(writer) self._transpile_nativescript_init(writer) def _transpile_gdnative_init(self, writer: IO): writer.write(f"""\ void GDN_EXPORT {self.project.export_prefix}_gdnative_init(godot_gdnative_init_options *p_options) {{ //printf("Enter: {self.project.export_prefix}_gdnative_init\\n"); api10 = p_options->api_struct; const godot_gdnative_api_struct *extension = api10->next; while (extension) {{ if (extension->version.major == 1 && extension->version.minor == 1) {{ //printf(" Found api11\\n"); api11 = (const godot_gdnative_core_1_1_api_struct*)extension; }} if (extension == extension->next) break; extension = extension->next; }} for (int i = 0; i < api10->num_extensions; ++i) {{ switch (api10->extensions[i]->type) {{ case GDNATIVE_EXT_NATIVESCRIPT: {{ extension = api10->extensions[i]; nativescript10 = (godot_gdnative_ext_nativescript_api_struct*)extension; while (extension) {{ if (extension->version.major == 1 && extension->version.minor == 1) {{ //printf(" Found nativescript11\\n"); nativescript11 = (const godot_gdnative_ext_nativescript_1_1_api_struct*)extension; }} if (extension == extension->next) break; extension = extension->next; }} }}; break; default: break; }} }} gd2c_api_initialize(); vtable_init_base(); api10->godot_variant_new_nil(&__nil); //printf("Exit: {self.project.export_prefix}_gdnative_init\\n"); }} """) def _transpile_gdnative_terminate(self, writer: IO): writer.write(f"""\ void GDN_EXPORT {self.project.export_prefix}_gdnative_terminate(godot_gdnative_terminate_options *p_options) {{ //printf("Enter: {self.project.export_prefix}_gdnative_terminate\\n"); api10->godot_variant_destroy(&__nil); """) self._transpile_class_constants_destruction(writer) self._transpile_global_constants_array_destruction(writer) writer.write(f"""}}\n""") def _transpile_nativescript_init(self, writer: IO): writer.write(f"""\ void GDN_EXPORT {self.project.export_prefix}_nativescript_init(void *p_handle) {{ //printf("Enter: {self.project.export_prefix}_nativescript_init\\n"); """) def visitor(cls: GDScriptClass, depth: int): class_context = self.class_contexts[cls.type_id] writer.write(f"""\ {{ //printf(" Register class: {cls.name}\\n"); godot_instance_create_func create = {{ NULL, NULL, NULL }}; create.create_func = {class_context.ctor_identifier}; godot_instance_destroy_func destroy = {{ NULL, NULL, NULL }}; destroy.destroy_func = {class_context.dtor_identifier}; nativescript10->godot_nativescript_register_class(p_handle, "{cls.name}", 
"{cls.built_in_type}", create, destroy); }} """) writer.write(f"""\ {{ //printf(" Register method: __gd2c_is_class_instanceof\\n"); godot_instance_method method = {{ NULL, NULL, NULL }}; method.method = &__gd2c_is_class_instanceof; godot_method_attributes attributes = {{ GODOT_METHOD_RPC_MODE_DISABLED }}; nativescript10->godot_nativescript_register_method(p_handle, "{cls.name}", "__gd2c_is_class_instanceof", attributes, method); }} """) for entry in class_context.vtable_entries: writer.write(f"""\ {{ //printf(" Register method: {entry.func_context.function_identifier}\\n"); godot_instance_method method = {{ NULL, NULL, NULL }}; method.method = &{entry.func_context.function_identifier}; godot_method_attributes attributes = {{ GODOT_METHOD_RPC_MODE_DISABLED }}; nativescript10->godot_nativescript_register_method(p_handle, "{cls.name}", "{entry.func_context.func.name}", attributes, method); }} """) for signal in cls.signals(): writer.write(f"""\ {{ //printf(" Register signal: {signal}\\n"); godot_string name = api10->godot_string_chars_to_utf8("{signal}"); godot_signal signal = {{ name, 0, NULL, 0, NULL }}; nativescript10->godot_nativescript_register_signal(p_handle, "{signal}", &signal); }} """) for member_context in class_context.member_contexts.values(): writer.write(f"""\ {{ //printf(" Register member: {member_context.member_identifier}\\n"); godot_property_set_func setter = {{ NULL, NULL, NULL }}; setter.set_func = &{member_context.setter_identifier}; godot_property_get_func getter = {{ NULL, NULL, NULL }}; getter.get_func = &{member_context.getter_identifier}; godot_property_attributes attributes = {{ GODOT_METHOD_RPC_MODE_DISABLED }}; nativescript10->godot_nativescript_register_property(p_handle, "{class_context.cls.name}", "{member_context.path}", &attributes, setter, getter); }} """) writer.write(f"""\ {class_context.vtable_init_function_identifier}(); """) self.project.visit_classes_in_dependency_order(visitor) self._transpile_global_constants_array_initialization(writer) self._transpile_class_constants_initialization(writer) writer.write(f"""\ //printf("Exit: {self.project.export_prefix}_nativescript_init\\n"); }} """) def _transpile_global_constants_array_initialization(self, writer: IO) -> None: for i in range(0, len(self.global_context.globals) + 1): if i in self.global_context.globals: cnst = self.global_context.globals[i] if cnst.source in (GDScriptGlobal.SOURCE_CONSTANT, GDScriptGlobal.SOURCE_HARDCODED): if cnst.vtype == VariantType.INT: writer.write(f"api10->godot_variant_new_int({self.global_context.address_of_expression(cnst.index)}, {cnst.value});\n") elif cnst.vtype == VariantType.REAL: literal = cnst.value if cnst.value == "inf": literal = "INFINITY" elif cnst.value == "nan": literal = "NAN" writer.write(f"api10->godot_variant_new_real({self.global_context.address_of_expression(cnst.index)}, {literal});\n") elif cnst.source == GDScriptGlobal.SOURCE_SINGLETON: writer.write(f"""\ {{ godot_object *singleton = api10->godot_global_get_singleton("{cnst.original_name}"); api10->godot_variant_new_object({self.global_context.address_of_expression(cnst.index)}, singleton); }} """) elif cnst.source == GDScriptGlobal.SOURCE_CLASSDB: utf8 = bytes(cnst.original_name, "UTF-8") writer.write(f"""\ {{ // {cnst.original_name} char data[] = {{ {','.join(map(lambda b: str(b), utf8))} }}; register_classdb_global(\ {self.global_context.address_of_expression(cnst.index)}, \ (const char *)data, \ {len(utf8)}); }} """) def _transpile_global_constants_array_destruction(self, writer: IO) -> None: 
for i in range(0, len(self.global_context.globals) + 1): if i in self.global_context.globals: cnst = self.global_context.globals[i] if cnst.source in (GDScriptGlobal.SOURCE_CONSTANT, GDScriptGlobal.SOURCE_HARDCODED): if cnst.vtype == VariantType.INT: writer.write(f"api10->godot_variant_destroy({self.global_context.address_of_expression(cnst.index)});\n") elif cnst.vtype == VariantType.REAL: writer.write(f"api10->godot_variant_destroy({self.global_context.address_of_expression(cnst.index)});\n") elif cnst.source == GDScriptGlobal.SOURCE_SINGLETON: writer.write(f"api10->godot_variant_destroy({self.global_context.address_of_expression(cnst.index)});\n") elif cnst.source == GDScriptGlobal.SOURCE_CLASSDB: writer.write(f"api10->godot_variant_destroy({self.global_context.address_of_expression(cnst.index)});\n") def _transpile_class_constants_initialization(self, writer: IO) -> None: for cls in self.project.iter_classes_in_dependency_order(): class_context = self.class_contexts[cls.type_id] for cc in class_context.constant_contexts.values(): writer.write(f"""\ {{ uint8_t data[] = {{ {','.join(map(lambda b: str(b), cc.constant.data))} }}; int bytesRead; gd2c10->variant_decode(&{class_context.constants_array_identifier}[{cc.index}], data, {len(cc.constant.data)}, &bytesRead, true); }} """) def _transpile_class_constants_destruction(self, writer: IO) -> None: for cls in self.project.iter_classes_in_dependency_order(): class_context = self.class_contexts[cls.type_id] for cc in class_context.constant_contexts.values(): writer.write(f"""\ api10->godot_variant_destroy(&{class_context.constants_array_identifier}[{cc.index}]); """) for func in class_context.cls.functions(): function_context = class_context.get_function_context(func.name) assert function_context if function_context.func.len_constants: writer.write(f"""if (0 != {function_context.initialized_local_constants_array_identifier}) {{\n""") for i in range(function_context.func.len_constants): writer.write(f"""api10->godot_variant_destroy(&{function_context.local_constants_array_identifier}[{i}]);\n""") writer.write(f"""}}\n""")
python
from django import template
from django.conf import settings
from django.urls import reverse
from django.utils.html import format_html

from django_gravatar.helpers import get_gravatar_url

register = template.Library()


@register.simple_tag
def user_link(user):
    gravatar_url = get_gravatar_url(user.email, size=16)
    profile_url = reverse('user_profile', args=[user.username])
    return format_html("""<a href="{0}"><img class="gravatar-small" src="{1}"/>{2}</a>""",
                       profile_url, gravatar_url, user.get_full_name())


@register.inclusion_tag('assets/asset_title.html')
def asset_title(asset, as_link):
    return {'asset': asset, 'as_link': as_link}


@register.inclusion_tag('assets/asset_common.html')
def asset_common(user, asset, verbose):
    return {'user': user, 'asset': asset, 'verbose': verbose}


@register.inclusion_tag('assets/asset_thumbnail.html')
def asset_thumbnail(asset, as_link=True):
    return {'asset': asset, 'as_link': as_link}
python
# -*- coding: utf-8 -*-
"""
Test of the non-stationary poisson process sampling func.
"""
import numpy as np
import simpy

from forecast_ed.sampling import nspp

fname = 'data/arrivals.csv'
data = np.genfromtxt(fname, delimiter=',', skip_header=1)

arrivals = []


def generate(env):
    a = nspp(data)
    for time in a:
        iat = time - env.now
        arrivals.append(time)
        print("Now: {0}; IAT: {1}; Next: {2}".format(env.now, iat, env.now + iat))
        yield env.timeout(iat)


run_time = 1440 * 5
time = 0
env = simpy.Environment()
env.process(generate(env))
env.run(until=run_time)

np.savetxt('data.csv', np.array(arrivals), delimiter=',')
python
from dotenv import load_dotenv
import os
import requests
load_dotenv()
import json

API_URL = os.getenv("shopify_product_url")
url = API_URL + '?limit=250'
products = []
headers = {'Content-Type': 'application/json'}

r = requests.get(url, headers=headers)
products = products + r.json()['products']
header_link = r.headers['Link']
header_link_arr = header_link.split(',')
print(header_link_arr)

while not (header_link.find('rel="next"') == -1):
    # if(len(header_link_arr)==2):
    #     print(header_link_arr[0])
    #     print(header_link_arr[1])
    #     break
    # print(page_rel)
    if len(header_link_arr) == 2:
        page_rel = header_link_arr[1]
        page_rel = page_rel[page_rel.find('&') + 1:]
    else:
        page_rel = header_link_arr[0]
        page_rel = page_rel[page_rel.find('&') + 1:]
    next_page_rel = page_rel[page_rel.find('=') + 1:page_rel.find('>')]
    url = API_URL + '?limit=250&page_info=' + next_page_rel
    r = requests.get(url, headers=headers)
    products = products + r.json()['products']
    header_link = r.headers['Link']
    header_link_arr = header_link.split(',')
    print(header_link_arr)

# if not(page_rel.find('rel="next"')==-1):
#     next_page_rel=page_rel[page_rel.find('=')+1:page_rel.find('>')]
#     print(next_page_rel)

with open('products.json', 'w') as fout:
    json.dump(products, fout)
python
from .base_state import *
from .channel_state import *
from .emoji_state import *
from .guild_state import *
from .message_state import *
from .role_state import *
from .user_state import *
python
from django.urls import re_path

from .views import SettingsView, UpdateSettingsView


app_name = "baserow.api.settings"

urlpatterns = [
    re_path(r"^update/$", UpdateSettingsView.as_view(), name="update"),
    re_path(r"^$", SettingsView.as_view(), name="get"),
]
python
# coding: utf-8
"""Test device 1."""

from . import release
from .TestDevice1 import TestDevice1
from .TestDevice2 import TestDevice2

__version__ = release.__version__
__version_info__ = release.__version_info__
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import socket
import threading
import sys
import time

from filesocket import filesocket

'''path to temporary directory used for file sockets'''
SOCKSER_DIR = ''

'''SOCKS5 RFC described connection methods'''
CONNECT = 1
BIND = 2
UDP_ASSOCIATE = 3

'''SOCKS5 RFC described supported address types'''
IPV4 = 1
DOMAINNAME = 3
IPV6 = 4

'''ERROR messages'''
CONNECT_SUCCESS = 0
ERROR_ATYPE = "[-] Client address error!"
ERROR_VERSION = "[-] Client version error!"
ERROR_METHOD = "[-] Client method error!"
ERROR_RSV = "[-] Client Reserved byte error!"
ERROR_CMD = "[-] Command not implemented by server error!"

''' Reserved byte '''
RSV = 0

''' Zero-filled bind address and port used in replies '''
BNDADDR = "\x00" * 4
BNDPORT = "\x00" * 2

'''SOCKS VERSION (used in initial negotiation)'''
SOCKS_VERSION = 5

# ALLOWED_METHOD = [0, 2]
ALLOWED_METHOD = [0]


def main():
    global SOCKSER_DIR
    if len(sys.argv) != 4:
        print "Usage : "
        print "\tpython %s [L_HOST] [L_PORT] [SOCKSER_TMP_DIRECTORY]" % (sys.argv[0])
        print "Example : "
        print "\tpython %s 127.0.0.1 1080 /tmp/sockser/" % (sys.argv[0])
        exit(1)

    LOCAL_HOST = sys.argv[1]
    LOCAL_PORT = int(sys.argv[2])
    MAX_CONNECTION = 0x100
    SOCKSER_DIR = sys.argv[3]
    if SOCKSER_DIR[-1] != '/':
        SOCKSER_DIR += '/'
    print "Sockser dir :" + SOCKSER_DIR

    server(LOCAL_HOST, LOCAL_PORT, MAX_CONNECTION)


def server(local_host, local_port, max_connection):
    try:
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_socket.bind((local_host, local_port))
        server_socket.listen(max_connection)
        print '[+] Server started [%s:%d]' % (local_host, local_port)

        while True:
            local_socket, local_address = server_socket.accept()
            print '[+] Detect connection from [%s:%s]' % (local_address[0], local_address[1])

            result = socks_selection(local_socket)
            if not result[0]:
                print "[-] socks selection error!"
                break

            result = socks_request(result[1])
            if not result[0]:
                print "[-] socks request error!"
                break

            local_socket, remote_socket = result[1]

        # TODO : loop all socket to close...
        print "[+] Releasing resources..."
        local_socket.close()
        print "[+] Closing server..."
        server_socket.close()
        print "[+] Server shut down!"
    except KeyboardInterrupt:
        print ' Ctrl-C stop server'
        try:
            remote_socket.close()
        except:
            pass
        try:
            local_socket.close()
        except:
            pass
        try:
            server_socket.close()
        except:
            pass
    return


def socks_selection(sock):
    '''Parses first request and retrieves client info (host, port, socks version and method).

    Note: the parameter is named sock rather than socket so that module-level
    constants such as socket.SHUT_RDWR remain reachable on the error paths.'''
    ''' retrieves client supported version number'''
    client_version = ord(sock.recv(1))
    print "[+] client version : %d" % client_version

    ''' checks if client supported version is supported by server'''
    if not client_version == SOCKS_VERSION:
        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
        return False, ERROR_VERSION

    ''' retrieves client supported connection methods'''
    support_method_number = ord(sock.recv(1))
    print "[+] Client Supported method number : %d" % support_method_number

    ''' creates supported methods list'''
    support_methods = []
    for i in range(support_method_number):
        method = ord(sock.recv(1))
        print "[+] Client Method : %d" % method
        support_methods.append(method)

    ''' chooses method from those supported'''
    selected_method = None
    for method in ALLOWED_METHOD:
        if method in support_methods:
            selected_method = 0

    ''' checks if method was chosen '''
    if selected_method is None:
        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
        return False, ERROR_METHOD

    ''' sends chosen method to client '''
    print "[+] Server select method : %d" % selected_method
    response = chr(SOCKS_VERSION) + chr(selected_method)
    sock.send(response)

    ''' returns socket if everything went well'''
    return True, sock


def socks_request(local_socket):
    # start SOCKS negotiation
    client_version = ord(local_socket.recv(1))
    print "[+] client version : %d" % client_version
    if not client_version == SOCKS_VERSION:
        local_socket.shutdown(socket.SHUT_RDWR)
        local_socket.close()
        return False, ERROR_VERSION

    cmd = ord(local_socket.recv(1))
    if cmd == CONNECT:
        print "[+] CONNECT request from client"

        rsv = ord(local_socket.recv(1))
        if rsv != 0:
            local_socket.shutdown(socket.SHUT_RDWR)
            local_socket.close()
            return False, ERROR_RSV

        atype = ord(local_socket.recv(1))
        if atype == IPV4:
            dst_address = ("".join(["%d." % (ord(i)) for i in local_socket.recv(4)]))[0:-1]
            print "[+] IPv4 : %s" % dst_address
            dst_port = ord(local_socket.recv(1)) * 0x100 + ord(local_socket.recv(1))
            print "[+] Port : %s" % dst_port

            ''' setting up filesocket '''
            remote_socket = filesocket.filesocket(socket_dir=SOCKSER_DIR)
            try:
                print "[+] Fake connecting : %s:%s" % (dst_address, dst_port)
                timestamp = str(int(time.time()))
                remote_socket.connect((dst_address, dst_port), timestamp)

                response = ""
                response += chr(SOCKS_VERSION)
                response += chr(CONNECT_SUCCESS)
                response += chr(RSV)
                response += chr(IPV4)
                response += BNDADDR
                response += BNDPORT
                local_socket.send(response)
                print "[+] Tunnel connected! Transferring data..."

                r = threading.Thread(target=transfer_in, args=(
                    local_socket, remote_socket))
                r.start()
                s = threading.Thread(target=transfer_out, args=(
                    remote_socket, local_socket))
                s.start()
                return True, (local_socket, remote_socket)
            except socket.error as e:
                print e
                remote_socket.shutdown(socket.SHUT_RDWR)
                remote_socket.close()
                local_socket.shutdown(socket.SHUT_RDWR)
                local_socket.close()
        elif atype == DOMAINNAME:
            domainname_length = ord(local_socket.recv(1))
            domainname = ""
            for i in range(domainname_length):
                domainname += (local_socket.recv(1))
            print "[+] Domain name : %s" % (domainname)
            dst_port = ord(local_socket.recv(1)) * 0x100 + ord(local_socket.recv(1))
            print "[+] Port : %s" % (dst_port)

            # SETTING UP FILENAME instead of preparing socket
            remote_socket = filesocket.filesocket(socket_dir=SOCKSER_DIR)
            try:
                print "[+] Fake connecting : %s:%s" % (domainname, dst_port)
                timestamp = str(int(time.time()))
                remote_socket.connect((domainname, dst_port), timestamp)

                response = ""
                response += chr(SOCKS_VERSION)
                response += chr(CONNECT_SUCCESS)
                response += chr(RSV)
                response += chr(IPV4)
                response += BNDADDR
                response += BNDPORT
                local_socket.send(response)
                print "[+] Tunnel connected! Transferring data..."

                r = threading.Thread(target=transfer_in, args=(
                    local_socket, remote_socket))
                r.start()
                s = threading.Thread(target=transfer_out, args=(
                    remote_socket, local_socket))
                s.start()
                return (True, (local_socket, remote_socket))
            except socket.error as e:
                print e
                remote_socket.shutdown(socket.SHUT_RDWR)
                remote_socket.close()
                local_socket.shutdown(socket.SHUT_RDWR)
                local_socket.close()
        elif atype == IPV6:
            # TODO
            dst_address = int(local_socket.recv(4).encode("hex"), 16)
            print "[+] IPv6 : %x" % (dst_address)
            dst_port = ord(local_socket.recv(1)) * 0x100 + ord(local_socket.recv(1))
            print "[+] Port : %s" % (dst_port)
            # TODO IPv6 under construction
            print "IPv6 support under construction"
            local_socket.shutdown(socket.SHUT_RDWR)
            local_socket.close()
            return (False, ERROR_ATYPE)
        else:
            local_socket.shutdown(socket.SHUT_RDWR)
            local_socket.close()
            return (False, ERROR_ATYPE)
    elif cmd == BIND:
        # TODO
        print "socks5 BIND command is not supported for now."
        local_socket.shutdown(socket.SHUT_RDWR)
        local_socket.close()
        return (False, ERROR_CMD)
    elif cmd == UDP_ASSOCIATE:
        # TODO
        print "socks5 UDP_ASSOCIATE command is not supported for now."
        local_socket.shutdown(socket.SHUT_RDWR)
        local_socket.close()
        return (False, ERROR_CMD)
    else:
        local_socket.shutdown(socket.SHUT_RDWR)
        local_socket.close()
        return (False, ERROR_CMD)

    return (True, local_socket)


def transfer_in(local_socket, remote_socket):
    ''' local_socket - local socket '''
    ''' remote_socket - fileSocket '''
    local_socket_name = local_socket.getpeername()
    local_socket_address = local_socket_name[0]
    local_socket_port = local_socket_name[1]

    remote_socket_address = remote_socket.getHostname()
    remote_socket_port = str(remote_socket.getPort())

    print "[+] Starting transfer [%s:%s] => [%s:%s]" % (local_socket_address, local_socket_port, remote_socket_address, remote_socket_port)
    while True:
        ''' receive from local socket'''
        buff = local_socket.recv(0x1000)
        ''' if buffer not empty send to filesocket'''
        if buff:
            # remote_socket.send(handle(buff))
            remote_socket.send(buff)
        ''' if socket broke break '''
        if not buff or remote_socket.is_out_closed():
            print "[-] No data received from NETWORK! Breaking filesocket and remote connection..."
            remote_socket.close_in()
            print "[+] Closing connections! [%s:%s]" % (local_socket_address, local_socket_port)
            local_socket.close()
            break
        print "[+] %s:%d => %s:%s [%s]" % (local_socket_address, local_socket_port, remote_socket_address, remote_socket_port, repr(buff))
        print "[+] %s:%s => %s:%s => Length : [%d]" % (local_socket_address, local_socket_port, remote_socket_address, remote_socket_port, len(buff))


def transfer_out(remote_socket, local_socket):
    ''' Description : this function reads in all the data from the *.out file
    and closes it when all is read, then sends the data to the local socket'''
    ''' remote_socket - the file socket '''
    ''' local_socket - local socket '''
    remote_socket_address = remote_socket.getHostname()
    remote_socket_port = remote_socket.getPort()

    local_socket_name = local_socket.getpeername()
    local_socket_address = local_socket_name[0]
    local_socket_port = local_socket_name[1]

    print "[+] Starting transfer [%s:%s] => [%s:%s]" % (remote_socket_address, remote_socket_port, local_socket_address, local_socket_port)
    while True:
        ''' receive from file socket'''
        buff = remote_socket.recv()
        ''' if buffer not empty send to local socket'''
        if buff:
            ''' NOTE : this try except block is present only in transfer_out since
            a socket.error occurs on send to a dead socket; on recv the buffer is
            just empty but no error is triggered '''
            try:
                # local_socket.send(handle(buff))
                local_socket.send(buff)
            except socket.error as e:
                ''' if socket is closed we close our input too '''
                print "[-] socket error in transfer_out"
                print "[-] No data could be sent to socket"
                print "[-] Closing in connection on FILESOCKET "
                remote_socket.close_in()
        ''' if socket broke, break '''
        if (not buff) or remote_socket.is_in_closed():
            print "[-] No data received from FILESOCKET! Closing out connection on filesocket and breaking connection!"
            remote_socket.close_out()
            print "[+] Closing connection! [%s:%s]" % (local_socket_address, local_socket_port)
            local_socket.close()
            break


def handle(buffer):
    return buffer


if __name__ == "__main__":
    main()
python
from django.urls import path

from . import views

urlpatterns = [
    path('friendrequest', views.send_friend_request, name="send_friend_request"),
    path('friendrequest/handle', views.handle_friend_request, name="handle_friend_request"),
    path('friendrequest/<slug:author_id>/', views.retrieve_friend_request_of_author_id,
         name="retrieve_friend_request_of_author_id")
]
python
import itertools
import sys
import os

from rdkit import Chem
from rdkit.Chem import rdMolTransforms, rdMolAlign

import openbabel

from qmconftool import QMMol


def find_dihedral_idx(mol, smarts_patt):
    patt_mol = Chem.MolFromSmarts(smarts_patt)
    matches = mol.GetSubstructMatches(patt_mol)

    unique_match = list()
    match_list = list()
    for m in matches:
        if m[:3] not in match_list:
            unique_match.append(m)
            match_list.append(m[:3])

    if len(unique_match) != 2:
        # Report the offending SMARTS pattern (the original referenced an
        # undefined `filename` variable here).
        print("more than two dihedrals matched by " + smarts_patt)
        quit()

    return unique_match


def changeAndOpt(rdkit, theta):
    Chem.SanitizeMol(rdkit)
    initconf = rdkit.GetConformer()

    # set outer most dihedral to 180 degrees.
    smarts_patt = "C-S-C-[C,Si,Ge;H0]"
    outer_dihedral_idx = find_dihedral_idx(rdkit, smarts_patt)

    for k, i, j, l in outer_dihedral_idx:
        rdMolTransforms.SetDihedralDeg(initconf, k, i, j, l, 180.0)

    # change second outmost dihedral with +-120 degrees.
    patt = "S-C-[C,Si,Ge;H0]-[C,Si,Ge]"
    dihedral_idx = find_dihedral_idx(rdkit, patt)

    new_angles = list()
    for k, i, j, l in dihedral_idx:
        init_dihedral_angle = rdMolTransforms.GetDihedralDeg(initconf, k, i, j, l)
        new_angles.append([init_dihedral_angle + x*theta for x in range(int(360./theta))])

    angle_combinations = list(itertools.product(*new_angles))  # all combinations.

    for dihedrals in angle_combinations:
        for (k, i, j, l), angle in zip(dihedral_idx, dihedrals):
            rdMolTransforms.SetDihedralDeg(initconf, k, i, j, l, angle)
        rdkit.AddConformer(initconf, assignId=True)

    rdMolAlign.AlignMolConformers(rdkit)

    mol_list = list()
    for idx, conf in enumerate(rdkit.GetConformers()):
        if idx == 0:
            continue

        sdf_txt = Chem.SDWriter.GetText(rdkit, conf.GetId())
        m = Chem.MolFromMolBlock(sdf_txt, removeHs=False)

        conf_name = m.GetProp("_Name") + "-" + str(idx-1)
        m.SetProp("_Name", conf_name)
        mol_list.append(m)

    # Optimize structures with new dihedrals.
    confqmmol = QMMol(mol_list, fmt="mol_list", charge=0, multi=1, charged_fragments=True)
    confqmmol.optimize(program="xtb", method="opt", cpus=24, babelAC=True)

    # Write xyz files of conformers
    for newConf in confqmmol.GetConformers():
        obConversion = openbabel.OBConversion()
        obConversion.SetInAndOutFormats("sdf", "xyz")

        newConfm = openbabel.OBMol()
        obConversion.ReadString(newConfm, Chem.MolToMolBlock(newConf))
        new_xyz = obConversion.WriteString(newConfm)

        with open(newConf.GetProp("_Name") + ".xyz", 'w') as f:
            f.write(new_xyz)


if __name__ == "__main__":
    mols = list()
    for fname in os.listdir('.'):
        if fname.endswith("sdf"):
            m = Chem.MolFromMolFile(fname, removeHs=False)
            m.SetProp("_Name", fname.split('.')[0])
            mols.append(m)

    # optimize mol with xTB.
    qmmol = QMMol(mols, fmt="mol_list", charge=0, multi=1, charged_fragments=True)
    qmmol.optimize(program="xtb", method="opt", cpus=47, babelAC=True)

    theta_change = 120.  # Change dihedrals
    for c in qmmol.GetConformers():
        changeAndOpt(c, theta_change)
python
"""Root of podpointclient"""
python
import unittest
from cpuinfo import *
import helpers


class MockDataSource_enforcing(object):
    @staticmethod
    def has_sestatus():
        return True

    @staticmethod
    def sestatus_b():
        returncode = 0
        output = r'''
SELinux status:                 enabled
SELinuxfs mount:                /sys/fs/selinux
SELinux root directory:         /etc/selinux
Loaded policy name:             targeted
Current mode:                   enforcing
Mode from config file:          enforcing
Policy MLS status:              enabled
Policy deny_unknown status:     allowed
Memory protection checking:     actual (secure)
Max kernel policy version:      31
'''
        return returncode, output


class MockDataSource_not_enforcing(object):
    @staticmethod
    def has_sestatus():
        return True

    @staticmethod
    def sestatus_b():
        returncode = 0
        output = r'''
SELinux status:                 enabled
SELinuxfs mount:                /sys/fs/selinux
SELinux root directory:         /etc/selinux
Loaded policy name:             targeted
Current mode:                   eating
Mode from config file:          enforcing
Policy MLS status:              enabled
Policy deny_unknown status:     allowed
Memory protection checking:     actual (secure)
Max kernel policy version:      31
'''
        return returncode, output


class MockDataSource_exec_mem_and_heap(object):
    @staticmethod
    def has_sestatus():
        return True

    @staticmethod
    def sestatus_b():
        returncode = 0
        output = r'''
allow_execheap    on
allow_execmem     on
'''
        return returncode, output


class MockDataSource_no_exec_mem_and_heap(object):
    @staticmethod
    def has_sestatus():
        return True

    @staticmethod
    def sestatus_b():
        returncode = 0
        output = r'''
allow_execheap    off
allow_execmem     off
'''
        return returncode, output


class TestSELinux(unittest.TestCase):
    def setUp(self):
        helpers.backup_data_source(cpuinfo)
        self.trace = Trace(False, False)

    def tearDown(self):
        helpers.restore_data_source(cpuinfo)

    def test_enforcing(self):
        helpers.monkey_patch_data_source(cpuinfo, MockDataSource_enforcing)
        self.assertEqual(True, cpuinfo._is_selinux_enforcing(self.trace))

    def test_not_enforcing(self):
        helpers.monkey_patch_data_source(cpuinfo, MockDataSource_not_enforcing)
        self.assertEqual(False, cpuinfo._is_selinux_enforcing(self.trace))

    def test_exec_mem_and_heap(self):
        helpers.monkey_patch_data_source(cpuinfo, MockDataSource_exec_mem_and_heap)
        self.assertEqual(False, cpuinfo._is_selinux_enforcing(self.trace))

    def test_no_exec_mem_and_heap(self):
        helpers.monkey_patch_data_source(cpuinfo, MockDataSource_no_exec_mem_and_heap)
        self.assertEqual(True, cpuinfo._is_selinux_enforcing(self.trace))
python
from .context_processors import *
from .middleware import *
from .templatetags import *
from .http_client import *
python