__author__ = 'Irina.Chegodaeva'
python
from django.shortcuts import render, get_object_or_404

from .models import BlogPost


def blogIndex(request):
    blogposts = BlogPost.objects.order_by('-pub_date')
    context = {
        'heading': 'The Blog',
        'subheading': '',
        'title': 'Blog',
        'copyright': 'Pending',
        'blogposts': blogposts,
    }
    return render(request, 'blog-home-2.html', context)


def blogDetail(request, postid):
    post = get_object_or_404(BlogPost, pk=postid)
    context = {
        'post': post,
        'copyright': 'Pending',
    }
    return render(request, 'blog-post.html', context)
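A minimal URLconf sketch showing how the two views above could be wired up; the route patterns and names are assumptions for illustration and are not part of the original app.

# urls.py (hypothetical wiring for the blog views; pattern names are assumed)
from django.urls import path

from . import views

urlpatterns = [
    path('', views.blogIndex, name='blog-index'),
    path('post/<int:postid>/', views.blogDetail, name='blog-detail'),
]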
python
"""Module test_listwrapper. The MIT License Copyright 2022 Thomas Lehmann. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ # pylint: disable=compare-to-zero,no-self-use from unittest import TestCase from responsive.data import make_responsive from responsive.wrapper import ListWrapper class ListWrapperTest(TestCase): """Testing class ListWrapper.""" def test_len(self): """Testing length of list.""" wrapper = ListWrapper([1, 2, 3, 4], make_responsive) self.assertEqual(len(wrapper), 4) def test_set_and_get_by_index(self): """Testing __setitem__ and __getitem__.""" data = [1, 2, 3, 4] wrapper = ListWrapper([1, 2, 3, 4], make_responsive) wrapper[2] = 9 self.assertEqual(wrapper[2], 9) self.assertEqual(data, [1, 2, 3, 4]) def test_eq(self): """Testing __eq__.""" data = [1, 2, 3, 4] wrapper = ListWrapper(data, make_responsive) self.assertEqual(wrapper, data) self.assertNotEqual(wrapper, 1234) def test_iter(self): """Testing in and not in.""" data = [1, 2, 3, 4] wrapper = ListWrapper(data, make_responsive) self.assertTrue(2 in wrapper) self.assertTrue(5 not in wrapper) self.assertEqual(list(wrapper), data)
python
import cv2
import dlib
import imutils
from imutils import face_utils
import winsound
from scipy.spatial import distance

detector = dlib.get_frontal_face_detector()
predict = dlib.shape_predictor("C:/Users/kushal asn/Downloads/shape_predictor_68_face_landmarks.dat")


def eye_aspect_ratio(Eye):
    # ratio of the two vertical eye distances to the horizontal one; drops when the eye closes
    A = distance.euclidean(Eye[1], Eye[5])
    B = distance.euclidean(Eye[2], Eye[4])
    C = distance.euclidean(Eye[0], Eye[3])
    ear = (A + B) / (2 * C)
    return ear


thresh = 0.30      # EAR below this counts as a closed eye
frame_rate = 30    # consecutive closed-eye frames before alerting
duration = 1000    # beep duration in ms
frequency = 2500   # beep frequency in Hz

(lstart, lend) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rstart, rend) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

cap = cv2.VideoCapture(0)
flag = 0
while True:
    ret, frame = cap.read()
    if ret:
        # resize only after confirming a frame was captured
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        subjects = detector(gray, 0)
        for subject in subjects:
            shape = predict(gray, subject)
            shape = face_utils.shape_to_np(shape)
            leye = shape[lstart:lend]
            reye = shape[rstart:rend]
            lear = eye_aspect_ratio(leye)
            rear = eye_aspect_ratio(reye)
            lhull = cv2.convexHull(leye)
            rhull = cv2.convexHull(reye)
            ear = (lear + rear) / 2
            if ear < thresh:
                flag += 1
                print(flag)
                if flag > frame_rate:
                    winsound.Beep(frequency, duration)
                    print("drowsy alert")
            else:
                flag = 0
        cv2.imshow("Frame", frame)
    if cv2.waitKey(1) == ord("q"):
        break
cv2.destroyAllWindows()
cap.release()
python
import re

import cltk.corpus.persian.alphabet as alphabet
from cltk.corpus.arabic.alphabet import *

to_reform = [
    {
        "characters": [
            HAMZA, HAMZA_BELOW, HAMZA_ABOVE, HAMZA_ISOLATED,
            MINI_ALEF, SMALL_ALEF, SMALL_WAW, SMALL_YEH,
            KASHEEDA, FATHATAN, DAMMATAN, KASRATAN,
            FATHA, DAMMA, KASRA, SHADDA, SUKUN,
            alphabet.THOUSANDS, alphabet.DECIMAL
        ],
        "to_be": ""
    },
    {
        "characters": [
            ALEF_MADDA, ALEF_WASLA, HAMZA_BELOW_ALEF, HAMZA_ABOVE_ALEF,
        ],
        "to_be": alphabet.ALEF
    },
    {
        "characters": [
            ALEF_MAKSURA, YEH,
        ],
        "to_be": alphabet.YE
    },
    {
        "characters": [KAF],
        "to_be": alphabet.KAF
    },
    {
        "characters": [
            LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_HAMZA_BELOW, LAM_ALEF_MADDA_ABOVE,
        ],
        "to_be": alphabet.LAM + alphabet.ALEF
    },
    {
        "characters": [TEH_MARBUTA],
        "to_be": alphabet.HE2
    },
]

replacementDict = {}
for rule in to_reform:  # fixed NameError: the list above is named to_reform, not toReform
    for character in rule["characters"]:
        replacementDict[character] = rule["to_be"]

for originalForm, shapedForms in SHAPED_FORMS.items():
    for form in shapedForms:
        replacementDict[form] = replacementDict.get(originalForm, originalForm)

for i in range(10):
    replacementDict[EASTERN_ARABIC_NUMERALS[i]] = alphabet.NUMERALS[i]
    replacementDict[WESTERN_ARABIC_NUMERALS[i]] = alphabet.NUMERALS[i]
    # Use the commented parts for Word2Vec embeddings
    # replacementDict[alphabet.NUMERALS[i]] = " %s " % alphabet.NUMERALS_WRITINGS[i]

# for char in '[!"#%\'()*+,-./:;<=>?@\[\]^_`{|}~’”“′‘\\\]؟؛«»،٪':
#     replacementDict[char] = " "
#
# replacementDict[" +"] = " "

replacementRegex = re.compile("(%s)" % "|".join(map(re.escape, replacementDict.keys())))


def standardize(text):
    return replacementRegex.sub(lambda mo: replacementDict[mo.string[mo.start():mo.end()]], text)
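A short usage sketch for the standardize() helper above; the sample string is purely illustrative and assumes the cltk Persian/Arabic alphabet constants it relies on are available.

# Hypothetical usage: normalize Arabic-presentation characters in Persian text.
if __name__ == "__main__":
    raw = "كتاب"  # written with the Arabic KAF form
    print(standardize(raw))  # expected to print the word with the Persian KAF instead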
python
from __future__ import absolute_import, division, print_function

VERSION = '1.4.0'


def get_version():
    return VERSION


__version__ = get_version()


def get_changelist():
    # Legacy from the perforce era, but keeping this. It's not worth breaking
    return "UnknownChangelist"
python
""" * Copyright 2019 TIBCO Software Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); You may not use this file except * in compliance with the License. * A copy of the License is included in the distribution package with this file. * You also may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * File name :connectionimpl.py * Created on: 5/15/2019 * Created by: suresh * * SVN Id: $Id: connectionimpl.py 3256 2019-06-10 03:31:30Z ssubrama $ * * This file encapsulates connection interfaces """ from tgdb.log import * import tgdb.log as tglog from tgdb.utils import * from tgdb.impl.atomics import * import typing import tgdb.channel as tgchannel import tgdb.impl.channelimpl as tgchannelimpl import tgdb.pdu as tgpdu import tgdb.impl.pduimpl as tgpduimpl import tgdb.connection as tgconn import tgdb.model as tgmodel import tgdb.impl.entityimpl as tgentimpl import tgdb.impl.gmdimpl as tggmdimpl import tgdb.query as tgquery import tgdb.impl.queryimpl as tgqueryimpl import tgdb.exception as tgexception import tgdb.bulkio as tgbulk import tgdb.admin as tgadm def findCommandForLang(lang: str) -> tgquery.TGQueryCommand: retCommand: tgquery.TGQueryCommand if lang == "tgql": retCommand = tgquery.TGQueryCommand.Execute elif lang == "gremlin": retCommand = tgquery.TGQueryCommand.ExecuteGremlinStr elif lang == "gbc": retCommand = tgquery.TGQueryCommand.ExecuteGremlin else: raise tgexception.TGException("Unknown property for ConnectionDefaultQueryLanguage: %s", lang) return retCommand def findCommandAndQueryString(query: str, props: tgchannel.TGProperties) -> typing.Tuple[tgquery.TGQueryCommand, str]: lang: str = props.get(ConfigName.ConnectionDefaultQueryLanguage, ConfigName.ConnectionDefaultQueryLanguage.defaultvalue) retCommand: tgquery.TGQueryCommand retStr = query try: idx: int = query.index("://") prefix = query[:idx].lower() retCommand = findCommandForLang(prefix) retStr = query[idx + 3:] except ValueError: lang = lang.lower() retCommand = findCommandForLang(lang) return retCommand, retStr class ConnectionImpl(tgconn.TGConnection): def __init__(self, url, username, password, dbName: typing.Optional[str], env): self.__url__ = url self.__username__ = username self.__password__ = password self.__props__: TGProperties = TGProperties(env) self._dbName = dbName self.__channel__: tgchannel.TGChannel = tgchannel.TGChannel.createChannel(url, username, password, dbName, self.__props__) self.__props__.update(tgchannelimpl.LinkUrl.parse(url).properties) self.__gof__: tggmdimpl.GraphObjectFactoryImpl = tggmdimpl.GraphObjectFactoryImpl(self) self.__addEntities__: typing.Dict[int, tgentimpl.AbstractEntity] = {} self.__updateEntities__: typing.Dict[int, tgentimpl.AbstractEntity] = {} self.__removeEntities__: typing.Dict[int, tgentimpl.AbstractEntity] = {} self.__requestIds__ = AtomicReference('i', 0) def _genBCRWaiter(self) -> tgchannelimpl.BlockingChannelResponseWaiter: timeout = self.__props__.get(ConfigName.ConnectionOperationTimeoutSeconds, None) if timeout is not None and isinstance(timeout, str): timeout = float(timeout) requestId = self.__requestIds__.increment() return tgchannelimpl.BlockingChannelResponseWaiter(requestId, 
timeout) def connect(self): tglog.gLogger.log(tglog.TGLevel.Debug, "Attempting to connect") self.__channel__.connect() tglog.gLogger.log(tglog.TGLevel.Debug, "Connected, now logging in.") self.__channel__.start() tglog.gLogger.log(tglog.TGLevel.Debug, "Logged in, now acquiring metadata.") self.__initMetadata__() tglog.gLogger.log(tglog.TGLevel.Debug, "Acquired metadata, now sending connection properties.") self.__sendConnectionProperties() tglog.gLogger.log(tglog.TGLevel.Debug, 'Connected successfully') def __initMetadata__(self): waiter = self._genBCRWaiter() request = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.MetadataRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) response = self.__channel__.send(request, waiter) if response.verbid != tgpdu.VerbId.MetadataResponse: raise tgexception.TGException('Invalid response object received') self.__gof__.graphmetadata.registry = response.typeregistry def disconnect(self): self.__channel__.disconnect() self.__channel__.stop() def commit(self): channelResponse = self._genBCRWaiter() try: if gLogger.level is TGLevel.Debug: def echoAttributes(ent: tgmodel.TGEntity): gLogger.log(TGLevel, "Entity ID: %d", ent.virtualId) attr: tgmodel.TGAttribute for attr in ent.attributes: gLogger.log(TGLevel, " Attribute: %s", attr._value) [echoAttributes(ent) for ent in self.__addEntities__.values()] [echoAttributes(ent) for ent in self.__updateEntities__.values()] [echoAttributes(ent) for ent in self.__removeEntities__.values()] request: tgpduimpl.CommitTransactionRequestMessage = tgpduimpl.TGMessageFactory.createMessage( tgpdu.VerbId.CommitTransactionRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) attrDescSet = self.graphObjectFactory.graphmetadata.attritubeDescriptors request.addCommitList(self.__addEntities__, self.__updateEntities__, self.__removeEntities__, attrDescSet) response: tgpduimpl.CommitTransactionResponseMessage = self.__channel__.send(request, channelResponse) if response.exception is not None: raise response.exception response.finishReadWith(self.__addEntities__, self.__updateEntities__, self.__removeEntities__, self.__gof__.graphmetadata.registry) for id in self.__removeEntities__: self.__removeEntities__[id].markDeleted() if gLogger.isEnabled(TGLevel.Debug): gLogger.log(TGLevel.Debug, "Transaction commit succeeded") except IOError as e: raise tgexception.TGException.buildException("IO Error", cause=e) finally: for id in self.__addEntities__: self.__addEntities__[id].resetModifiedAttributes() for id in self.__updateEntities__: self.__updateEntities__[id].resetModifiedAttributes() self.__addEntities__.clear() self.__updateEntities__.clear() self.__removeEntities__.clear() def refreshMetadata(self): self.__initMetadata__() def rollback(self): self.__addEntities__.clear() self.__updateEntities__.clear() self.__removeEntities__.clear() def __sendConnectionProperties(self): request: tgpduimpl.ConnectionPropertiesMessage = tgpduimpl.TGMessageFactory.createMessage( tgpdu.VerbId.ConnectionPropertiesMessage, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) request.props = self.__channel__.properties self.__channel__.send(request) """ //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Begin Bulk Import Stuff // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// """ def startImport(self, loadopt: 
typing.Union[str, tgbulk.TGLoadOptions] = tgbulk.TGLoadOptions.Insert, erroropt: typing.Union[str, tgbulk.TGErrorOptions] = tgbulk.TGErrorOptions.Stop, dateformat: typing.Union[str, tgbulk.TGDateFormat] = tgbulk.TGDateFormat.YMD, props: typing.Optional[TGProperties] = None): import tgdb.impl.bulkioimpl as tgbulkimpl ret: tgbulkimpl.BulkImportImpl channelResponseWaiter = self._genBCRWaiter() request: tgpduimpl.BeginImportSessionRequest request = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.BeginImportRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) if isinstance(loadopt, str): loadopt = tgbulk.TGErrorOptions.findVal(loadopt) if loadopt == tgbulk.TGLoadOptions.Invalid: raise tgexception.TGException("Bad argument: cannot have an invalid load option!") if isinstance(erroropt, str): erroropt = tgbulk.TGErrorOptions.findVal(erroropt) if erroropt == tgbulk.TGErrorOptions.Invalid: raise tgexception.TGException("Bad argument: cannot have an invalid error option!") if isinstance(dateformat, str): dateformat = tgbulk.TGDateFormat.findVal(dateformat) if dateformat == tgbulk.TGDateFormat.Invalid: raise tgexception.TGException("Bad argument: cannot have an invalid Date-Time Format!") request.loadopt = loadopt request.erroropt = erroropt request.dtformat = dateformat response: tgpduimpl.BeginImportSessionResponse = self.__channel__.send(request, channelResponseWaiter) if response.error is not None: raise response.error ret = tgbulkimpl.BulkImportImpl(self, props) return ret def partialImportEntity(self, entType: tgmodel.TGEntityType, reqIdx: int, totReqs: int, data: str, attrList: typing.List[str]) -> typing.List[tgadm.TGImportDescriptor]: channelResponseWaiter = self._genBCRWaiter() request: tgpduimpl.PartialImportRequest = tgpduimpl.TGMessageFactory.createMessage( tgpdu.VerbId.PartialImportRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) request.type = entType request.reqIdx = reqIdx request.totalRequestsForType = totReqs request.data = data request.attrList = attrList response: tgpduimpl.PartialImportResponse = self.__channel__.send(request, channelResponseWaiter) if response.error is not None: raise response.error return response.resultList def endBulkImport(self): channelResponseWaiter = self._genBCRWaiter() request: tgpduimpl.EndBulkImportSessionRequest = tgpduimpl.TGMessageFactory.createMessage( tgpdu.VerbId.EndImportRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) response: tgpduimpl.PartialImportResponse = self.__channel__.send(request, channelResponseWaiter) return response.resultList """ //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // End Bulk Import Stuff // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// """ """ //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Begin Bulk Export Stuff // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// """ def startExport(self, props: typing.Optional[TGProperties] = None, zip: typing.Optional[str] = None, isBatch: bool = True): import tgdb.impl.bulkioimpl as tgbulkimpl channelResponseWaiter = self._genBCRWaiter() request: tgpduimpl.BeginExportRequest = tgpduimpl.TGMessageFactory.createMessage( tgpdu.VerbId.BeginExportRequest, 
authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) request.zipName = zip request.isBatch = isBatch request.maxBatchEntities = int(ConfigName.BulkIOEntityBatchSize.defaultvalue)\ if props is None or props[ConfigName.BulkIOEntityBatchSize] is None else\ int(props[ConfigName.BulkIOEntityBatchSize]) response: tgpduimpl.BeginExportResponse = self.__channel__.send(request, channelResponseWaiter) if response.error is not None: raise response.error return tgbulkimpl.BulkExportImpl(self, props, response.typeList, response.numRequests) def partialExport(self, reqNum: int) -> typing.Tuple[str, bytes, bool, int, typing.Optional[typing.Tuple[str, typing.List[str]]]]: channelResponseWaiter = self._genBCRWaiter() request: tgpduimpl.PartialExportRequest = tgpduimpl.TGMessageFactory.createMessage( tgpdu.VerbId.PartialExportRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) request.requestNum = reqNum response: tgpduimpl.PartialExportResponse = self.__channel__.send(request, channelResponseWaiter) return response.fileName, response.data, response.hasMore, response.numEntities,\ (response.typeName, response.attrList) if response.newType else None """ def startExport(self, props: Optional[TGProperties] = None) -> tgbulk.TGBulkExport: channelResponseWaiter = self.__genBCRWaiter() request = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.BeginBulkExportSessionRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) _ = self.__channel__.send(request, channelResponseWaiter) return tgbulkimpl.BulkExportImpl(self, props) def beginBatchExportEntity(self, entkind: tgmodel.TGEntityKind, enttype: tgmodel.TGEntityType, batchSize: int) \ -> Tuple[int, List[str]]: channelResponseWaiter = self.__genBCRWaiter() request: tgpduimpl.BeginBatchExportEntityRequest = tgpduimpl.TGMessageFactory.createMessage( tgpdu.VerbId.BeginBatchExportEntityRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) request.entKind = entkind request.entType = enttype request.batchSize = batchSize response: tgpduimpl.BeginBatchExportEntityResponse = self.__channel__.send(request, channelResponseWaiter) return response.descriptor, response.columnLabels def singleBatchExportEntity(self, desc: int) -> Tuple[int, str, bool]: channelResponseWaiter = self.__genBCRWaiter() request: tgpduimpl.SingleBatchExportEntityRequest = tgpduimpl.TGMessageFactory.createMessage( tgpdu.VerbId.SingleBatchExportEntityRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) request.descriptor = desc response: tgpduimpl.SingleBatchExportEntityResponse = self.__channel__.send(request, channelResponseWaiter) return response.numEnts, response.data, response.hasMore def endBulkExportSession(self): channelResponseWaiter = self.__genBCRWaiter() request: tgpduimpl.EndBulkExportSessionRequest = tgpduimpl.TGMessageFactory.createMessage( tgpdu.VerbId.EndBulkExportSessionRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) _ = self.__channel__.send(request, channelResponseWaiter) """ """ //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // End Bulk Export Stuff // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// """ def getEntity(self, key: tgmodel.TGKey, option: tgquery.TGQueryOption = tgquery.DefaultQueryOption) ->\ tgmodel.TGEntity: channelResponseWaiter = 
self._genBCRWaiter() requestMessage: tgpduimpl.GetEntityRequestMessage retV: tgmodel.TGEntity = None try: requestMessage = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.GetEntityRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) requestMessage.command = tgpduimpl.GetEntityCommand.GetEntity requestMessage.key = key response: tgpduimpl.GetEntityResponseMessage = self.__channel__.send(requestMessage, channelResponseWaiter) if response.hasResult: response.finishReadWith(self.graphObjectFactory) fetchedEntities = response.fetchedEntities for id in fetchedEntities: fetchedEnt: tgmodel.TGEntity = fetchedEntities[id] if key.matches(fetchedEnt): retV = fetchedEnt break finally: pass return retV def insertEntity(self, entity: tgmodel.TGEntity): if not entity.isNew: raise tgexception.TGException("Should only be calling insertEntity on a new entity!") if entity.virtualId not in self.__removeEntities__: self.__addEntities__[entity.virtualId] = entity self.__updateEdge__(entity) if gLogger.isEnabled(TGLevel.Debug): gLogger.log(TGLevel.Debug, 'Insert entity called') def updateEntity(self, entity: tgmodel.TGEntity): if entity.isNew: raise tgexception.TGException('Should not be calling update on a new entity!') if entity.isDeleted: raise tgexception.TGException('Should not be calling update on an already deleted entity!') if entity.virtualId not in self.__removeEntities__: self.__updateEntities__[entity.virtualId] = entity self.__updateEdge__(entity) def __updateEdge__(self, entity: tgmodel.TGEntity): if isinstance(entity, tgentimpl.EdgeImpl): edge: tgmodel.TGEdge = entity fr, to = edge.vertices if not fr.isNew and fr.virtualId not in self.__removeEntities__: self.__updateEntities__[fr.virtualId] = fr if not to.isNew and to.virtualId not in self.__removeEntities__: self.__updateEntities__[to.virtualId] = to def deleteEntity(self, entity: tgentimpl.AbstractEntity): if entity.isDeleted: raise tgexception.TGException('Should not be calling delete on an already deleted entity!') # Remove any entities added to the add changelist if entity.virtualId in self.__addEntities__: del self.__addEntities__[entity.virtualId] # Remove any entities added to the update changelist if entity.virtualId in self.__updateEntities__: del self.__updateEntities__[entity.virtualId] if entity.isNew: entity.markDeleted() else: self.__removeEntities__[entity.virtualId] = entity self.__updateEdge__(entity) def createQuery(self, query: str) -> tgquery.TGQuery: channelResponseWaiter: tgchannel.TGChannelResponseWaiter result: int ret: tgquery.TGQuery = None channelResponseWaiter = self._genBCRWaiter() try: request: tgpduimpl.QueryRequestMessage = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.QueryRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) request.command = tgquery.TGQueryCommand.Create request.query = query response: tgpduimpl.QueryResponseMessage = self.__channel__.send(request, channelResponseWaiter) gLogger.log(TGLevel.Debug, "Send query completed") result: int = response.result queryHashId: int = response.queryHashId if result == 0 and queryHashId > 0: #TODO Create error reporting for query result. 
ret = tgqueryimpl.QueryImpl(self, queryHashId) finally: pass return ret def executeQuery(self, query: typing.Optional[str] = None, option: tgquery.TGQueryOption = tgquery.DefaultQueryOption) -> tgquery.TGResultSet: if query is None: try: query = option.queryExpr except KeyError as e: raise tgexception.TGException("Need to specify a query string!", cause=e) channelResponseWaiter: tgchannel.TGChannelResponseWaiter = self._genBCRWaiter() result: int try: request: tgpduimpl.QueryRequestMessage = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.QueryRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) request.option = option request.command, request.query = findCommandAndQueryString(query, self.__props__) response: tgpduimpl.QueryResponseMessage = self.__channel__.send(request, channelResponseWaiter) if response.error is not None: raise response.error return response.finishReadWith(request.command, self.__gof__) except (Exception, tgexception.TGException): raise # TODO implement some form of compiled queries def executeQueryWithId(self, queryId: int, option: tgquery.TGQueryOption = tgquery.DefaultQueryOption) -> \ tgquery.TGResultSet: result: int channelResponseWaiter: tgchannel.TGChannelResponseWaiter = self._genBCRWaiter() try: request: tgpduimpl.QueryRequestMessage = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.QueryRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) request.command = tgquery.TGQueryCommand.ExecuteID request.queryHashId = queryId request.option = option response: tgpduimpl.QueryResponseMessage = self.__channel__.send(request, channelResponseWaiter) return response.finishReadWith(tgquery.TGQueryCommand.ExecuteID, self.__gof__) except Exception as e: raise tgexception.TGException("Exception in executeQueryWithId", cause=e) def closeQuery(self, queryId: int): channelResponseWaiter: tgchannel.TGChannelResponseWaiter = self._genBCRWaiter() try: request: tgpduimpl.QueryRequestMessage = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.QueryRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) request.command = tgquery.TGQueryCommand.Close request.queryHashId = queryId _: tgpduimpl.QueryResponseMessage = self.__channel__.send(request, channelResponseWaiter) # TODO check response state gLogger.log(TGLevel.Debug, "Send close query completed") except Exception as e: raise tgexception.TGException("Exception in closeQuery", cause=e) def getLargeObjectAsBytes(self, entityId: int, encrypted: bool = False) -> bytes: channelResponseWaiter = self._genBCRWaiter() if encrypted: # TODO Decrypt encrypted entities raise tgexception.TGProtocolNotSupported("Blob/Clob encryption/decryption not implemented.") request: tgpduimpl.GetLargeObjectRequestMessage = tgpduimpl.TGMessageFactory.createMessage( tgpdu.VerbId.GetLargeObjectRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid) request.entityId = entityId request.decrypt = encrypted response: tgpduimpl.GetLargeObjectResponseMessage = self.__channel__.send(request, channelResponseWaiter) if entityId != response.entityId: raise tgexception.TGException("Server responded with different entityId than expected!") data = bytes() if response.data is None else response.data return data @property def linkState(self) -> tgchannel.LinkState: return self.__channel__.linkstate @property def outboxaddr(self) -> str: return self.__channel__.outboxaddr @property def connectedUsername(self) -> str: return self.__username__ 
@property def graphMetadata(self) -> tgmodel.TGGraphMetadata: return self.__gof__.graphmetadata @property def graphObjectFactory(self) -> tgmodel.TGGraphObjectFactory: return self.__gof__
python
import base64
import requests
import uuid
import time


class MGTV:
    def __init__(self, url):
        self.url = url

    def get_video_id(self):
        return self.url.split("/", 5)[-1].split(".")[0]

    def get_pm2(self):
        did = "e6e13014-393b-43e7-b6be-2323e4960939"
        suuid = uuid.uuid4()
        pno = "1030"
        # tk2 = self.encode_tk2(did, pno)
        params = {
            "did": did,
            "suuid": suuid,
            "cxid": "",
            "tk2": self.encode_tk2(did, pno),
            "type": "pch5",
            "video_id": self.get_video_id(),
            "_support": "10000000",
            "auth_mode": "1",
            "src": "",
            "abroad": "",
        }
        res = requests.get("https://pcweb.api.mgtv.com/player/video", params=params).json()
        return res['data']['atc']['pm2']

    def encode_tk2(self, did="e6e13014-393b-43e7-b6be-2323e4960939", pno="1030"):
        tk2 = bytes(f"did={did}|pno={pno}|ver=0.3.0301|clit={int(time.time())}".encode())
        # str.replace() takes plain substrings, not JavaScript-style regex literals
        tk2 = base64.b64encode(tk2).decode().replace("+", "_").replace("/", "~").replace("=", "-")
        tk2 = list(' '.join(tk2).split())
        tk2.reverse()
        return "".join(tk2)

    def start(self):
        params = {
            "_support": "10000000",
            "tk2": self.encode_tk2(),
            "pm2": self.get_pm2(),
            "video_id": self.get_video_id(),
            "type": "pch5",
            "auth_mode": "1",
            "src": "",
            "abroad": "",
        }
        res = requests.get("https://pcweb.api.mgtv.com/player/getSource", params=params).json()
        print(res)
        return res


if __name__ == '__main__':
    # MGTV() requires an episode page URL; the value below is a placeholder, not a real video.
    MGTV("https://www.mgtv.com/b/00000/0000000.html").start()
python
import copy
import pickle
import types

import torch

from . import layers
from . import rules

Rules = rules.Rules


def flatten_model(module):
    '''
    flatten modul to base operation like Conv2, Linear, ...
    '''
    modules_list = []
    for m_1 in module.children():
        if len(list(m_1.children())) == 0:
            modules_list.append(m_1)
        else:
            modules_list = modules_list + flatten_model(m_1)
    return modules_list


def copy_module(module):
    '''
    sometimes copy.deepcopy() does not work
    '''
    module = copy.deepcopy(pickle.loads(pickle.dumps(module)))
    module._forward_hooks.popitem()   # remove hooks from module copy
    module._backward_hooks.popitem()  # remove hooks from module copy
    return module


def redefine_nn(model, rule, input_lowest, input_highest):
    '''
    go over model layers and overload chosen instance methods (e.g. forward()).
    New methods come from classes of layers module
    '''
    rule_func = Rules(rule)
    list_of_layers = dir(layers)  # list of redefined layers in layers module
    for num, module in enumerate(flatten_model(model)):
        if module.__class__.__name__ in list_of_layers:
            local_class = module.__class__  # current layer class
            layer_module_class = layers.__getattr__(local_class.__name__)  # get same redefined layer class
            list_of_methods = [attr for attr in dir(layer_module_class) if attr[:2] != '__']  # methods which were redefined
            for l in list_of_methods:
                # overload object method, from https://stackoverflow.com/questions/394770/override-a-method-at-instance-level
                setattr(module, l, types.MethodType(getattr(layer_module_class, l), module))  # set redefined methods to object
            if num == 0:
                setattr(module, 'rule_func', Rules('z_box_no_bias', lowest=input_lowest, highest=input_highest))  # first layer always z_box
            else:
                setattr(module, 'rule_func', rule_func)
    return model
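A small usage sketch for redefine_nn(). It assumes the package's layers and rules modules cover Conv2d/Linear and accept a rule name such as "epsilon"; both the model and the rule name are illustrative assumptions, not taken from the original project.

# Hypothetical usage: patch a toy CNN so each leaf layer carries a relevance-propagation rule.
import torch
import torch.nn as nn

model = nn.Sequential(
    nn.Conv2d(3, 8, 3),
    nn.ReLU(),
    nn.Flatten(),
    nn.Linear(8 * 30 * 30, 10),
)
# "epsilon" is an assumed rule name; input bounds match inputs scaled to [0, 1]
lrp_model = redefine_nn(model, rule="epsilon", input_lowest=0.0, input_highest=1.0)
out = lrp_model(torch.randn(1, 3, 32, 32))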
python
import os

import pytest

from tddc import common


def test_get_base_filename():
    assert common.get_base_filename('/Users/foo/bar.txt') == 'bar'
    assert common.get_base_filename('bar.txt') == 'bar'
    assert common.get_base_filename('bar') == 'bar'
    assert common.get_base_filename('bar.txt.gz') == 'bar.txt'


def test_write_summary(tmpdir):
    summary_data = {'a': 1, 'b': {'c': 2, 'd': 3}, 'e': [1, 2, 3]}
    filename = common.write_summary(summary_data, tmpdir.strpath, 'foo', 'bar')
    assert os.path.basename(filename) == 'foo_barsummary.json'
    summary_data_from_file = common.read_json_file(filename)
    assert summary_data_from_file == summary_data


def test_file_exists_or_exit():
    with pytest.raises(SystemExit) as exception_info:
        common.file_exists_or_exit('foo.bar.baz')
    assert exception_info.value.code == 1
    assert common.file_exists_or_exit(__file__) is None
python
# compare gene numbers in different samples
import pandas as pd
import scanpy as sc
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from scipy.stats import ttest_ind
import scipy.stats as stats
import scikit_posthocs as sp

#-------------------variable--------------------------------
fmt='png'
fd_in='./out/a00_preprocess_00_pp'
fd_out='./out/a01_plot-pp_00_compare'

l_sample=['Ctrl', 'MethFix', 'RNAlater']
dic_cmap={'Ctrl': '#4287f5', 'MethFix': '#f5a142', 'RNAlater': '#4bf542'}

#--------------------setup---------------------------------
Path(fd_out).mkdir(exist_ok=True, parents=True)

#--------------------function----------------------------
def get_gene_cnt(prefix, l_sample=l_sample, fd_in=fd_in):
    '''count genes in each adata, and concat count dfs
    '''
    #1. add gene count df to list
    l_df=[]
    for sample in l_sample:
        adata=sc.read(f'{fd_in}/{prefix}_{sample}.h5ad')
        sc.pp.filter_cells(adata, min_genes=0)  #this will count genes in each cell
        l_df.append(adata.obs)
    #2. concat df
    df=pd.concat(l_df)
    return df


def plot_gene(df, f_out, title, dic_cmap=dic_cmap, ylim=None):
    #1. plot
    sns.set()
    fig, ax=plt.subplots(figsize=(8, 5))
    ax=sns.violinplot(x='sample', y='n_genes', data=df, hue='sample', linewidth=0.5, width=1.5, palette=dic_cmap)
    #2. adjust
    ax.set_title(title, fontsize=20, pad=15, weight='medium')
    plt.xlabel('')
    plt.ylabel('Gene Number', fontsize=22, labelpad=15, weight='medium')
    plt.xticks([-0.5, 1, 2.5], fontsize=22, rotation=0, va='center')
    ax.tick_params(axis='x', which='major', pad=15)
    plt.xlim([-1, 3])
    plt.ylim(ylim)
    ax.get_legend().remove()
    #3. save
    plt.tight_layout()
    plt.savefig(f_out, dpi=300)
    plt.close()
    return

############################################################################
#----------------------raw data------------------------------
prefix='raw'

#1. count df
df=get_gene_cnt(prefix)

##2. plot
#f_out=f'{fd_out}/{prefix}_gene.{fmt}'
#title=f'Gene Numbers ({prefix.capitalize()})'
#plot_gene(df, f_out, title, ylim=[-1000, 16000])

##3. calculate p value
#ctrl=df.loc[df['sample']=='Ctrl']['n_genes']
#meth=df.loc[df['sample']=='MethFix']['n_genes']
#later=df.loc[df['sample']=='RNAlater']['n_genes']

#t1, p1=ttest_ind(ctrl, meth)
#print(p1)  #0
#t2, p2=ttest_ind(ctrl, later)
#print(p2)  #0
#t3, p3=ttest_ind(meth, later)
#print(p3)  #0.03836

#---------------------anova------------------------------------
##1. get data
#l_ctrl=df.loc[df['sample']=='Ctrl', ['n_genes']]['n_genes'].tolist()
#l_meth=df.loc[df['sample']=='MethFix', ['n_genes']]['n_genes'].tolist()
#l_later=df.loc[df['sample']=='RNAlater', ['n_genes']]['n_genes'].tolist()
#l_all=[l_ctrl, l_meth, l_later]

##2. anova
#fvalue, pvalue=stats.f_oneway(l_ctrl, l_meth, l_later)
#print(fvalue, pvalue)  #4148.3173795985 0.0

##3. post hoc ttest
#p=sp.posthoc_conover(l_all, p_adjust='holm')
#print(p)
'''
        1          2          3
1    -1.0   0.000000   0.000000
2     0.0  -1.000000   0.880754
3     0.0   0.880754  -1.000000
'''

#########################################################################
##----------------------cleaned data------------------------------
#prefix='clean'

##1. count df
#df=get_gene_cnt(prefix)

##2. plot
#f_out=f'{fd_out}/{prefix}_gene.{fmt}'
#title=f'Gene Numbers ({prefix.capitalize()})'
#plot_gene(df, f_out, title, ylim=[0, 4500])

##3. calculate p value
#ctrl=df.loc[df['sample']=='Ctrl']['n_genes']
#meth=df.loc[df['sample']=='MethFix']['n_genes']
#later=df.loc[df['sample']=='RNAlater']['n_genes']

#t1, p1=ttest_ind(ctrl, meth)
#print(p1)  #0
#t2, p2=ttest_ind(ctrl, later)
#print(p2)  #0
#t3, p3=ttest_ind(meth, later)
#print(p3)  #6.629720921642305e-78
python
#
# This file is part of the multi_agenda program
#
# This work is licensed under a
# Creative Commons Attribution 4.0 International License
# (CC BY 4.0 International)
#
# To see a copy of the license, visit
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# WELLINGTON SAMPAIO - wsampaio@yahoo.com
# https://www.linkedin.com/in/wellsampaio/
#
import objetos.financeiro.TipoConta as TipoConta
import objetos.dbConn.CRUD as CRUD


class TipoContaDAO(CRUD.CRUD):
    __sqlInsert = ""
    __sqlUpdate = ""

    def __init__(self):
        schema = "financeiro"
        tabela = "tiposContas"
        pk = "codTipoConta"
        super().__init__(schema, tabela, pk)
        # keep a local copy: name mangling means the parent's __pk is not visible as
        # self.__pk inside this class (needed by setStatement2)
        self.__pk = pk
        self.__sqlInsert = super().strINSERT()
        self.__sqlUpdate = super().strUPDATE()

    # ==================================== CRUD ====================================
    # ==============================================================================
    def insert(self, tipoConta):
        self.setStatement(tipoConta, self.__sqlInsert)

    def select(self, pk):
        obj = TipoConta.TipoConta()
        super().setSelect(pk, obj)
        return obj

    def update(self, tipoConta):
        self.setStatement(tipoConta, self.__sqlUpdate)

    def setStatement2(self, obj):
        getPk = getattr(obj, "get" + self.__pk[:1].upper() + self.__pk[1:])
        return getPk()
    # ==================================== CRUD ====================================
    # ==============================================================================

    def getLista(self):
        sql = \
            """
            SELECT *
            FROM tiposContas
            ORDER BY codTipoConta;
            """
        return super().getList(sql)

    def listaPrincipais(self):
        sql = \
            """
            SELECT *
            FROM tiposContas
            WHERE tipoContaAtivo = 1
            ORDER BY TipoConta;
            """
        return super().getList(sql)

    def naoListadasNoPeriodo(self, dtRef):
        sql = \
            """
            SELECT *
            FROM tiposContas
            WHERE codTipoConta NOT IN(
                    SELECT codTipoConta
                    FROM contas
                    WHERE strftime('%Y-%m', dtVencimento) = '{}'
                )
                AND tipoContaAtivo = 1
            ORDER BY tipoConta;
            """.format(dtRef)
        return super().getList(sql)

    def contaOcorrenciasPelaReferencia(self, codTipoConta, dtRef, tipoRef):
        if tipoRef == "pgto":
            tipoRef = "receita.mesReferencia"
        #elif tipoRef = "venc":
        else:
            tipoRef = "contas.dtVencimento"
        sql = \
            """
            SELECT COUNT(codConta)
            FROM contas
                LEFT JOIN receita ON contas.codReceitaPagadora = receita.codReceita
            WHERE strftime("%Y-%m", {}) = '{}'
                AND codTipoConta = {};
            """.format(tipoRef, dtRef, codTipoConta)
        return "{0:.0f}".format(super().getValue(sql, 0.0))

    def listaCmb(self):
        sql = \
            """
            SELECT *
            FROM tiposContas
            ORDER BY tipoConta;
            """
        return super().getList(sql)
python
# Generated by Django 2.2.16 on 2020-09-21 16:18

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('contest', '0001_initial'),
        ('lecture', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='signup_class',
            name='contest',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contest.Contest'),
        ),
    ]
python
import torch.nn as nn
import torch.nn.functional as F
from torch import cat, stack, sqrt


class MLPNetwork(nn.Module):
    """
    MLP network (can be used as value or policy)
    """
    def __init__(self, input_dim, out_dim, hidden_dim=64, nonlin=F.relu,
                 constrain_out=False, norm_in=True, discrete_action=True):
        """
        Inputs:
            input_dim (int): Number of dimensions in input
            out_dim (int): Number of dimensions in output
            hidden_dim (int): Number of hidden dimensions
            nonlin (PyTorch function): Nonlinearity to apply to hidden layers
        """
        super(MLPNetwork, self).__init__()

        if norm_in:  # normalize inputs
            self.in_fn = nn.BatchNorm1d(input_dim)
            self.in_fn.weight.data.fill_(1)
            self.in_fn.bias.data.fill_(0)
        else:
            self.in_fn = lambda x: x
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, out_dim)
        self.nonlin = nonlin
        if constrain_out and not discrete_action:
            # initialize small to prevent saturation
            self.fc3.weight.data.uniform_(-3e-3, 3e-3)
            self.out_fn = F.tanh
        else:  # logits for discrete action (will softmax later)
            self.out_fn = lambda x: x

    def forward(self, X):
        """
        Inputs:
            X (PyTorch Matrix): Batch of observations
        Outputs:
            out (PyTorch Matrix): Output of network (actions, values, etc)
        """
        h1 = self.nonlin(self.fc1(self.in_fn(X)))
        h2 = self.nonlin(self.fc2(h1))
        out = self.out_fn(self.fc3(h2))
        return out


class ConvMLPNetwork(nn.Module):
    """
    Conv + MLP network (can be used as value or policy)
    """
    def __init__(self, input_dim, out_dim):
        """
        Inputs:
            input_dim (int): Number of dimensions in input
            out_dim (int): Number of dimensions in output
            hidden_dim (int): Number of hidden dimensions
            nonlin (PyTorch function): Nonlinearity to apply to hidden layers
        """
        super(ConvMLPNetwork, self).__init__()

        self.in_fn = nn.BatchNorm2d(3)
        self.in_fn.weight.data.fill_(1)
        self.in_fn.bias.data.fill_(0)

        # Define image embedding
        self.image_conv = nn.Sequential(
            nn.Conv2d(3, 16, (2, 2)),
            nn.ReLU(),
            nn.MaxPool2d((2, 2)),
            nn.Conv2d(16, 32, (2, 2)),
            nn.ReLU(),
            nn.Conv2d(32, 64, (2, 2)),
            nn.ReLU()
        )

        self.mlpnet = nn.Sequential(
            nn.Linear(input_dim, 64),
            nn.Tanh(),
            nn.Linear(64, out_dim)
        )

        self.apply(init_params)

    def forward(self, obss, actions=None, critic=False, debug=False):
        """
        Inputs:
            X (PyTorch Matrix): Batch of observations
        Outputs:
            out (PyTorch Matrix): Output of network (actions, values, etc)
        """
        if not critic:
            x = obss
            if debug:
                print('----------')
                print('obss:')
                print(x)
            if len(x.shape) < 4:
                x = x.unsqueeze(0).transpose(1, 3).transpose(2, 3)
            else:
                x = x.transpose(1, 3).transpose(2, 3)
            x = self.in_fn(x)
            x = self.image_conv(x)
            x = x.reshape(x.shape[0], -1)
            if debug:
                print('----------')
                print('conv out:')
                print(x)
            out = self.mlpnet(x)
            if debug:
                print('----------')
                print('mlp out:')
                print(out)
            return out
        else:
            x = stack(obss)
            num_agents = x.shape[0]
            num_batches = x.shape[1]
            x = x.reshape(-1, *x.shape[-3:])
            x = x.transpose(1, 3).transpose(2, 3)
            x = self.in_fn(x)
            x = self.image_conv(x)
            x = x.reshape(x.shape[0], -1)
            x = x.reshape(num_agents, num_batches, x.shape[1])
            act = stack(actions)
            concat = cat((*x, *act), dim=1)
            out = self.mlpnet(concat)
            return out


# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
def init_params(m):
    classname = m.__class__.__name__
    if classname.find("Linear") != -1:
        m.weight.data.normal_(0, 1)
        m.weight.data *= 1 / sqrt(m.weight.data.pow(2).sum(1, keepdim=True))
        if m.bias is not None:
            m.bias.data.fill_(0)
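A brief smoke-test sketch for MLPNetwork on random data; the batch size and dimensions are illustrative and not taken from the original training setup.

# Hypothetical usage: map a batch of 32 ten-dimensional observations to 4 action logits.
import torch

net = MLPNetwork(input_dim=10, out_dim=4, hidden_dim=64)
obs = torch.randn(32, 10)   # input normalization (BatchNorm1d) expects a batch dimension
logits = net(obs)           # shape: (32, 4)
print(logits.shape)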
python
import re from datetime import date, datetime, timezone from urllib.parse import urljoin, urlparse import pytest from swpt_debtors import procedures as p from swpt_debtors import models as m @pytest.fixture(scope='function') def client(app, db_session): return app.test_client() @pytest.fixture(scope='function') def debtor(db_session): debtor = m.Debtor(debtor_id=123, status_flags=0) debtor.activate() db_session.add(debtor) db_session.commit() return p.get_debtor(123) def _get_all_pages(client, url, page_type, streaming=False): r = client.get(url) assert r.status_code == 200 data = r.get_json() assert data['type'] == page_type assert urlparse(data['uri']) == urlparse(url) if streaming: assert 'next' in data or 'forthcoming' in data assert 'next' not in data or 'forthcoming' not in data else: assert 'forthcoming' not in data items = data['items'] assert isinstance(items, list) if 'next' in data: items.extend(_get_all_pages(client, urljoin(url, data['next']), page_type, streaming)) return items def test_auto_genereate_debtor_id(client): r = client.post('/debtors/.debtor-reserve', json={}) assert r.status_code == 200 data = r.get_json() assert data['type'] == 'DebtorReservation' assert isinstance(data['debtorId'], str) assert isinstance(data['reservationId'], int) assert datetime.fromisoformat(data['validUntil']) assert datetime.fromisoformat(data['createdAt']) def test_create_debtor(client): r = client.get('/debtors/4294967296/') assert r.status_code == 403 r = client.post('/debtors/4294967296/reserve', headers={'X-Swpt-User-Id': 'INVALID_USER_ID'}, json={}) assert r.status_code == 403 r = client.post('/debtors/2/reserve', headers={'X-Swpt-User-Id': 'debtors:4294967296'}, json={}) assert r.status_code == 403 r = client.post('/debtors/4294967296/reserve', json={}) assert r.status_code == 200 data = r.get_json() assert data['type'] == 'DebtorReservation' assert data['debtorId'] == '4294967296' assert isinstance(data['reservationId'], int) assert datetime.fromisoformat(data['validUntil']) assert datetime.fromisoformat(data['createdAt']) reservation_id = data['reservationId'] r = client.post('/debtors/4294967296/reserve', json={}) assert r.status_code == 409 r = client.get('/debtors/4294967296/') assert r.status_code == 403 r = client.post('/debtors/4294967296/activate', json={ 'reservationId': 123, }) assert r.status_code == 422 assert 'reservationId' in r.get_json()['errors']['json'] r = client.post('/debtors/4294967296/activate', json={ 'reservationId': reservation_id, }) assert r.status_code == 200 data = r.get_json() assert data['type'] == 'Debtor' assert data['uri'] == '/debtors/4294967296/' assert data['identity'] == {'type': 'DebtorIdentity', 'uri': 'swpt:4294967296'} assert data['transfersList'] == {'uri': '/debtors/4294967296/transfers/'} assert data['createTransfer'] == {'uri': '/debtors/4294967296/transfers/'} assert datetime.fromisoformat(data['createdAt']) r = client.post('/debtors/4294967296/activate', json={ 'reservationId': reservation_id, }) assert r.status_code == 200 r = client.post('/debtors/8589934591/activate', json={ 'reservationId': 123, }) assert r.status_code == 422 assert 'reservationId' in r.get_json()['errors']['json'] r = client.post('/debtors/8589934591/activate', json={}) assert r.status_code == 200 data = r.get_json() assert data['type'] == 'Debtor' assert data['uri'] == '/debtors/8589934591/' assert data['balance'] == 0 assert datetime.fromisoformat(data['createdAt']) assert 'info' not in data r = client.post('/debtors/8589934591/activate', json={}) assert 
r.status_code == 409 r = client.get('/debtors/4294967296/') assert r.status_code == 200 data = r.get_json() assert data['type'] == 'Debtor' assert data['uri'] == '/debtors/4294967296/' assert data['balance'] == 0 assert datetime.fromisoformat(data['createdAt']) r = client.get('/debtors/8589934591/') assert r.status_code == 200 r = client.post('/debtors/8589934591/deactivate', headers={'X-Swpt-User-Id': 'debtors:8589934591'}, json={}) assert r.status_code == 403 r = client.post('/debtors/8589934591/deactivate', headers={'X-Swpt-User-Id': 'debtors-supervisor'}, json={}) assert r.status_code == 403 r = client.post('/debtors/8589934591/deactivate', headers={'X-Swpt-User-Id': 'debtors-superuser'}, json={}) assert r.status_code == 204 r = client.post('/debtors/8589934591/deactivate', json={}) assert r.status_code == 204 r = client.get('/debtors/8589934591/') assert r.status_code == 403 r = client.post('/debtors/8589934591/deactivate', json={}) assert r.status_code == 204 def test_get_debtors_list(client): r = client.post('/debtors/4294967296/reserve', json={}) assert r.status_code == 200 r = client.post('/debtors/4294967297/activate', json={}) assert r.status_code == 200 r = client.post('/debtors/4294967298/activate', json={}) assert r.status_code == 200 r = client.post('/debtors/8589934591/activate', json={}) assert r.status_code == 200 r = client.get('/debtors/.list') assert r.status_code == 200 data = r.get_json() assert data['type'] == 'DebtorsList' assert data['uri'] == '/debtors/.list' assert data['itemsType'] == 'ObjectReference' assert data['first'] == '/debtors/9223372036854775808/enumerate' entries = _get_all_pages(client, data['first'], page_type='ObjectReferencesPage') assert entries == [ {'uri': '/debtors/4294967297/'}, {'uri': '/debtors/4294967298/'}, {'uri': '/debtors/8589934591/'}, ] def test_get_debtor(client, debtor): r = client.get('/debtors/123/') assert r.status_code == 200 data = r.get_json() assert data['type'] == 'Debtor' assert data['uri'] == '/debtors/123/' assert data['config'] == { 'type': 'DebtorConfig', 'uri': '/debtors/123/config', 'configData': '', 'latestUpdateId': 1, 'latestUpdateAt': '1970-01-01T00:00:00+00:00', 'debtor': {'uri': '/debtors/123/'}, } assert data['transfersList'] == {'uri': '/debtors/123/transfers/'} assert data['createTransfer'] == {'uri': '/debtors/123/transfers/'} assert data['balance'] == 0 assert datetime.fromisoformat(data['createdAt']) assert data['identity'] == {'type': 'DebtorIdentity', 'uri': 'swpt:123'} assert data['noteMaxBytes'] == 0 assert 'configError' not in data assert 'account' not in data def test_change_debtor_config(client, debtor): r = client.get('/debtors/123/config') assert r.status_code == 200 data = r.get_json() assert data['type'] == 'DebtorConfig' assert data['uri'] == '/debtors/123/config' assert data['configData'] == '' assert data['latestUpdateId'] == 1 latest_update_at = data['latestUpdateAt'] assert datetime.fromisoformat(latest_update_at) assert data['debtor'] == {'uri': '/debtors/123/'} request = { 'configData': 'TEST', 'latestUpdateId': 2 } r = client.patch('/debtors/123/config', json=request) assert r.status_code == 200 r = client.get('/debtors/123/config') assert r.status_code == 200 data = r.get_json() assert data['type'] == 'DebtorConfig' assert data['uri'] == '/debtors/123/config' assert data['configData'] == 'TEST' assert data['latestUpdateId'] == 2 assert datetime.fromisoformat(data['latestUpdateAt']) assert latest_update_at != data['latestUpdateAt'] assert data['debtor'] == {'uri': '/debtors/123/'} 
empty_request = { 'configData': '', 'latestUpdateId': 2, } r = client.patch('/debtors/666/config', json=empty_request) assert r.status_code == 404 r = client.patch('/debtors/123/config', json=empty_request) assert r.status_code == 409 data = r.get_json() for _ in range(9): r = client.patch('/debtors/123/config', json=request) assert r.status_code == 200 r = client.patch('/debtors/123/config', json=request) assert r.status_code == 403 def test_initiate_running_transfer(client, debtor): r = client.get('/debtors/666/transfers/') assert r.status_code == 404 r = client.get('/debtors/123/transfers/') assert r.status_code == 200 data = r.get_json() assert data['debtor'] == {'uri': '/debtors/123/'} assert data['type'] == 'TransfersList' assert data['uri'] == '/debtors/123/transfers/' assert data['items'] == [] json_request_body = { 'amount': 1000, 'noteFormat': 'fmt', 'note': 'test', 'recipient': {'uri': 'swpt:123/1111'}, 'transferUuid': '123e4567-e89b-12d3-a456-426655440000', } r = client.post('/debtors/123/transfers/', json=json_request_body) assert r.status_code == 201 data = r.get_json() assert data['amount'] == 1000 assert datetime.fromisoformat(data['initiatedAt']) assert 'result' not in data assert data['recipient'] == {'type': 'AccountIdentity', 'uri': 'swpt:123/1111'} assert data['type'] == 'Transfer' assert data['uri'] == '/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000' assert data['noteFormat'] == 'fmt' assert data['note'] == 'test' assert data['transfersList'] == {'uri': '/debtors/123/transfers/'} assert r.headers['Location'] == 'http://example.com/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000' r = client.get('/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000') assert r.status_code == 200 data = r.get_json() assert data['type'] == 'Transfer' assert data['uri'] == '/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000' assert data['amount'] == 1000 r = client.post('/debtors/123/transfers/', json=json_request_body) assert r.status_code == 303 assert r.headers['Location'] == 'http://example.com/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000' json_request_body['amount'] += 1 r = client.post('/debtors/123/transfers/', json=json_request_body) assert r.status_code == 409 r = client.post('/debtors/123/transfers/', json={**json_request_body, **{'recipient': {'uri': 'INVALID'}}}) assert r.status_code == 422 r = client.post('/debtors/123/transfers/', json={**json_request_body, **{'recipient': {'uri': 'swpt:555/1111'}}}) assert r.status_code == 422 r = client.post('/debtors/555/transfers/', json={**json_request_body, **{'recipient': {'uri': 'swpt:555/1111'}}}) assert r.status_code == 404 r = client.get('/debtors/123/transfers/') assert r.status_code == 200 data = r.get_json() assert sorted(data['items']) == [ {'uri': '123e4567-e89b-12d3-a456-426655440000'}, ] r = client.delete('/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440001') assert r.status_code == 204 r = client.get('/debtors/123/transfers/') assert r.status_code == 200 data = r.get_json() assert sorted(data['items']) == [ {'uri': '123e4567-e89b-12d3-a456-426655440000'}, ] for i in range(2, 12): suffix = '{:0>4}'.format(i) json_request_body = { 'amount': 1, 'recipient': {'uri': 'swpt:123/1111'}, 'transferUuid': f'123e4567-e89b-12d3-a456-42665544{suffix}', } r = client.post('/debtors/123/transfers/', json=json_request_body) if i == 11: assert r.status_code == 403 else: assert r.status_code == 201 def test_cancel_running_transfer(client, debtor): json_request_body = { 'amount': 
1000, 'note': 'test', 'recipient': {'uri': 'swpt:123/1111'}, 'transferUuid': '123e4567-e89b-12d3-a456-426655440000', } r = client.post('/debtors/123/transfers/', json=json_request_body) assert r.status_code == 201 data = r.get_json() assert 'result' not in data r = client.post('/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440001', json={}) assert r.status_code == 404 r = client.post('/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000', json={}) assert r.status_code == 200 r = client.get('/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000') assert r.status_code == 200 data = r.get_json() result = data['result'] error = result['error'] assert error['errorCode'] == 'CANCELED_BY_THE_SENDER' r = client.post('/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000', json={}) assert r.status_code == 200 def test_unauthorized_debtor_id(debtor, client): json_request_body = { 'type': 'DebtorConfig', 'configData': '', 'latestUpdateId': 2, } r = client.get('/debtors/123/') assert r.status_code == 200 r = client.get('/debtors/123/', headers={'X-Swpt-User-Id': 'INVALID_USER_ID'}) assert r.status_code == 403 r = client.patch('/debtors/123/config', json=json_request_body, headers={'X-Swpt-User-Id': 'debtors-supervisor'}) assert r.status_code == 403 r = client.patch('/debtors/123/config', json=json_request_body, headers={'X-Swpt-User-Id': 'debtors:666'}) assert r.status_code == 403 r = client.patch('/debtors/123/config', json=json_request_body, headers={'X-Swpt-User-Id': 'debtors:123'}) assert r.status_code == 200 with pytest.raises(ValueError): r = client.get( '/debtors/18446744073709551615/', json=json_request_body, headers={'X-Swpt-User-Id': 'debtors:18446744073709551616'}, ) def test_redirect_to_debtor(client, debtor): r = client.get('/debtors/.debtor') assert r.status_code == 204 r = client.get('/debtors/.debtor', headers={'X-Swpt-User-Id': 'debtors:2'}) assert r.status_code == 303 assert r.headers['Location'] == 'http://example.com/debtors/2/' r = client.get('/debtors/.debtor', headers={'X-Swpt-User-Id': 'debtors:18446744073709551615'}) assert r.status_code == 303 assert r.headers['Location'] == 'http://example.com/debtors/18446744073709551615/' def test_redirect_to_latest_info(client, debtor): r = client.get('/debtors/123/public') assert r.status_code == 404 request = { 'configData': '{"info": {"iri": "https://example.com/"}}', 'latestUpdateId': 2 } r = client.patch('/debtors/123/config', json=request) assert r.status_code == 200 debtor = p.get_debtor(123) current_ts = datetime.now(tz=timezone.utc) p.process_account_update_signal( debtor_id=debtor.debtor_id, creditor_id=p.ROOT_CREDITOR_ID, creation_date=date(2020, 1, 1), last_change_ts=current_ts, last_change_seqnum=1, principal=0, interest_rate=0.0, last_config_ts=debtor.last_config_ts, last_config_seqnum=debtor.last_config_seqnum, negligible_amount=p.HUGE_NEGLIGIBLE_AMOUNT, config_data='INCORRECT CONFIG DATA', config_flags=debtor.config_flags, account_id='', transfer_note_max_bytes=0, ts=current_ts, ttl=10000000, ) r = client.get('/debtors/123/public') assert r.status_code == 404 p.process_account_update_signal( debtor_id=debtor.debtor_id, creditor_id=p.ROOT_CREDITOR_ID, creation_date=date(2020, 1, 1), last_change_ts=current_ts, last_change_seqnum=2, principal=0, interest_rate=0.0, last_config_ts=debtor.last_config_ts, last_config_seqnum=debtor.last_config_seqnum, negligible_amount=p.HUGE_NEGLIGIBLE_AMOUNT, config_data=debtor.config_data, config_flags=debtor.config_flags, account_id='', transfer_note_max_bytes=0, 
ts=current_ts, ttl=10000000, ) r = client.get('/debtors/123/public') assert r.status_code == 302 assert r.headers['Location'] == 'https://example.com/' assert r.headers['Cache-Control'] == 'max-age=86400' def test_save_document(client, debtor): r = client.get('/debtors/123/documents/0/public') assert r.status_code == 404 r = client.post( '/debtors/123/documents/', content_type='application/octet-stream', data=101 * b'1', ) assert r.status_code == 413 content = 100 * b'1' r = client.post( '/debtors/123/documents/', content_type='application/octet-stream', data=content, ) assert r.status_code == 201 assert r.content_type == 'application/octet-stream' assert r.get_data() == content location = r.headers['Location'] m = re.match(r'http://example.com/debtors/123/documents/(\d)+/public', location) assert m is not None document_id = int(m.group(1)) assert document_id >= 0 r = client.get(location) assert r.status_code == 200 assert r.content_type == 'application/octet-stream' assert r.get_data() == content r = client.post( '/debtors/123/documents/', content_type='application/octet-stream', data=content, ) assert r.status_code == 201 assert r.content_type == 'application/octet-stream' assert r.get_data() == content assert location != r.headers['Location'] r = client.post( '/debtors/123/documents/', content_type='application/octet-stream', data=content, ) assert r.status_code == 403 r = client.post( '/debtors/666/documents/', content_type='application/octet-stream', data=content, ) assert r.status_code == 404
python
import uuid

from lxml import etree
from defusedxml.lxml import fromstring
from django.db import models
from django.http import HttpResponse
from django.urls import reverse
from django.core.exceptions import ObjectDoesNotExist

from acs.response import get_soap_envelope
from acs.models import AcsHttpBaseModel
from acs.utils import create_xml_document


class AcsHttpRequest(AcsHttpBaseModel):
    """ Every HTTP request received on the ACS server URL is saved as an instance
        of this model. """

    acs_session = models.ForeignKey('acs.AcsSession', related_name='acs_http_requests', on_delete=models.PROTECT)
    # a foreignkey to the http response containing the acs rpc request which triggered
    # the current http request (where relevant)
    rpc_response_to = models.ForeignKey('acs.AcsHttpResponse', related_name='rpc_responses', null=True, blank=True, on_delete=models.PROTECT)
    request_headers = models.TextField(blank=True)
    request_xml_valid = models.BooleanField(default=False)
    request_soap_valid = models.BooleanField(default=False)

    class Meta:
        ordering = ['-created_date']

    def __str__(self):
        return str(self.tag)

    def get_absolute_url(self):
        return reverse('acshttprequest_detail', kwargs={'pk': self.pk})

    @property
    def is_request(self):
        return True

    @property
    def is_response(self):
        return False

    def get_response(self, empty_response=False):
        '''
        get_response() is called when the CPE is waiting for the ACS to do something.
        This happens after the CPE does an empty POST, or after the CPE has responded to
        an RPC call initiated by the ACS. It simply pops a job from the queue and returns
        it in a http response.
        '''
        job = False
        if not empty_response:
            ### get the first job from the queue (if any)
            #jobs = AcsQueueJob.objects.filter(acs_device=self.acs_session.acs_device, processed=False).order_by('created_date')
            jobs = self.acs_session.acs_device.acs_queue_jobs.filter(processed=False).order_by('created_date')
            self.acs_session.acs_log("Found %s unprocessed acs queue jobs for the device %s" % (jobs.count(), self.acs_session.acs_device))
            if jobs:
                job = jobs.first()
                self.acs_session.acs_log("Picked job %s" % job)

        if not empty_response and job:
            ### get blank SOAP response envelope
            response_cwmp_id = uuid.uuid4().hex
            root, body = get_soap_envelope(response_cwmp_id, self.acs_session)

            ### add the cwmp soap object to the soap body
            cwmpobj = fromstring(job.cwmp_rpc_object_xml.encode('utf-8'))
            body.append(cwmpobj)

            ### get the rpc method
            response_cwmp_rpc_method = job.cwmp_rpc_method

            ### put HTTP response together
            output = etree.tostring(root, encoding='utf-8', xml_declaration=True)
            response = HttpResponse(output, content_type='text/xml; charset=utf-8')
        else:
            ### no jobs in queue for this acs device (or an empty response was requested),
            ### so return empty body to end this cwmp session
            response = HttpResponse(status=204)
            response_cwmp_rpc_method = '(empty response body)'
            response_cwmp_id = ''

        ### save the http response
        from acs.models import AcsHttpResponse
        acs_http_response = AcsHttpResponse.objects.create(
            http_request=self,
            fk_body=create_xml_document(xml=response.content),
            cwmp_id=response_cwmp_id,
            soap_element="{%s}%s" % (self.acs_session.soap_namespaces['cwmp'], response_cwmp_rpc_method),
        )
        self.acs_session.acs_log("Created ACS HTTP response %s" % acs_http_response)

        if job:
            self.acs_session.acs_log("Saving AcsQueueJob %s" % job)
            ### save job
            job.handled_in = acs_http_response
            job.processed = True
            job.save()

        ### all good, return response
        self.acs_session.acs_log("Responding to CPE %s with %s" % (self.acs_session.acs_device, response_cwmp_rpc_method))
        return response
python
from typing import List from fastapi import FastAPI from pydantic import BaseModel app = FastAPI() class Product(BaseModel): id: str class Review(BaseModel): body: str product: Product class User(BaseModel): reviews: List[Review] USER_DATA = { "1": User(reviews=[Review(body="Great!", product=Product(id="1"))]), "2": User(reviews=[Review(body="Great!", product=Product(id="2"))]), "3": User(reviews=[Review(body="Great!", product=Product(id="3"))]), } @app.get("/users/{id}", response_model=User) async def get_user_review(id: str) -> User: return USER_DATA[id]
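# Usage sketch (not part of the original snippet): exercising the endpoint with
# FastAPI's TestClient; assumes FastAPI's test-client dependency is installed.
if __name__ == "__main__":
    from fastapi.testclient import TestClient

    client = TestClient(app)
    response = client.get("/users/1")
    print(response.status_code)  # 200
    print(response.json())       # {'reviews': [{'body': 'Great!', 'product': {'id': '1'}}]}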
python
import bpy from bpy.props import * PROP_TYPE_ICONS = { "String": "SORTALPHA", "Int": "CHECKBOX_DEHLT", "Float": "RADIOBUT_OFF", "Bool": "CHECKMARK", "Vec2": "ORIENTATION_VIEW", "Vec3": "ORIENTATION_GLOBAL", "Vec4": "MESH_ICOSPHERE", "Object": "OBJECT_DATA", "CameraObject": "CAMERA_DATA", "LightObject": "LIGHT_DATA", "MeshObject": "MESH_DATA", "SpeakerObject": "OUTLINER_DATA_SPEAKER" } def filter_objects(item, b_object): if item.type == "CameraObject": return b_object.type == "CAMERA" if item.type == "LightObject": return b_object.type == "LIGHT" if item.type == "MeshObject": return b_object.type == "MESH" if item.type == "SpeakerObject": return b_object.type == "SPEAKER" if item.type == "Object": return True class ArmTraitPropWarning(bpy.types.PropertyGroup): warning: StringProperty(name="Warning") class ArmTraitPropListItem(bpy.types.PropertyGroup): """Group of properties representing an item in the list.""" name: StringProperty( name="Name", description="The name of this property", default="Untitled") type: EnumProperty( items=( # (Haxe Type, Display Name, Description) ("String", "String", "String Type"), ("Int", "Integer", "Integer Type"), ("Float", "Float", "Float Type"), ("Bool", "Boolean", "Boolean Type"), ("Vec2", "Vec2", "2D Vector Type"), ("Vec3", "Vec3", "3D Vector Type"), ("Vec4", "Vec4", "4D Vector Type"), ("Object", "Object", "Object Type"), ("CameraObject", "Camera Object", "Camera Object Type"), ("LightObject", "Light Object", "Light Object Type"), ("MeshObject", "Mesh Object", "Mesh Object Type"), ("SpeakerObject", "Speaker Object", "Speaker Object Type")), name="Type", description="The type of this property", default="String") # === VALUES === value_string: StringProperty(name="Value", default="") value_int: IntProperty(name="Value", default=0) value_float: FloatProperty(name="Value", default=0.0) value_bool: BoolProperty(name="Value", default=False) value_vec2: FloatVectorProperty(name="Value", size=2) value_vec3: FloatVectorProperty(name="Value", size=3) value_vec4: FloatVectorProperty(name="Value", size=4) value_object: PointerProperty( name="Value", type=bpy.types.Object, poll=filter_objects) def set_value(self, val): # Would require way too much effort, so it's out of scope here. 
if self.type.endswith("Object"): return if self.type == "Int": self.value_int = int(val) elif self.type == "Float": self.value_float = float(val) elif self.type == "Bool": self.value_bool = val == "true" elif self.type in ("Vec2", "Vec3", "Vec4"): if isinstance(val, str): dimensions = int(self.type[-1]) # Parse "new VecX(...)" val = val.split("(")[1].split(")")[0].split(",") val = [value.strip() for value in val] # new VecX() without parameters if len(val) == 1 and val[0] == "": # Use default value return # new VecX() with less parameters than its dimensions while len(val) < dimensions: val.append(0.0) val = [float(value) for value in val] setattr(self, "value_" + self.type.lower(), val) else: self.value_string = str(val) def get_value(self): if self.type == "Int": return self.value_int if self.type == "Float": return self.value_float if self.type == "Bool": return self.value_bool if self.type in ("Vec2", "Vec3", "Vec4"): return list(getattr(self, "value_" + self.type.lower())) if self.type.endswith("Object"): if self.value_object is not None: return self.value_object.name return "" return self.value_string class ARM_UL_PropList(bpy.types.UIList): def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index): item_value_ref = "value_" + item.type.lower() custom_icon = PROP_TYPE_ICONS[item.type] sp = layout.split(factor=0.3) sp.label(text=item.type, icon=custom_icon) sp = sp.split(factor=0.6) sp.label(text=item.name) # Make sure your code supports all 3 layout types if self.layout_type in {'DEFAULT', 'COMPACT'}: if item.type.endswith("Object"): sp.prop_search(item, "value_object", context.scene, "objects", text="", icon=custom_icon) else: use_emboss = item.type in ("Bool", "String") sp.prop(item, item_value_ref, text="", emboss=use_emboss) elif self.layout_type in {'GRID'}: layout.alignment = 'CENTER' def register(): bpy.utils.register_class(ArmTraitPropWarning) bpy.utils.register_class(ArmTraitPropListItem) bpy.utils.register_class(ARM_UL_PropList) def unregister(): bpy.utils.unregister_class(ARM_UL_PropList) bpy.utils.unregister_class(ArmTraitPropListItem) bpy.utils.unregister_class(ArmTraitPropWarning)
python
import numpy as np
import pandas as pd
import sklearn
from typing import Dict, Tuple
from sklearn.base import BaseEstimator


class RuleAugmentedEstimator(BaseEstimator):
    """Augments sklearn estimators with rule-based logic.

    This class is a wrapper class for sklearn estimators with the additional
    possibility of adding rule-based logic to the underlying estimator.
    The provided rules are hard-coded and take precedence over the underlying
    estimator's predictions.
    """

    def __init__(self, base_model: BaseEstimator, rules: Dict, **base_params):
        """Initializes the RuleAugmentedEstimator instance.

        Initializes the rule-augmented estimator by supplying the underlying
        sklearn estimator as well as the hard-coded rules.

        Args:
            base_model: The underlying sklearn estimator.
                Must implement a fit and predict method.
            rules: The hard-coded rules in the format of a dictionary,
                with keys being the pandas dataframe column name, and the values
                being a tuple in the following form:

                (comparison operator, value, return value)

                Acceptable comparison operators are:
                "=", "<", ">", "<=", ">="

                Example:

                    {"House Type": [
                        ("=", "Penthouse", 1.0),
                        ("=", "Shack", 0.0)
                     ],
                     "House Price": [
                        ("<", 1000.0, 0.0),
                        (">=", 500000.0, 1.0)
                    ]}

            **base_params: Optional keyword arguments which will be passed on
                to the ``base_model``.

        Examples:
            The below example illustrates how an instance of the
            RuleAugmentedEstimator class can be initialized with a trained
            sklearn GradientBoostingRegressor instance.

            >>> gbr = GradientBoostingRegressor()
            >>> rules = {"House Type": [
                    ("=", "Penthouse", 1.0),
                    ("=", "Shack", 0.0)
                 ],
                 "House Price": [
                    ("<", 1000.0, 0.0),
                    (">=", 500000.0, 1.0)
                ]}
            >>> ra_estimator = RuleAugmentedEstimator(gbr, rules)
        """
        self.rules = rules
        self.base_model = base_model
        self.base_model.set_params(**base_params)

    def __repr__(self):
        return "Rule Augmented Estimator:\n\n\t Base Model: {}\n\t Rules: {}".format(self.base_model, self.rules)

    def __str__(self):
        return self.__repr__()

    def _get_base_model_data(self, X: pd.DataFrame, y: pd.Series) -> Tuple[pd.DataFrame, pd.Series]:
        """Filters the training data for data points not affected by the rules."""
        train_x = X

        for category, rules in self.rules.items():

            if category not in train_x.columns.values:
                continue

            for rule in rules:

                if rule[0] == "=":
                    train_x = train_x.loc[train_x[category] != rule[1]]

                elif rule[0] == "<":
                    train_x = train_x.loc[train_x[category] >= rule[1]]

                elif rule[0] == ">":
                    train_x = train_x.loc[train_x[category] <= rule[1]]

                elif rule[0] == "<=":
                    train_x = train_x.loc[train_x[category] > rule[1]]

                elif rule[0] == ">=":
                    train_x = train_x.loc[train_x[category] < rule[1]]

                else:
                    print("Invalid rule detected: {}".format(rule))

        indices = train_x.index.values
        train_y = y.iloc[indices]

        train_x = train_x.reset_index(drop=True)
        train_y = train_y.reset_index(drop=True)

        return train_x, train_y

    def fit(self, X: pd.DataFrame, y: pd.Series, **kwargs):
        """Fits the estimator to the data.

        Fits the estimator to the data, only training the underlying estimator
        on data which isn't affected by the hard-coded rules.

        Args:
            X: The training feature data.
            y: The training label data.
            **kwargs: Optional keyword arguments passed to the underlying
                estimator's fit function.
        """
        train_x, train_y = self._get_base_model_data(X, y)
        self.base_model.fit(train_x, train_y, **kwargs)

    def predict(self, X: pd.DataFrame) -> np.array:
        """Gets predictions for the provided feature data.

        The predictions are evaluated using the provided rules wherever possible;
        otherwise the underlying estimator is used.
        Args:
            X: The feature data to evaluate predictions for.

        Returns:
            np.array: Evaluated predictions.
        """
        p_X = X.copy()
        p_X['prediction'] = np.nan

        for category, rules in self.rules.items():

            if category not in p_X.columns.values:
                continue

            for rule in rules:

                if rule[0] == "=":
                    p_X.loc[p_X[category] == rule[1], 'prediction'] = rule[2]

                elif rule[0] == "<":
                    p_X.loc[p_X[category] < rule[1], 'prediction'] = rule[2]

                elif rule[0] == ">":
                    p_X.loc[p_X[category] > rule[1], 'prediction'] = rule[2]

                elif rule[0] == "<=":
                    p_X.loc[p_X[category] <= rule[1], 'prediction'] = rule[2]

                elif rule[0] == ">=":
                    p_X.loc[p_X[category] >= rule[1], 'prediction'] = rule[2]

                else:
                    print("Invalid rule detected: {}".format(rule))

        if len(p_X.loc[p_X['prediction'].isna()].index) != 0:

            base_X = p_X.loc[p_X['prediction'].isna()].copy()
            base_X.drop('prediction', axis=1, inplace=True)
            p_X.loc[p_X['prediction'].isna(), 'prediction'] = self.base_model.predict(base_X)

        return p_X['prediction'].values

    def get_params(self, deep: bool = True) -> Dict:
        """Return the model's and base model's parameters.

        Args:
            deep: Whether to recursively return the base model's parameters.

        Returns:
            Dict: The model's parameters.
        """
        params = {
            'base_model': self.base_model,
            'rules': self.rules
        }

        params.update(self.base_model.get_params(deep=deep))
        return params

    def set_params(self, **params):
        """Sets parameters for the model and base model.

        Args:
            **params: Optional keyword arguments.
        """
        parameters = params
        param_keys = parameters.keys()

        if 'base_model' in param_keys:
            value = parameters.pop('base_model')
            self.base_model = value

        if 'rules' in param_keys:
            value = parameters.pop('rules')
            self.rules = value

        self.base_model.set_params(**parameters)
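# Usage sketch (illustrative only, not part of the original module): the column
# names, rule and data below are made up. Rows matched by a rule are excluded
# from training and answered by the rule at prediction time.
if __name__ == "__main__":
    from sklearn.linear_model import LinearRegression

    rules = {"House Price": [("<", 1000.0, 0.0)]}
    X = pd.DataFrame({"House Price": [500.0, 250000.0, 400000.0],
                      "Rooms": [1, 4, 5]})
    y = pd.Series([0.0, 0.6, 0.8])

    model = RuleAugmentedEstimator(LinearRegression(), rules)
    model.fit(X, y)          # only the two rows with House Price >= 1000.0 train the base model
    print(model.predict(X))  # the first value comes from the rule: 0.0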
python
# jay mahakal
from Resources.Work_By_Raj.Google_Calender_api.Resources import Setup
from Resources.Work_By_Raj.Google_Calender_api.Resources import Return_events_info

# The function below [Setup.setup_calendar_credentials_return_service()] should run only once
# service = Setup.setup_calendar_credentials_return_service()
# print(Return_events_info.return_events_info("Give details about calendar events for today", service=service))
python
print "Ejercicio de ciclos -Granizada-" def par(n): n=n/2 def impar(n): n=n*3+1 n=int(raw_input("digite numero ")) while n>=1: if n%2==0: par(n) print n else: impar(n) print n
python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 15:39:06 2020

@author: jireh.park
"""

import pandas as pd
import os
from tqdm import tqdm
from google.cloud import storage

os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) + '/key/level-district.json'


def list_blob(bucket_name):
    """List the blobs in the bucket."""
    # bucket_name = "your-bucket-name"
    # source_file_name = "local/path/to/file"
    # destination_blob_name = "storage-object-name"

    storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)
    blobs = list(bucket.list_blobs())
    #blob = bucket.blob(destination_blob_name)
    #blob.upload_from_filename("tmp", content_type='text/csv')
    # blob.upload_from_filename(source_file_name)

    print(blobs)


bucket_name = 'j-first-bucket'
save_path = 'route/'

list_blob(bucket_name)

os.chdir("/Users/jireh.park/jireh_module/svc_data/route")

# Load the route data
df = pd.DataFrame()
for fl in os.listdir():
    if 'txt' in fl:
        data = pd.read_csv(fl, engine = 'python', encoding = 'cp949', sep = '|', dtype = str)
        df = df.append(data)
df = df.reset_index(drop = True)

# Swap start and destination and append the reversed routes
size = len(df)
col = ['time', 'num_station', 'transfer']
for ii in tqdm(df.index):
    aa = df.loc[ii, 'route'][2:-2].split("', '")
    aa.reverse()

    df.loc[size + ii, 'start'] = df.loc[ii, 'destination']
    df.loc[size + ii, 'destination'] = df.loc[ii, 'start']
    for cl in col:
        df.loc[size + ii, cl] = df.loc[ii, cl]
    df.loc[size + ii, 'route'] = aa

df = df.reset_index(drop = True)

df.to_csv("route.csv", encoding = 'cp949', index = False)
#df.to_json("route.json")
python
#!/usr/bin/env python2.7 import os import codecs import json import random with codecs.open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "apps.txt"), encoding="utf-8") as f: apps = f.read().splitlines() with codecs.open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "networks.txt"), encoding="utf-8") as f: networks = f.read().splitlines() dim_groups = [ ["install_date", "install_country", "ad_network", "campaign"], ["install_date", "ad_network", "campaign"], ["install_date", "install_country", "ad_network"], ["install_date", "ad_network", "campaign", "event_name"], ["install_date", "install_country", "ad_network", "campaign", "event_name"] ] metric_groups = [ ["installs_count", "clicks_count", "launches_count"], ["installs_count", "install_cost", "install_cost_alt", "revenue", "revenue_alt"], ["revenue", "revenue_alt", "inapps_count"], ["clicks_count", "impressions_count", "installs_count", "launches_count"], ["clicks_count", "impressions_count", "installs_count", "uninstalls_count"] ] date_ranges = [ ["2015-01-01", "2015-01-14"], ["2015-01-01", "2015-01-30"], ["2013-05-01", "2013-05-14"], ["2014-01-01", "2015-03-01"], ["2014-12-01", "2015-01-01"], ["2015-02-01", "2015-02-08"], ["2013-01-01", "2013-03-01"] ] country_groups = [ ["US", "IR", "UK", "MX"], ["US", "IL", "KZ"], ["RU", "BE"], ["TG", "TH", "TJ", "TL", "TM", "TN", "TO", "TR", "TT", "TV", "TZ", "UA", "UG", "US", "UY", "UZ", "VA", "VC", "VE", "VN", "VU", "WS", "YE", "ZA", "ZM", "ZW"] ] for i in range(1000): dates = random.choice(date_ranges) query = { "type": "aggregate", "table": "activity", "select": [], "filter": { "op": "and", "filters": [ {"op": "eq", "column": "app_id", "value": random.choice(apps)}, {"op": "ge", "column": "install_date", "value": dates[0]}, {"op": "lt", "column": "install_date", "value": dates[1]} ] } } query["select"] = [{"column": c} for c in random.choice(dim_groups) + random.choice(metric_groups)] if random.random() < 0.2: query["filter"]["filters"].append({"op": "in", "column": "install_country", "values": random.choice(country_groups)}) if random.random() < 0.5: query["filter"]["filters"].append({"op": "eq", "column": "ad_network", "value": random.choice(networks)}) print "http://localhost:5000/query POST %s" % json.dumps(query)
python
#!/usr/bin/env python # -*- coding: utf-8 -*- """user表测试""" from executor.database.models.user import Users from executor.tests.database.base import DatabaseTestCase from executor.exceptions import UserAlreadyExistException, \ IncorrectPasswordException class TestOperatorUser(DatabaseTestCase): data_file_path = "database_user_data.yaml" def test_create_user(self): user = Users.from_json(self.get_test_date("test_create_user")) new_user = self.db.create_user(self.context, user) self.assertIsInstance(new_user, Users) self.db.delete_user(self.context, new_user.id, new_user.password) def test_create_same_name_user(self): user1 = Users.from_json( self.get_test_date( "test_create_same_name_user", "test_create_same_name_user1")) user2 = Users.from_json( self.get_test_date( "test_create_same_name_user", "test_create_same_name_user2")) self.db.create_user(self.context, user1) self.assertRaises(UserAlreadyExistException, self.db.create_user, self.context, user2) self.db.delete_user(self.context, user1.phone, user1.password) def test_create_same_phone_user(self): user1 = Users.from_json( self.get_test_date( "test_create_same_phone_user", "test_create_same_phone_user1" )) user2 = Users.from_json( self.get_test_date( "test_create_same_phone_user", "test_create_same_phone_user2" )) self.db.create_user(self.context, user1) self.assertRaises(UserAlreadyExistException, self.db.create_user, self.context, user2) self.db.delete_user(self.context, user1.phone, user1.password) def test_get_user_by_id(self): user = Users.from_json( self.get_test_date("test_get_user_by_id") ) n_user = self.db.create_user(self.context, user) self.assertEqual( n_user, self.db.get_user(self.context, user.id, user.password)) self.db.delete_user(self.context, n_user.id, n_user.password) def test_get_user_by_user_id(self): user = Users.from_json( self.get_test_date("test_get_user_by_user_id") ) n_user = self.db.create_user(self.context, user) self.assertEqual( n_user, self.db.get_user(self.context, user.user_id, user.password)) self.db.delete_user(self.context, n_user.user_id, n_user.password) def test_get_user_by_name(self): user = Users.from_json( self.get_test_date("test_get_user_by_name") ) n_user = self.db.create_user(self.context, user) self.assertEqual( n_user, self.db.get_user(self.context, user.username, user.password)) self.db.delete_user(self.context, n_user.username, n_user.password) def test_get_user_by_phone(self): user = Users.from_json( self.get_test_date("test_get_user_by_phone") ) n_user = self.db.create_user(self.context, user) self.assertEqual( n_user, self.db.get_user(self.context, user.phone, user.password)) self.db.delete_user(self.context, n_user.phone, n_user.password) def test_get_user_with_incorrect_password(self): user = Users.from_json( self.get_test_date("test_get_user_with_incorrect_password") ) n_user = self.db.create_user(self.context, user) self.assertRaises( IncorrectPasswordException, self.db.get_user, self.context, n_user.phone, n_user.password + "_" ) self.db.delete_user(self.context, n_user.id, n_user.password)
python
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Defines endpoints for the landing page. TODO(shifucun): once this is well tested, can deprecate corresponding code in chart.py and place.py """ import collections import copy import json import logging import urllib from flask import Blueprint, current_app, Response, url_for, g from flask_babel import gettext from collections import defaultdict from cache import cache import services.datacommons as dc_service import routes.api.place as place_api import lib.range as lib_range # Define blueprint bp = Blueprint("api.landing_page", __name__, url_prefix='/api/landingpage') BAR_CHART_TYPES = ['parent', 'similar', 'nearby', 'child'] MAX_DENOMINATOR_BACK_YEAR = 3 MIN_CHART_TO_KEEP_TOPICS = 30 OVERVIEW = 'Overview' def get_landing_page_data(dcid, new_stat_vars): response = dc_service.fetch_data('/landing-page', { 'place': dcid, 'newStatVars': new_stat_vars, }, compress=False, post=True, has_payload=False) return response def build_url(dcids, statvar_to_denom, is_scaled=False): anchor = '&place=' + ','.join(dcids) parts = [] for statvar, denom in statvar_to_denom.items(): part = statvar if denom: part += '|' + denom parts.append(part) anchor += ('&statsVar=' + '__'.join(parts)) if is_scaled: anchor = anchor + '&pc' return urllib.parse.unquote(url_for('tools.timeline', _anchor=anchor)) def fill_translation(chart): chart['title'] = gettext(chart['titleId']) del chart['titleId'] if 'description' in chart: del chart['description'] return chart # TODO: add test for chart_config for assumption that each combination of stat vars will only have one config in chart_config. def build_spec(chart_config): """Builds hierachical spec based on chart config.""" spec = defaultdict(lambda: defaultdict(list)) # Map: category -> topic -> [config] for conf in chart_config: config = copy.deepcopy(conf) config = fill_translation(config) if 'relatedChart' in config and config['relatedChart']['scale']: config['relatedChart'] = fill_translation(config['relatedChart']) is_overview = ('isOverview' in config and config['isOverview']) category = config['category'] if 'isOverview' in config: del config['isOverview'] del config['category'] if is_overview: spec[OVERVIEW][category].append(copy.deepcopy(config)) spec[category][config['title']].append(config) return spec def get_denom(cc, related_chart=False): """Get the numerator and denominator map.""" # If chart requires denominator, use it for both primary and related charts. if 'denominator' in cc: result = {} if len(cc['denominator']) != len(cc['statsVars']): raise ValueError('Denominator number not matching: %s', cc) for num, denom in zip(cc['statsVars'], cc['denominator']): result[num] = denom return result # For related chart, use the denominator that is specified in the # 'relatedChart' field if present. 
if related_chart and cc.get('relatedChart', {}).get('scale', False): return cc['relatedChart'].get('denominator', 'Count_Person') return None def get_series(data, place, stat_vars): """Get time series from the landing page data. Aggregate for all the stat vars and return empty series if any stat var data is missing Returns: series and sources. """ all_series = [] sources = set() num_sv = len(stat_vars) for sv in stat_vars: if 'data' not in data[place] or sv not in data[place]['data']: return {}, [] series = data[place]['data'][sv] all_series.append(series['val']) sources.add(series['metadata']['provenanceUrl']) # One series, no need to aggregate if num_sv == 1: return all_series[0], sources merged_series = defaultdict(list) for series in all_series: for date, value in series.items(): merged_series[date].append(value) # Aggregate agg_series = {} for date, values in merged_series.items(): if len(values) == num_sv: agg_series[date] = sum(values) return agg_series, sources def get_stat_var_group(cc, data, places): """Get the stat var grouping for aggregation.""" if 'aggregate' in cc: agg_type = lib_range.get_aggregate_config(cc['aggregate']) place_stat_vars = defaultdict(list) for place in places: if place not in data or 'data' not in data[place]: continue for sv in cc['statsVars']: if sv in data[place]['data']: place_stat_vars[place].append(sv) result = lib_range.aggregate_stat_var(place_stat_vars, agg_type) for place in places: if place not in result: result[place] = {} else: result = {} for place in places: result[place] = {sv: [sv] for sv in cc['statsVars']} return result def get_snapshot_across_places(cc, data, places): """Get the snapshot used for bar data across a few places. This will scale the value if required and pick the latest date that has the most <place, stat_var> entries. """ if not places: return {}, {} # date_to_data is a dictionary from date to place and a tuple of # (stat_var, value) pair. # Example: # { # "2018": { # "geoId/06":[("Count_Person", 200), ("Count_Person_Female", 100)], # "geoId/08":[("Count_Person", 300), ("Count_Person_Female", 150)], # }, # "2017": { # "geoId/06":[("Count_Person", 300), ("Count_Person_Female", 150)], # "geoId/08":[("Count_Person", 400), ("Count_Person_Female", 200)], # }, # } date_to_data = collections.defaultdict( lambda: collections.defaultdict(list)) # TODO(shifucun/beets): add a unittest to ensure denominator is set # explicitly when scale==True num_denom = get_denom(cc, related_chart=True) sources = set() place_stat_var_group = get_stat_var_group(cc, data, places) statvar_to_denom = {} for place in places: if place not in data: continue stat_var_group = place_stat_var_group[place] for num_sv, sv_list in stat_var_group.items(): num_series, num_sources = get_series(data, place, sv_list) if not num_series: continue sources.update(num_sources) if num_denom: if isinstance(num_denom, dict): denom_sv = num_denom[num_sv] else: denom_sv = num_denom statvar_to_denom[num_sv] = denom_sv denom_series, denom_sources = get_series( data, place, [denom_sv]) if not denom_series: continue sources.update(denom_sources) result_series = scale_series(num_series, denom_series) else: result_series = num_series statvar_to_denom[num_sv] = None # Turn the value to be keyed by date. for date, value in result_series.items(): date_to_data[date][place].append((num_sv, value)) # Pick a date that has the most series across places. 
dates = sorted(date_to_data.keys(), reverse=True) if not dates: return {}, {} count = 0 chosen_date = None for date in dates: if len(date_to_data[date]) > count: count = len(date_to_data[date]) chosen_date = date result = {'date': chosen_date, 'data': [], 'sources': list(sources)} for place in places: points = {} for stat_var, value in date_to_data[chosen_date][place]: points[stat_var] = value if points: result['data'].append({'dcid': place, 'data': points}) return result, statvar_to_denom # TODO(shifucun): Add unittest for these helper functions def get_bar(cc, data, places): """Get the bar data across a few places. This will scale the value if required and pick the latest date that has the most <place, stat_var> entries. """ result, statvar_denom = get_snapshot_across_places(cc, data, places) if not result: return {} # Should have data other than the primary place. Return empty struct to # so client won't draw chart. if len(result['data']) <= 1: return {} is_scaled = (('relatedChart' in cc and cc['relatedChart'].get('scale', False)) or ('denominator' in cc)) result['exploreUrl'] = build_url(places, statvar_denom, is_scaled) return result def get_trend(cc, data, place): """Get the time series data for a place.""" if place not in data: return {} result_series = {} sources = set() num_denom = get_denom(cc) stat_var_group = get_stat_var_group(cc, data, [place])[place] statvar_denom = {} for num_sv, sv_list in stat_var_group.items(): num_series, num_sources = get_series(data, place, sv_list) if not num_series: continue sources.update(num_sources) if num_denom: if isinstance(num_denom, dict): denom_sv = num_denom[num_sv] else: denom_sv = num_denom denom_sv = num_denom[num_sv] statvar_denom[num_sv] = denom_sv denom_series, denom_sources = get_series(data, place, [denom_sv]) if not denom_series: continue sources.update(denom_sources) result_series[num_sv] = scale_series(num_series, denom_series) else: result_series[num_sv] = num_series statvar_denom[num_sv] = None # filter out time series with single data point. for sv in list(result_series.keys()): if len(result_series[sv]) <= 1: del result_series[sv] if not result_series: return {} is_scaled = ('denominator' in cc) return { 'series': result_series, 'sources': list(sources), 'exploreUrl': build_url([place], statvar_denom, is_scaled) } def get_year(date): try: return int(date.split('-')[0]) except IndexError: raise ValueError('no valid date format found %s', date) # TODO(shifucun): Add unittest. def scale_series(numerator, denominator): """Scale two time series. The date of the two time series may not be exactly aligned. Here we use year alignment to match two date. If no denominator is found for a numerator, then the data is removed. 
""" data = {} for date, value in numerator.items(): if date in denominator: if denominator[date] > 0: data[date] = value / denominator[date] else: data[date] = 0 else: try: numerator_year = get_year(date) for i in range(0, MAX_DENOMINATOR_BACK_YEAR + 1): year = str(numerator_year - i) if year in denominator: if denominator[year] > 0: data[date] = value / denominator[year] else: data[date] = 0 break except ValueError: return {} return data def get_i18n_all_child_places(raw_page_data): all_child_places = raw_page_data.get('allChildPlaces', {}) all_dcids = [] for place_type in list(all_child_places.keys()): for place in all_child_places[place_type]['places']: all_dcids.append(place.get('dcid', '')) i18n_names = place_api.get_i18n_name(all_dcids, False) # Don't resolve en-only names for place_type in list(all_child_places.keys()): for place in all_child_places[place_type]['places']: dcid = place.get('dcid') i18n_name = i18n_names.get(dcid, '') if i18n_name: place['name'] = i18n_name for place_type in list(all_child_places.keys()): all_child_places[place_type] = all_child_places[place_type]['places'] return all_child_places @bp.route('/data/<path:dcid>') @cache.cached(timeout=3600 * 24, query_string=True) # Cache for one day. def data(dcid): """ Get chart spec and stats data of the landing page for a given place. """ logging.info("Landing Page: cache miss for %s, fetch and process data ...", dcid) spec_and_stat = build_spec(current_app.config['CHART_CONFIG']) new_stat_vars = current_app.config['NEW_STAT_VARS'] raw_page_data = get_landing_page_data(dcid, new_stat_vars) if not 'statVarSeries' in raw_page_data: logging.info("Landing Page: No data for %s", dcid) return Response(json.dumps({}), 200, mimetype='application/json') # Filter out Metropolitan France parent place. parent_places = [ el for el in raw_page_data.get('parentPlaces', []) if el != 'country/FXX' ] raw_page_data['parentPlaces'] = parent_places # Only US places have comparison charts. is_usa_place = False for place in [dcid] + raw_page_data.get('parentPlaces', []): if place == 'country/USA': is_usa_place = True break # Populate the data for each chart all_stat = raw_page_data['statVarSeries'] for category in spec_and_stat: if category == OVERVIEW: if is_usa_place: chart_types = ['nearby', 'child'] else: chart_types = ['similar'] else: chart_types = BAR_CHART_TYPES for topic in spec_and_stat[category]: for chart in spec_and_stat[category][topic]: # Trend data chart['trend'] = get_trend(chart, all_stat, dcid) if 'aggregate' in chart: aggregated_stat_vars = list(chart['trend'].get( 'series', {}).keys()) if aggregated_stat_vars: chart['trend']['statsVars'] = aggregated_stat_vars else: chart['trend'] = {} # Bar data for t in chart_types: chart[t] = get_bar(chart, all_stat, [dcid] + raw_page_data.get(t + 'Places', [])) if t == 'similar' and 'data' in chart[t]: # If no data for current place, do not serve similar # place data. 
keep_chart = False for d in chart[t]['data']: if d['dcid'] == dcid: keep_chart = True break if not keep_chart: chart[t] = {} # Update stat vars for aggregated stats if 'aggregate' in chart and chart[t]: chart[t]['statsVars'] = [] for place_data in chart[t].get('data', []): stat_vars = list(place_data['data'].keys()) if len(stat_vars) > len(chart[t]['statsVars']): chart[t]['statsVars'] = stat_vars elif len(stat_vars) == 0: chart[t] = {} if 'aggregate' in chart: chart['statsVars'] = [] # Remove empty category and topics for category in list(spec_and_stat.keys()): for topic in list(spec_and_stat[category].keys()): filtered_charts = [] for chart in spec_and_stat[category][topic]: keep_chart = False for t in ['trend'] + BAR_CHART_TYPES: if chart.get(t, None): keep_chart = True break if keep_chart: filtered_charts.append(chart) if not filtered_charts: del spec_and_stat[category][topic] else: spec_and_stat[category][topic] = filtered_charts if not spec_and_stat[category]: del spec_and_stat[category] # Only keep the "Overview" category if the number of total chart is less # than certain threshold. overview_set = set() non_overview_set = set() chart_count = 0 # Get the overview charts for topic, charts in spec_and_stat[OVERVIEW].items(): for chart in charts: overview_set.add((topic, chart['title'])) chart_count += 1 # Get the non overview charts for category, topic_data in spec_and_stat.items(): if category == OVERVIEW: continue for topic in topic_data: if (category, topic) not in overview_set: non_overview_set.add((category, topic)) chart_count += 1 # If the total number of chart is too small, then merge all charts to # the overview category and remove other categories if chart_count < MIN_CHART_TO_KEEP_TOPICS: for category, topic in non_overview_set: spec_and_stat[OVERVIEW][category].extend( spec_and_stat[category][topic]) for category in list(spec_and_stat.keys()): if category != OVERVIEW: del spec_and_stat[category] # Get chart category name translations categories = {} for category in list(spec_and_stat.keys()) + list(spec_and_stat[OVERVIEW]): categories[category] = gettext(f'CHART_TITLE-CHART_CATEGORY-{category}') # Get display name for all places all_places = [dcid] for t in BAR_CHART_TYPES: all_places.extend(raw_page_data.get(t + 'Places', [])) names = place_api.get_display_name('^'.join(sorted(all_places)), g.locale) # Pick data to highlight - only population for now population, statvar_denom = get_snapshot_across_places( {'statsVars': ['Count_Person']}, all_stat, [dcid]) highlight = {gettext('CHART_TITLE-Population'): population} response = { 'pageChart': spec_and_stat, 'allChildPlaces': get_i18n_all_child_places(raw_page_data), 'childPlacesType': raw_page_data.get('childPlacesType', ""), 'childPlaces': raw_page_data.get('childPlaces', []), 'parentPlaces': raw_page_data.get('parentPlaces', []), 'similarPlaces': raw_page_data.get('similarPlaces', []), 'nearbyPlaces': raw_page_data.get('nearbyPlaces', []), 'categories': categories, 'names': names, 'highlight': highlight, } return Response(json.dumps(response), 200, mimetype='application/json')
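# Worked example (illustrative only) of the year alignment in scale_series():
# a monthly numerator date with no exact match in the denominator is matched by
# year, looking up to MAX_DENOMINATOR_BACK_YEAR years back.
#
# >>> scale_series({"2011-07": 50.0, "2012-07": 60.0}, {"2011": 100.0, "2012": 120.0})
# {'2011-07': 0.5, '2012-07': 0.5}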
python
# OBS # Imagem celular original do vírus recentemente descoberto SARS-CoV-2, # popularmente chamado de COVID-19 ou Coronavirus. import cv2 as cv import numpy as np import matplotlib.pyplot as plt !wget "https://raw.githubusercontent.com/PedroHaupenthal/Image-Processing/master/watershed/covid_19.jpg" -O "covid_19.jpg" img1 = cv.imread("covid_19.jpg") img1 = cv.cvtColor(img1, cv.COLOR_BGR2RGB) img2 = cv.cvtColor(img1, cv.COLOR_RGB2GRAY) img2 = cv.bitwise_not(img2) ret, img2 = cv.threshold(img2, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU) kernel = np.ones((3,3), np.uint8) img3 = cv.morphologyEx(img2, cv.MORPH_CLOSE, kernel, iterations = 2) img4 = cv.dilate(img3, kernel, iterations = 5) img5 = cv.distanceTransform(img3, cv.DIST_L2, 5) ret,img6 = cv.threshold(img5, 0.65 * img5.max(), 255, 0) img6 = np.uint8(img6) img7 = cv.subtract(img4, img6) ret, count = cv.connectedComponents(img6) count = count + 1 count[img7 == 255] = 0 img8 = cv.watershed(img1, count) img1[count == -1] = [255, 0, 0] plt.figure(figsize=(30,30)) plt.subplot(121), plt.imshow(img1), plt.title("ORIGINAL"), plt.axis("off") plt.subplot(122), plt.imshow(img8, cmap='jet'), plt.title("RESULTADO"), plt.axis("off") plt.show()
python
import math
import threading

from django.core.cache import caches

from .settings import CACHE_HELPERS_ALIAS

CACHE_HELPERS_KEY = 'cache_helpers_key'


def set_cache_bust_status(bust_key=None):
    cache = caches[CACHE_HELPERS_ALIAS]
    cache.set(CACHE_HELPERS_KEY, bust_key)


def get_bust_key():
    cache = caches[CACHE_HELPERS_ALIAS]
    return cache.get(CACHE_HELPERS_KEY, None)


def mark_response_as_processed(response):
    setattr(response, '_already_cached', True)


def check_response_has_been_processed(response):
    return getattr(response, '_already_cached', False)


def check_bust_header(request):
    bust_key = request.META.get('HTTP_BUST', '')
    return bool(bust_key) and bust_key == get_bust_key()


# TODO: avoid building the full list, use a generator instead
def threaded_cue(cue, callback, threads):
    def process_chunk(beginning, end, worker_num):
        for index, item in enumerate(cue[beginning:end]):
            real_index = beginning + index
            result = callback(item)
            if result:
                cue[real_index] = result

    CHUNK_SIZE = math.ceil(len(cue) / threads)
    end = 0
    threads_refs = []
    for i in range(threads):
        beginning = end
        end = beginning + CHUNK_SIZE
        t = threading.Thread(target=process_chunk, args=(beginning, end if end < len(cue) else len(cue), i))
        t.start()
        threads_refs.append(t)

    for t in threads_refs:
        t.join()

    return cue


def get_ref_from_func(func):
    if hasattr(func, '__self__'):
        return func.__self__.__class__
    return func


def get_func_from_func(func):
    if hasattr(func, '__wrapped__'):
        return func.__wrapped__
    return func


def func_to_string(func):
    func = func.func if hasattr(func, 'func') else func
    ref = get_ref_from_func(func)
    chunks = [
        ref.__module__,
        ref.__name__,
    ]
    func = get_func_from_func(func)
    if func.__name__ != chunks[-1]:
        chunks.append(func.__name__)

    return '.'.join(chunks)


def invalidate_cache(cache_key, cache=None):
    cache = caches[cache if cache is not None else CACHE_HELPERS_ALIAS]
    cache.delete(cache_key)
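# Usage sketch (illustrative only, assumes the Django cache settings used by
# this module are configured): threaded_cue() mutates the given list in place
# with several worker threads and returns it.
#
# >>> threaded_cue(list(range(1, 6)), lambda x: x * 2, threads=2)
# [2, 4, 6, 8, 10]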
python
""" Provides the functionality to feed TF templates with Jerakia lookups """ import sys import os from jerakia import Jerakia from terraform_external_data import terraform_external_data def retrieveLookupInfo(query,item): lookitem = query[item] lookuppath =lookitem.split('/') key = lookuppath.pop() namespace = lookuppath if not namespace: raise Exception("No namespace given %s" % item ) return namespace,key @terraform_external_data def lookupJerakia(query,variables=None): jerakia = Jerakia(configfile=os.path.abspath('utils/jerakia.yaml')) resdict = {} for item in query: namespace,key = retrieveLookupInfo(query,item) ret = [] response = jerakia.lookup(key=key, namespace=namespace, variables=variables) ret.append(response['payload']) resdict.update({item: str(ret)}) return resdict if __name__ == '__main__': lookupJerakia()
python
"""Internal helpers for dataset validation.""" from pathlib import Path from typing import Any, Iterable, List, Optional, Sequence, Tuple, Union import numpy as np import pandas as pd from biopsykit.utils._types import _Hashable, path_t from biopsykit.utils.exceptions import FileExtensionError, ValidationError, ValueRangeError def _assert_is_dir(path: path_t, raise_exception: Optional[bool] = True) -> Optional[bool]: """Check if a path is a directory. Parameters ---------- path : path or str path to check if it's a directory raise_exception : bool, optional whether to raise an exception or return a bool value Returns ------- ``True`` if ``path`` is a directory, ``False`` otherwise (if ``raise_exception`` is ``False``) Raises ------ ValueError if ``raise_exception`` is ``True`` and ``path`` is not a directory """ # ensure pathlib file_name = Path(path) if not file_name.is_dir(): if raise_exception: raise ValueError("The path '{}' is expected to be a directory, but it's not!".format(path)) return False return True def _assert_file_extension( file_name: path_t, expected_extension: Union[str, Sequence[str]], raise_exception: Optional[bool] = True ) -> Optional[bool]: """Check if a file has the correct file extension. Parameters ---------- file_name : path or str file name to check for correct extension expected_extension : str or list of str file extension (or a list of file extensions) to check for raise_exception : bool, optional whether to raise an exception or return a bool value Returns ------- ``True`` if ``file_name`` ends with one of the specified file extensions, ``False`` otherwise (if ``raise_exception`` is ``False``) Raises ------ :exc:`~biopsykit.exceptions.FileExtensionError` if ``raise_exception`` is ``True`` and ``file_name`` does not end with any of the specified ``expected_extension`` """ # ensure pathlib file_name = Path(file_name) if isinstance(expected_extension, str): expected_extension = [expected_extension] if file_name.suffix not in expected_extension: if raise_exception: raise FileExtensionError( "The file name extension is expected to be one of {}. " "Instead it has the following extension: {}".format(expected_extension, file_name.suffix) ) return False return True def _assert_is_dtype( obj, dtype: Union[type, Tuple[type, ...]], raise_exception: Optional[bool] = True ) -> Optional[bool]: """Check if an object has a specific data type. Parameters ---------- obj : any object object to check dtype : type or list of type data type of tuple of data types to check raise_exception : bool, optional whether to raise an exception or return a bool value Returns ------- ``True`` if ``obj`` is one of the expected data types, ``False`` otherwise (if ``raise_exception`` is ``False``) Raises ------ :exc:`~biopsykit.exceptions.ValidationError` if ``raise_exception`` is ``True`` and ``obj`` is none of the expected data types """ if not isinstance(obj, dtype): if raise_exception: raise ValidationError( "The data object is expected to be one of ({},). But it is a {}".format(dtype, type(obj)) ) return False return True def _assert_has_multiindex( df: pd.DataFrame, expected: Optional[bool] = True, nlevels: Optional[int] = 2, nlevels_atleast: Optional[int] = False, raise_exception: Optional[bool] = True, ) -> Optional[bool]: """Check if a :any:`pandas.DataFrame` has a :any:`pandas.MultiIndex` as index. 
Parameters ---------- df : :class:`~pandas.DataFrame` The dataframe to check expected : bool, optional Whether the df is expected to have a MultiIndex index or not nlevels : int, optional If MultiIndex is expected, how many levels the MultiIndex index should have nlevels_atleast : bool, optional Whether the MultiIndex has to have at least ``nlevels`` (``True``) or exactly match the number of levels (``False``) raise_exception : bool, optional whether to raise an exception or return a bool value Returns ------- ``True`` if ``df`` meets the expected index format, ``False`` otherwise (if ``raise_exception`` is ``False``) Raises ------ :exc:`~biopsykit.exceptions.ValidationError` if ``raise_exception`` is ``True`` and ``df`` does not meet the expected index format """ return _multiindex_check_helper( df=df, idx_or_col="index", expected=expected, nlevels=nlevels, nlevels_atleast=nlevels_atleast, raise_exception=raise_exception, ) def _assert_has_index_levels( df: pd.DataFrame, index_levels: Iterable[_Hashable], match_atleast: Optional[bool] = False, match_order: Optional[bool] = False, raise_exception: Optional[bool] = True, ) -> Optional[bool]: """Check if the dataframe has all index level names. Parameters ---------- df : :class:`~pandas.DataFrame` The dataframe to check index_levels : list Set of index level names to check match_atleast : bool, optional Whether the MultiIndex columns have to have at least the specified column levels (``True``) or exactly match the column levels (``False``) match_order : bool, optional Whether to also match the level order raise_exception : bool, optional whether to raise an exception or return a bool value Returns ------- ``True`` if ``df`` has the expected index level names, ``False`` otherwise (if ``raise_exception`` is ``False``) Raises ------ :exc:`~biopsykit.exceptions.ValidationError` if ``raise_exception`` is ``True`` and ``df`` does not have the expected index level names """ return _multiindex_level_names_helper( df, level_names=index_levels, idx_or_col="index", match_atleast=match_atleast, match_order=match_order, raise_exception=raise_exception, ) def _assert_has_columns( df: pd.DataFrame, columns_sets: Sequence[Union[List[_Hashable], List[str], pd.Index]], raise_exception: Optional[bool] = True, ) -> Optional[bool]: """Check if the dataframe has at least all columns sets. Parameters ---------- df : :class:`~pandas.DataFrame` The dataframe to check columns_sets : list Column set or list of column sets to check raise_exception : bool, optional whether to raise an exception or return a bool value Returns ------- ``True`` if ``df`` has the expected column names, ``False`` otherwise (if ``raise_exception`` is ``False``) Raises ------ :exc:`~biopsykit.exceptions.ValidationError` if ``raise_exception`` is ``True`` and ``df`` does not have the expected index level names Examples -------- >>> df = pd.DataFrame() >>> df.columns = ["col1", "col2"] >>> _assert_has_columns(df, [["other_col1", "other_col2"], ["col1", "col2"]]) >>> # This raises no error, as df contains all columns of the second set """ columns = df.columns result = False for col_set in columns_sets: result = result or all(v in columns for v in col_set) if result is False: if len(columns_sets) == 1: helper_str = "the following columns: {}".format(columns_sets[0]) else: helper_str = "one of the following sets of columns: {}".format(columns_sets) if raise_exception: raise ValidationError( "The dataframe is expected to have {}. 
Instead it has the following columns: {}".format( helper_str, list(df.columns) ) ) return result def _assert_has_column_multiindex( df: pd.DataFrame, expected: Optional[bool] = True, nlevels: Optional[int] = 2, nlevels_atleast: Optional[int] = False, raise_exception: Optional[bool] = True, ) -> Optional[bool]: """Check if a :any:`pandas.DataFrame` has a :any:`pandas.MultiIndex` as columns. Parameters ---------- df : :class:`~pandas.DataFrame` The dataframe to check expected : bool, optional Whether the df is expected to have MultiIndex column or not nlevels : int, optional If MultiIndex is expected, how many levels the MultiIndex columns should have nlevels_atleast : bool, optional Whether the MultiIndex has to have at least ``nlevels`` (``True``) or exactly match the number of levels (``False``) raise_exception : bool, optional Whether to raise an exception or return a bool value Returns ------- ``True`` if ``df`` meets the expected column index format, ``False`` otherwise (if ``raise_exception`` is ``False``) Raises ------ :exc:`~biopsykit.exceptions.ValidationError` if ``raise_exception` is ``True`` and ``df`` does not meet the expected column index format """ return _multiindex_check_helper( df=df, idx_or_col="column", expected=expected, nlevels=nlevels, nlevels_atleast=nlevels_atleast, raise_exception=raise_exception, ) def _assert_has_columns_any_level( df: pd.DataFrame, columns_sets: Sequence[Union[List[_Hashable], List[str], pd.Index]], raise_exception: Optional[bool] = True, ) -> Optional[bool]: """Check if the dataframe has the expected set of column names at any level of a :any:`pandas.MultiIndex`. Parameters ---------- df : :class:`~pandas.DataFrame` The dataframe to check columns_sets : list Column set of list of column sets to check raise_exception : bool, optional whether to raise an exception or return a bool value Returns ------- ``True`` if ``df`` has the expected column names at any :any:`pandas.MultiIndex` level, ``False`` otherwise (if ``raise_exception`` is ``False``) Raises ------ :exc:`~biopsykit.exceptions.ValidationError` if ``raise_exception`` is ``True`` and ``df`` does not have the expected column names Examples -------- >>> df = pd.DataFrame() >>> df.columns = pd.MultiIndex.from_tuples([("Name", "col1"), ("Name", "col2")]) >>> _assert_has_columns_any_level(df, [["col1", "col2"]]) >>> # This raises no error, as df contains all columns in the seconds level """ _assert_has_column_multiindex(df, expected=True, nlevels_atleast=True) column_levels = [np.array(df.columns.get_level_values(i)) for i in range(df.columns.nlevels)] result = False for columns in column_levels: for col_set in columns_sets: result = result or all(v in columns for v in col_set) if result is False: if len(columns_sets) == 1: helper_str = "the following columns: {}".format(columns_sets[0]) else: helper_str = "one of the following sets of columns: {}".format(columns_sets) if raise_exception: raise ValidationError( "The dataframe is expected to have {} at any level of the MultiIndex. Instead it has the " "following MultiIndex columns: {}".format(helper_str, column_levels) ) return result def _assert_has_column_levels( df: pd.DataFrame, column_levels: Iterable[_Hashable], match_atleast: Optional[bool] = False, match_order: Optional[bool] = False, raise_exception: Optional[bool] = True, ) -> Optional[bool]: """Check if the dataframe has all column level names of a MultiIndex column. 
Parameters ---------- df : :class:`~pandas.DataFrame` The dataframe to check column_levels : list Set of column level names to check match_atleast : bool, optional Whether the MultiIndex columns have to have at least the specified column levels (``True``) or exactly match the column levels (``False``) match_order : bool, optional Whether to also match the level order raise_exception : bool, optional Whether to raise an exception or return a bool value Returns ------- ``True`` if ``df`` has the expected column level names, ``False`` otherwise (if ``raise_exception`` is ``False``) Raises ------ :exc:`~biopsykit.exceptions.ValidationError` if ``raise_exception`` is ``True`` and ``df`` does not have the expected index level names """ return _multiindex_level_names_helper( df, level_names=column_levels, idx_or_col="column", match_atleast=match_atleast, match_order=match_order, raise_exception=raise_exception, ) def _assert_value_range( data: Union[pd.DataFrame, pd.Series], value_range: Sequence[Union[int, float]], raise_exception: Optional[bool] = True, ) -> Optional[bool]: """Check if all values are within the specified range. Parameters ---------- data : :class:`~pandas.DataFrame` data to check values value_range : tuple of numbers value range in the format [min_val, max_val] raise_exception : bool, optional Whether to raise an exception or return a bool value Returns ------- ``True`` if all values in ``data`` are within ``value_range``, ``False`` otherwise (if ``raise_exception`` is ``False``) Raises ------ :exc:`~biopsykit.exceptions.ValueRangeError` if ``raise_exception`` is ``True`` and any value of ``data`` is not within ``value_range`` """ max_val = np.nanmax(data) min_val = np.nanmin(data) if not (min_val >= value_range[0] and max_val <= value_range[1]): if raise_exception: raise ValueRangeError( "Some of the values are out of the expected range. " "Expected were values in the range {}, got values in the range {}. " "If values are part of questionnaire scores, " "you can convert questionnaire items into the correct range by calling " "`biopsykit.questionnaire.utils.convert_scale()`.".format(value_range, [min_val, max_val]) ) return False return True def _assert_num_columns( data: pd.DataFrame, num_cols: Union[int, Sequence[int]], raise_exception: Optional[bool] = True ) -> Optional[bool]: """Check if dataframe has (any of) the required number of columns. Parameters ---------- data : :class:`~pandas.DataFrame` data to check num_cols : int or list of int the required number of columns (or any of the required number of columns in case ``num_cols`` is a list) raise_exception : bool, optional Whether to raise an exception or return a bool value Returns ------- ``True`` if ``data`` has the required number of columns, ``False`` otherwise (if ``raise_exception`` is ``False``) Raises ------ :exc:`~biopsykit.exceptions.ValidationError` if ``raise_exception`` is ``True`` and ``data`` does not have the required number of columns """ if isinstance(num_cols, int): num_cols = [num_cols] if not any(len(data.columns) == num for num in num_cols): if raise_exception: raise ValidationError( "The dataframe does not have the required number of columns. " "Expected were any of {} columns, but has {} columns.".format(num_cols, len(data.columns)) ) return False return True def _assert_len_list(data: Sequence, length: int, raise_exception: Optional[bool] = True) -> Optional[bool]: """Check if a list has the required length. 
Parameters ---------- data : list list to check length : int the required length or the list raise_exception : bool, optional Whether to raise an exception or return a bool value Returns ------- ``True`` if ``data`` has the required length, ``False`` otherwise (if ``raise_exception`` is ``False``) Raises ------ :exc:`~biopsykit.exceptions.ValidationError` if ``raise_exception`` is ``True`` and ``data`` does not have the required length """ _assert_is_dtype(data, (list, tuple, np.ndarray)) if len(data) != length: if raise_exception: raise ValidationError( "The list does not have the required length. " "Expected was length {}, but it has length {}.".format(length, len(data)) ) return False return True def _assert_dataframes_same_length( df_list: Sequence[pd.DataFrame], raise_exception: Optional[bool] = True ) -> Optional[bool]: """Check if all dataframes have same length. Parameters ---------- df_list : list list of dataframes to check raise_exception : bool, optional Whether to raise an exception or return a bool value Returns ------- ``True`` if all dataframes in ``df_list`` have same length, ``False`` otherwise (if ``raise_exception`` is ``False``) Raises ------ :exc:`~biopsykit.exceptions.ValidationError` if ``raise_exception`` is ``True`` and ``data`` does not have the required length """ if len(set(len(df) for df in df_list)) != 1: if raise_exception: raise ValidationError("Not all dataframes have the same length!") return False return True def _multiindex_level_names_helper_get_expected_levels( ac_levels: Sequence[str], ex_levels: Sequence[str], match_atleast: Optional[bool] = False, match_order: Optional[bool] = False, ) -> bool: if match_order: if match_atleast: ac_levels_slice = ac_levels[: len(ex_levels)] expected = ex_levels == ac_levels_slice else: expected = ex_levels == ac_levels else: if match_atleast: expected = all(level in ac_levels for level in ex_levels) else: expected = sorted(ex_levels) == sorted(ac_levels) return expected def _multiindex_level_names_helper( df: pd.DataFrame, level_names: Iterable[_Hashable], idx_or_col: str, match_atleast: Optional[bool] = False, match_order: Optional[bool] = False, raise_exception: Optional[bool] = True, ) -> Optional[bool]: if isinstance(level_names, str): level_names = [level_names] ex_levels = list(level_names) if idx_or_col == "index": ac_levels = list(df.index.names) else: ac_levels = list(df.columns.names) expected = _multiindex_level_names_helper_get_expected_levels(ac_levels, ex_levels, match_atleast, match_order) if not expected: if raise_exception: raise ValidationError( "The dataframe is expected to have exactly the following {} level names {}, " "but it has {}".format(idx_or_col, level_names, ac_levels) ) return False return True def _multiindex_check_helper( df: pd.DataFrame, idx_or_col: str, expected: Optional[bool] = True, nlevels: Optional[int] = 2, nlevels_atleast: Optional[int] = False, raise_exception: Optional[bool] = True, ) -> Optional[bool]: has_multiindex, nlevels_act = _multiindex_check_helper_get_levels(df, idx_or_col) if has_multiindex is not expected: return _multiindex_check_helper_not_expected(idx_or_col, nlevels, nlevels_act, expected, raise_exception) if has_multiindex is True: if nlevels_atleast: expected = nlevels_act >= nlevels else: expected = nlevels_act == nlevels if not expected: if raise_exception: raise ValidationError( "The dataframe is expected to have a MultiIndex with {0} {1} levels. 
" "But it has a MultiIndex with {2} {1} levels.".format(nlevels, idx_or_col, nlevels_act) ) return False return True def _multiindex_check_helper_get_levels(df: pd.DataFrame, idx_or_col: str) -> Tuple[bool, int]: if idx_or_col == "index": has_multiindex = isinstance(df.index, pd.MultiIndex) nlevels_act = df.index.nlevels else: has_multiindex = isinstance(df.columns, pd.MultiIndex) nlevels_act = df.columns.nlevels return has_multiindex, nlevels_act def _multiindex_check_helper_not_expected( idx_or_col: str, nlevels: int, nlevels_act: int, expected: bool, raise_exception: bool ) -> Optional[bool]: if not expected: if raise_exception: raise ValidationError( "The dataframe is expected to have a single level as {0}. " "But it has a MultiIndex with {1} {0} levels.".format(idx_or_col, nlevels_act) ) return False if raise_exception: raise ValidationError( "The dataframe is expected to have a MultiIndex with {0} {1} levels. " "It has just a single normal {1} level.".format(nlevels, idx_or_col) ) return False def _assert_has_column_prefix( columns: Sequence[str], prefix: str, raise_exception: Optional[bool] = True ) -> Optional[bool]: """Check whether all columns start with the same prefix. Parameters ---------- columns : list of str list of column names prefix : str expected prefix of all columns raise_exception : bool, optional Whether to raise an exception or return a bool value Returns ------- ``True`` if ``columns`` all start with ``prefix``, ``False`` otherwise (if ``raise_exception`` is ``False``) Raises ------ ValidationError if ``raise_exception`` is ``True`` and one of ``columns`` is not a string or does not start with ``prefix`` """ if prefix is None or len(prefix) == 0: if raise_exception: raise ValidationError("'prefix' is None or empty!") return False for col in columns: return _check_has_column_prefix_single_col(columns, col, prefix, raise_exception) return True def _check_has_column_prefix_single_col( columns: Sequence[str], col: Any, prefix: str, raise_exception: bool ) -> Optional[bool]: if not _assert_is_dtype(col, str, raise_exception=False): if raise_exception: raise ValidationError("Column '{}' from {} is not a string!".format(col, columns)) return False if not col.startswith(prefix): if raise_exception: raise ValidationError( "Column '{}' from {} are starting with the required prefix '{}'!".format(col, columns, prefix) ) return False return True
python
from terminaltables import SingleTable import requests import os from dotenv import load_dotenv def predict_salary(min_salary, max_salary): if min_salary == None or min_salary == 0: average_salary = max_salary*0.8 elif max_salary == None or max_salary == 0: average_salary = min_salary*1.2 else: average_salary = ((max_salary+min_salary)/2) return average_salary def get_vacancies_hh(profession): hh_vacancies = [] page = 0 pages = 1 while page < pages: url = 'https://api.hh.ru/vacancies' user_request = {'text': profession, 'area': '4', 'period': '30', 'per_page': '10', 'page': page} page_response = requests.get(url, params=user_request) pages = page_response.json()['pages'] page += 1 page_answer_hh = page_response.json() hh_vacancies.append(page_answer_hh) return hh_vacancies def predict_rub_salary_hh(hh_vacancies, profession): total_vacancies = hh_vacancies[0]['found'] total_salary = 0 total_number = 0 for vacancy in hh_vacancies: prepare_vacancies = vacancy['items'] number = 0 sum_salary = 0 total_average_salary = 0 for prepare_vacancy in prepare_vacancies: if prepare_vacancy['salary'] is not None: salary = prepare_vacancy['salary'] if salary['currency'] == 'RUR': number += 1 min_salary = salary['from'] max_salary = salary['to'] average_salary = predict_salary(min_salary, max_salary) sum_salary += average_salary total_salary += sum_salary total_number += number try: total_average_salary = int(total_salary/total_number) except ZeroDivisionError: pass hh_response = [profession, total_vacancies, total_number, total_average_salary] return hh_response def get_vacancies_sj(profession, secret_key_sj): sj_vacancies = [] page = 0 pages = 1 while page < pages: url = 'https://api.superjob.ru/2.0/vacancies/' headers = {'X-Api-App-Id': secret_key_sj} user_request = {'keyword': profession, 'town': 4, 'period': 30, 'count': 10, 'page': page} page_response = requests.get(url, headers=headers, params=user_request) page_response.raise_for_status() more_vacancies = page_response.json()['more'] if more_vacancies: page += 1 pages += 1 if not more_vacancies: break page_answer_sj = page_response.json() sj_vacancies.append(page_answer_sj) return sj_vacancies def predict_rub_salary_sj(sj_vacancies, profession): total_vacancies = sj_vacancies[0]['total'] total_salary = 0 total_number = 0 for vacancy in sj_vacancies: prepare_vacancies = vacancy['objects'] number = 0 sum_salary = 0 total_average_salary = 0 for prepare_vacancy in prepare_vacancies: if prepare_vacancy['currency'] == 'rub': min_salary = prepare_vacancy['payment_from'] max_salary = prepare_vacancy['payment_to'] if min_salary or max_salary != 0: number += 1 average_salary = predict_salary(min_salary, max_salary) sum_salary += average_salary total_salary += sum_salary total_number += number try: total_average_salary = int(total_salary/total_number) except ZeroDivisionError: pass sj_response = [profession, total_vacancies, total_number, total_average_salary] return sj_response def get_table(table, title): table_template = [['Язык программирования', 'Вакансий найдено', 'Вакансий обработано', 'Средняя зарплата'], ] for line in table: table_template.append(line) table_instance = SingleTable(table_template, title) table_instance.justify_columns[2] = 'right' table_result = table_instance.table return table_result def main(): load_dotenv() secret_key_sj = os.getenv('SECRET_KEY') table_hh = [] table_sj = [] professions = ("C#", "Objective-C", "Ruby", "Java", "C", "Typescript", "Scala", "Go", "Swift", "C++", "PHP", "JavaScript", "Python") for profession in 
professions: hh_vacancies = get_vacancies_hh(profession) hh_response = predict_rub_salary_hh(hh_vacancies, profession) table_hh.append(hh_response) title_hh = 'HEADHUNTER_MOSCOW' sj_vacancies = get_vacancies_sj(profession, secret_key_sj) try: sj_response = predict_rub_salary_sj(sj_vacancies, profession) table_sj.append(sj_response) except (IndexError, ValueError): pass title_sj = 'SUPERJOB_MOSCOW' print (get_table(table_hh, title_hh)) print() print (get_table(table_sj, title_sj)) print() if __name__ == '__main__': main()
python
import numpy as np
import matplotlib.pyplot as plt

# Secret-sharing demo: the secret is the constant term of a polynomial, each
# participant receives one point on it, and the secret is recovered at x = 0
# via Lagrange interpolation.
teilnehmer = int(input("Number of participants: "))
a = list()
x = np.arange(1, teilnehmer+1)
y = np.zeros(teilnehmer)
a.append(float(input("Secret: ")))
for i in range(teilnehmer-1):
    a.append(float(input(f"Coefficient a{i+1}: ")))
# Evaluate the polynomial at x = 1..n to produce one share per participant.
for i in range(teilnehmer):
    for j in range(len(x)):
        y[i]+=a[j]*x[i]**j
for i in range(teilnehmer):
    print(f"Point for participant {i+1}: x{i+1} = {x[i]}, y{i+1} = {y[i]}")
# Reconstruct the secret as the Lagrange interpolation evaluated at x = 0.
berechnetes_geheimnis = 0
for i in range(teilnehmer):
    lagrange = 1
    for j in range(teilnehmer):
        if i != j:
            print(f"((0 - {x[j]})/({x[i]} - {x[j]})) * ", end = '')
            lagrange *= (0 - x[j])/(x[i] - x[j])
    print(f"{y[i]} = {lagrange * y[i]}")
    berechnetes_geheimnis += lagrange * y[i]
print(f"Computed secret: {berechnetes_geheimnis}")
# Plot the shares together with the fitted polynomial.
p = np.poly1d(np.polyfit(x, y, teilnehmer-1))
x_plot = np.linspace(-2, 6, 100)
_ = plt.plot(x, y, '.', x_plot, p(x_plot), '-')
plt.ylim(0, 20)
plt.show()
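# A minimal non-interactive check of the same reconstruction (added for
# illustration): assume three participants and the made-up polynomial
# f(x) = 5 + 2x + 3x^2, so the secret is 5. Evaluating the Lagrange basis at
# x = 0 should recover that constant term.
def _reconstruct_at_zero(xs, ys):
    secret = 0.0
    for i in range(len(xs)):
        basis = 1.0
        for j in range(len(xs)):
            if i != j:
                basis *= (0 - xs[j]) / (xs[i] - xs[j])
        secret += basis * ys[i]
    return secret

_xs = [1, 2, 3]
_ys = [5 + 2*xi + 3*xi**2 for xi in _xs]  # shares of f(x) = 5 + 2x + 3x^2
assert round(_reconstruct_at_zero(_xs, _ys), 6) == 5.0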
python
#/usr/bin/env python from __future__ import absolute_import # Charge transfer efficiency by EPER, now as a pipe task! import lsst.pex.config as pexConfig import lsst.pipe.base as pipeBase import sys import numpy as np import argparse from .MaskedCCD import MaskedCCD import lsst.geom as lsstGeom import lsst.afw.math as afwMath from lsst.eotest.Estimator import Estimator class SubImage(object): """Functor to produce sub-images depending on scan direction.""" def __init__(self, ccd, amp, overscans, task): geom = ccd.amp_geom self.ccd = ccd self.imaging = geom.imaging self.image = ccd[amp] # This is the masked image for the desired amp. if task.config.direction == 'p': self._bbox = self._parallel_box llc = lsstGeom.Point2I(geom.parallel_overscan.getMinX(), geom.parallel_overscan.getMinY() + overscans) urc = geom.parallel_overscan.getCorners()[2] self._bias_reg = lsstGeom.Box2I(llc, urc) self.lastpix = self.imaging.getMaxY() elif task.config.direction == 's': self._bbox = self._serial_box llc = lsstGeom.Point2I(geom.serial_overscan.getMinX() + overscans, geom.serial_overscan.getMinY()) urc = geom.serial_overscan.getCorners()[2] # # Omit the last 4 columns to avoid the bright column in the # last overscan column in the e2v vendor data. # urc[0] -= 4 self._bias_reg = lsstGeom.Box2I(llc, urc) self.lastpix = self.imaging.getMaxX() else: task.log.error("Unknown scan direction: " + str(direction)) sys.exit(1) def bias_est(self, statistic=afwMath.MEAN, gain=1): subim = self.image.Factory(self.image, self._bias_reg) bias_estimate = Estimator() bias_estimate.value = \ gain*afwMath.makeStatistics(subim, statistic).getValue() num_pix = len(subim.getImage().getArray().flatten()) bias_estimate.error = \ gain*afwMath.makeStatistics(subim, afwMath.STDEV).getValue()/np.sqrt(float(num_pix)) return bias_estimate def __call__(self, start, end=None): if end is None: end = start my_exp = self.image.Factory(self.image, self._bbox(start, end)) return my_exp def _parallel_box(self, start, end): llc = lsstGeom.PointI(self.imaging.getMinX(), start) urc = lsstGeom.PointI(self.imaging.getMaxX(), end) return lsstGeom.BoxI(llc, urc) def _serial_box(self, start, end): llc = lsstGeom.PointI(start, self.imaging.getMinY()) urc = lsstGeom.PointI(end, self.imaging.getMaxY()) return lsstGeom.BoxI(llc, urc) class EPERConfig(pexConfig.Config): """Configuration for the EPERTask.""" direction = pexConfig.Field("Select either parallel or serial direction", str, default="p") verbose = pexConfig.Field("Turn verbosity on", bool, default=True) cti = pexConfig.Field('Return CTI instead of CTE', bool, default=False) class EPERTask(pipeBase.Task): """Task to calculate either parallel or serial charge transfer efficiency via EPER.""" ConfigClass = EPERConfig _DefaultName = "eper" @pipeBase.timeMethod def run(self, infilename, nframes, amps, overscans, gains=None, mask_files=(), linearity_correction=None): if not infilename: self.log.error("Please specify an input file path.") sys.exit(1) if gains is None: gains = dict([(amp, 1) for amp in amps]) ccd = MaskedCCD(infilename, mask_files=mask_files, linearity_correction=linearity_correction) # iterate through amps cte = {} bias_estimates = {} for amp in amps: subimage = SubImage(ccd, amp, overscans, self) lastpix = subimage.lastpix # find signal in last image vector (i.e., row or column) last_im = Estimator(subimage(lastpix), ccd.stat_ctrl, gain=gains[amp], var_wt=nframes) if self.config.verbose: self.log.info("Last imaging row/column = " + str(last_im)) # find signal in each overscan vector 
overscan_ests = [] for i in range(1, overscans+1): overscan_ests.append(Estimator(subimage(lastpix+i), ccd.stat_ctrl, gain=gains[amp], var_wt=nframes)) if self.config.verbose: self.log.info("Overscan values = " + str(overscan_ests)) # sum medians of first n overscan rows summed = sum(overscan_ests) if self.config.verbose: self.log.info("summed overscans = " + str(summed)) # Find bias level. bias_est = subimage.bias_est(gain=gains[amp], statistic=afwMath.MEAN) bias_estimates[amp] = bias_est if self.config.verbose: self.log.info("bias value = " + str(bias_est)) # signal = last - bias sig = last_im - bias_est # trailed = sum(last2) - bias trailed = summed - overscans*bias_est # charge loss per transfer = (trailed/signal)/N chargelosspt = (trailed/sig)/(lastpix + 1.) if self.config.cti: cte[amp] = chargelosspt cte[amp].set_format_str("{0:.5e}") else: cte[amp] = 1. - chargelosspt cte[amp].set_format_str("{0:.16f}") if self.config.verbose: if self.config.cti: self.log.info('cti, amp ' + str(amp) + " = " + str(cte[amp]) + '\n') else: self.log.info('cte, amp ' + str(amp) + " = " + str(cte[amp]) + '\n') return cte, bias_estimates if __name__ == '__main__': #import pdb; pdb.set_trace() parser = argparse.ArgumentParser(description='Calculate either parallel or serial CTE via EPER.') parser.add_argument('infilename', help="image file to be used for analysis") parser.add_argument('-o', '--overscans', help="number of overscan rows/columns to use", type=int, default=3) parser.add_argument('-d', '--direction', help="specify either parallel ('p') or serial ('s') direction", default='p') parser.add_argument('-a', '--amps', help="amps to be analyzed, separated by a space", type=int, nargs='+', default=list(range(1, 17))) parser.add_argument('-v', '--verbose', help="turn verbosity on", action='store_true', default=False) parser.add_argument('-i', '--cti', help='return CTI (not CTE)', action='store_true', default=False) args = parser.parse_args() task = EPERTask() task.config.direction = args.direction task.config.verbose = args.verbose task.config.cti = args.cti task.run(args.infilename, args.amps, args.overscans)
python
from datetime import datetime from sqlalchemy import create_engine, Column, Integer, DateTime from sqlalchemy.ext.declarative import as_declarative, declared_attr from sqlalchemy.orm import sessionmaker, scoped_session from config.config import SQLALCHEMY_DATABASE_URI engine = create_engine(SQLALCHEMY_DATABASE_URI) Session = scoped_session(sessionmaker(bind=engine)) @as_declarative() class Base: @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) created_at = Column(DateTime, default=datetime.now) updated_at = Column(DateTime, default=datetime.now) @classmethod def count(cls): session = Session() return session.query(cls).count()
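# --- Usage sketch (added for illustration; not part of the original module). ---
# The model name "Note" below is a hypothetical example. Subclassing Base gives
# an auto-generated __tablename__, an integer primary key, created_at/updated_at
# timestamps, and the count() helper defined above.
from sqlalchemy import String


class Note(Base):
    text = Column(String(255))


if __name__ == "__main__":
    # Create the "note" table and count rows; assumes SQLALCHEMY_DATABASE_URI
    # points at a reachable database.
    Base.metadata.create_all(engine)
    session = Session()
    session.add(Note(text="hello"))
    session.commit()
    print(Note.count())  # -> number of Note rows, e.g. 1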
python
from typing import List


class Solution:
    def plusOne(self, digits: List[int]) -> List[int]:
        """Add one to a number represented as a list of decimal digits."""
        N = len(digits)
        for i in reversed(range(N)):
            digit = digits[i]
            if digit == 9:
                # 9 + 1 overflows this position: set it to 0 and carry on.
                digits[i] = 0
            else:
                # No carry propagates past this position, so we are done.
                digits[i] += 1
                return digits
        # Every digit was a 9 (e.g. [9, 9] -> 100): all positions are already 0,
        # so set the first digit to 1 and append one more 0.
        digits[0] = 1
        digits.append(0)
        return digits
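# Quick usage sketch (added for illustration): two representative cases,
# including the all-nines carry that grows the list by one digit.
if __name__ == "__main__":
    assert Solution().plusOne([1, 2, 9]) == [1, 3, 0]
    assert Solution().plusOne([9, 9]) == [1, 0, 0]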
python
import timeit import CoolProp.CoolProp as CP def time_check(N, h, p, TTSE = False, mode = 'TTSE'): if TTSE: if mode =='TTSE': setup = "import CoolProp; import CoolProp.CoolProp as CP; CP.enable_TTSE_LUT('Water'); CP.set_TTSE_mode('Water','TTSE'); CP.Props('T','H',500,'P',10000,'Water'); IWater = CP.get_Fluid_index('Water'); from CoolProp.param_constants import iT,iH,iP,iD" elif mode =='BICUBIC': setup = "import CoolProp; import CoolProp.CoolProp as CP; CP.enable_TTSE_LUT('Water'); CP.set_TTSE_mode('Water','BICUBIC'); CP.Props('T','H',500,'P',10000,'Water'); IWater = CP.get_Fluid_index('Water'); from CoolProp.param_constants import iT,iH,iP,iD" else: raise ValueError() else: setup = "import CoolProp.CoolProp as CP; IWater = CP.get_Fluid_index('Water'); CP.disable_TTSE_LUT('Water'); from CoolProp.param_constants import iT,iH,iP,iD" time = timeit.Timer("CP.IProps(iD,iH,"+str(h)+",iP,"+str(p)+",IWater)",setup).timeit(N)/N*1e6 value = CP.Props('D','H',h,'P',p,'Water') return time, value values = dict(subcooled = (500,10000), twophase = (2000,10000), superheated = (3000,10000), supercritical = (2000,30000)) N = 10000 for k in ['subcooled','twophase','superheated','supercritical']: h, p = values[k] time_EOS, value_EOS = time_check(N, h, p, TTSE = False) time_TTSE, value_TTSE = time_check(N, h, p, TTSE = True) time_BICUBIC, value_BICUBIC = time_check(N, h, p, TTSE = True, mode='BICUBIC') print("%s %s %s %s %s %s %s" % (k, h, p, (value_TTSE/value_EOS-1.0)*100, (value_BICUBIC/value_EOS-1.0)*100, time_EOS/time_TTSE, time_EOS/time_BICUBIC))
python
import numpy as np
import pandas as pd
import logging

logger = logging.getLogger(__name__)


def approximate_curve(data, bin_number):
    """Bin the ``capacity_factor`` column of ``data`` into ``bin_number`` equal-width bins."""
    binned = pd.cut(data.capacity_factor, bin_number)
    # bins = np.arange(1, len(data.datetime) / bin_number + 1)
    # logger.debug("bins: {}".format(bins))
    # digitized = np.digitize(data, bins)
    # bin_means = [data[digitized == i].mean()
    #              for i in range(1, len(bin_number))]
    return binned
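# Usage sketch (added for illustration): bin a small synthetic capacity_factor
# series into four equal-width intervals. The column name matches what the
# function expects; the values themselves are made up for the example.
if __name__ == "__main__":
    _df = pd.DataFrame({"capacity_factor": [0.1, 0.25, 0.4, 0.55, 0.7, 0.95]})
    print(approximate_curve(_df, 4))
    # Prints a Categorical series assigning each value to one of 4 intervals.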
python
"""There is a vehicle obscuring a pedestrian that conflicts with your path.""" from flow.envs.multiagent import Bayesian0NoGridEnv from flow.networks import Bayesian1Network from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams from flow.core.params import SumoCarFollowingParams, VehicleParams from flow.core.params import PedestrianParams from flow.controllers import SimCarFollowingController, GridRouter, RLController from flow.utils.registry import make_create_env from flow.utils.rllib import FlowParamsEncoder # Experiment parameters N_ROLLOUTS = 20 # number of rollouts per training iteration N_CPUS = 8 # number of parallel workers # Environment parameters # TODO(@klin) make sure these parameters match what you've set up in the SUMO version here V_ENTER = 30 # enter speed for departing vehicles INNER_LENGTH = 50 # length of inner edges in the traffic light grid network # number of vehicles originating in the left, right, top, and bottom edges N_LEFT, N_RIGHT, N_TOP, N_BOTTOM = 0, 1, 1, 1 def make_flow_params(): """ Generate the flow params for the experiment. Parameters ---------- Returns ------- dict flow_params object """ pedestrian_params = PedestrianParams() pedestrian_params.add( ped_id='ped_0', depart_time='0.00', start='(1.0)--(1.1)', end='(1.1)--(1.2)', depart_pos='40') # we place a sufficient number of vehicles to ensure they confirm with the # total number specified above. We also use a "right_of_way" speed mode to # support traffic light compliance vehicles = VehicleParams() vehicles.add( veh_id="human", acceleration_controller=(SimCarFollowingController, {}), car_following_params=SumoCarFollowingParams( min_gap=2.5, max_speed=V_ENTER, decel=7.5, # avoid collisions at emergency stops speed_mode="right_of_way", ), routing_controller=(GridRouter, {}), num_vehicles=2) vehicles.add( veh_id='rl', acceleration_controller=(RLController, {}), car_following_params=SumoCarFollowingParams( speed_mode="aggressive", ), routing_controller=(GridRouter, {}), num_vehicles=1) ''' vehicles.add( veh_id="human_1", acceleration_controller=(SimCarFollowingController, {}), car_following_params=SumoCarFollowingParams( min_gap=2.5, max_speed=V_ENTER, decel=7.5, # avoid collisions at emergency stops speed_mode="right_of_way", ), routing_controller=(GridRouter, {}), num_vehicles=1) ''' n_rows = 1 n_columns = 1 # define initial configs to pass into dict initial_config = InitialConfig( spacing='custom', shuffle=False, sidewalks=True, lanes_distribution=float('inf')) flow_params = dict( # name of the experiment exp_tag="bayesian_1_env", # name of the flow environment the experiment is running on env_name=Bayesian0NoGridEnv, # name of the network class the experiment is running on network=Bayesian1Network, # simulator that is used by the experiment simulator='traci', # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( restart_instance=False, sim_step=0.1, render=False, ), env=EnvParams( horizon=500, # environment related parameters (see flow.core.params.EnvParams) additional_params={ # maximum acceleration of autonomous vehicles 'max_accel': 2.6, # maximum deceleration of autonomous vehicles 'max_decel': 4.5, # desired velocity for all vehicles in the network, in m/s "target_velocity": 25, # how many objects in our local radius we want to return "max_num_objects": 3, # how large of a radius to search in for a given vehicle in meters "search_veh_radius": 50, # how large of a radius to search for pedestrians in for a given vehicle in meters (create effect of only 
seeing pedestrian only when relevant) "search_ped_radius": 22, # whether or not we have a discrete action space, "discrete": False, # whether to randomize which edge the vehicles are coming from "randomize_vehicles": False, # whether to append the prior into the state "inference_in_state": False, # whether to grid the cone "search_veh_radius" in front of us into 6 grid cells "use_grid": False }, ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( additional_params={ "speed_limit": V_ENTER + 5, # inherited from grid0 benchmark "grid_array": { "inner_length": INNER_LENGTH, "row_num": n_rows, "col_num": n_columns, "cars_left": N_LEFT, "cars_right": N_RIGHT, "cars_top": N_TOP, "cars_bot": N_BOTTOM, }, "horizontal_lanes": 1, "vertical_lanes": 1, "randomize_routes": True, }, ), # vehicles to be placed in the network at the start of a rollout (see # flow.core.params.VehicleParams) veh=vehicles, ped=pedestrian_params, # parameters specifying the positioning of vehicles upon initialization # or reset (see flow.core.params.InitialConfig) initial = initial_config ) return flow_params # define callbacks for tensorboard
python
from datetime import datetime, timedelta from typing import Optional from utils.utils import format_date class Event: """Event object to store data about a Google Calendar event""" def __init__( self, event_id: str, link: str, title: str, location: Optional[str], description: Optional[str], all_day: bool, start: datetime, end: datetime, ): self.__id = event_id self.__link = link self.__title = title self.__location = location self.__description = description self.__all_day = all_day self.__start = start.replace(tzinfo=None) self.__end = end.replace(tzinfo=None) @property def id(self) -> str: """Returns the event id""" return self.__id @property def link(self) -> str: """Returns the link to the event in Google Calendar""" return self.__link @property def title(self) -> str: """Returns the title of the event""" return self.__title @property def location(self) -> Optional[str]: """Returns the location of the event""" return self.__location @property def description(self) -> Optional[str]: """Returns the description of the event""" return self.__description @property def all_day(self) -> bool: """Returns whether or not the event is an all day event""" return self.__all_day @property def start(self) -> datetime: """Returns the start date as a datetime object""" return self.__start @property def end(self) -> datetime: """Returns the end date as a datetime object""" return self.__end @property def __one_day(self) -> bool: """Returns whether or not the event is a one day event""" return self.all_day and self.end - self.start <= timedelta(days=1) def relative_date_range_str(self, base=datetime.now()) -> str: """Returns a formatted string of the start to end date range""" start_str = self.__relative_start_str(base=base) end_str = self.__relative_end_str(base=self.start) # all day event if self.__one_day: return f"{start_str} - All day" # include end time if it is not the same as the start time return f"{start_str} - {end_str}" if end_str else start_str def __relative_start_str(self, base=datetime.now()) -> str: """Returns a formatted string of the start date""" return format_date(self.start, all_day=self.all_day, base=base) or "Today" def __relative_end_str(self, base=datetime.now()) -> str: """Returns a formatted string of the end date""" end_date = self.end # use previous day if end of multi-day, all-day event if self.all_day and not self.__one_day: end_date -= timedelta(days=1) return format_date(end_date, all_day=self.all_day, base=base)
python
from __future__ import absolute_import, division, print_function from cctbx.array_family.flex import ( # noqa: F401; lgtm abs, acos, arg, asin, atan, atan2, bool, ceil, compare_derivatives, complex_double, condense_as_ranges, conj, cos, cosh, cost_of_m_handle_in_af_shared, double, double_from_byte_str, double_range, empty_container_sizes_double, empty_container_sizes_int, exercise_versa_packed_u_to_flex, exp, extract_double_attributes, fabs, first_index, flex_argument_passing, float, float_range, floor, fmod, fmod_positive, get_random_seed, grid, hendrickson_lattman, histogram, imag, int, int_from_byte_str, int_range, integer_offsets_vs_pointers, intersection, last_index, linear_correlation, linear_interpolation, linear_regression, linear_regression_core, log, log10, long, long_range, mat3_double, max, max_absolute, max_default, max_index, mean, mean_and_variance, mean_default, mean_sq, mean_sq_weighted, mean_weighted, median, median_functor, median_statistics, mersenne_twister, miller_index, min, min_default, min_index, min_max_mean_double, nested_loop, norm, order, permutation_generator, polar, pow, pow2, product, py_object, random_bool, random_double, random_double_point_on_sphere, random_double_r3_rotation_matrix, random_double_r3_rotation_matrix_arvo_1992, random_double_unit_quaternion, random_generator, random_int_gaussian_distribution, random_permutation, random_selection, random_size_t, reindexing_array, rows, select, set_random_seed, show, show_count_stats, sin, sinh, size_t, size_t_from_byte_str, size_t_range, slice_indices, smart_selection, sort_permutation, sorted, split_lines, sqrt, std_string, sum, sum_sq, sym_mat3_double, tan, tanh, tiny_size_t_2, to_list, union, vec2_double, vec3_double, vec3_int, weighted_histogram, xray_scatterer, ) from dials.array_family.flex_ext import ( # noqa: F401; lgtm real, reflection_table_selector, ) from dials_array_family_flex_ext import ( # noqa: F401; lgtm Binner, PixelListShoeboxCreator, int6, observation, reflection_table, reflection_table_to_list_of_reflections, shoebox, )
python
# @Author: BingWu Yang <detailyang> # @Date: 2016-03-29T17:47:44+08:00 # @Email: detailyang@gmail.com # @Last modified by: detailyang # @Last modified time: 2016-04-10T16:54:56+08:00 # @License: The MIT License (MIT) import ply.yacc as yacc import eslast as ast from esllexer import ESLLexer tokens = ESLLexer.tokens def p_request(p): '''request : URL | URL METHOD | URL METHOD OPTIONS''' if len(p) == 2: p[0] = ast.RequestNode(ast.MethodNode('GET'), ast.URLNode(p[1]), None) elif len(p) == 3: p[0] = ast.RequestNode(ast.MethodNode(p[2]), ast.URLNode(p[1]), None) else: p[0] = ast.RequestNode(ast.MethodNode(p[2]), ast.URLNode(p[1]), p[3]) def p_options(p): '''OPTIONS : | OPTION | OPTIONS OPTION''' if len(p) == 2: p[0] = ast.OptionListNode([p[1]]) else: p[0] = p[1].append(p[2]) def p_option_empty(p): ' OPTION : empty ' p[0] = p[1] def p_option_header(p): ' OPTION : HEADERVALUE ' p[0] = p[1] def p_option_querystring(p): ' OPTION : QUERYSTRINGVALUE ' p[0] = p[1] def p_option_body(p): ' OPTION : BODYVALUE ' p[0] = p[1] def p_empty(p): 'empty :' p[0] = [] def p_querystring_value(p): '''QUERYSTRINGVALUE : QUERYSTRING VALUE ''' p[0] = ast.OptionNode(ast.QueryStringNode(p[1]), ast.ValueNode(p[2])) def p_querystring_shell(p): '''QUERYSTRINGVALUE : QUERYSTRING SHELL ''' p[0] = ast.OptionNode(ast.QueryStringNode(p[1]), ast.ShellNode(p[2])) def p_header_value(p): '''HEADERVALUE : HEADER VALUE ''' p[0] = ast.OptionNode(ast.HeaderNode(p[1]), ast.ValueNode(p[2])) def p_header_shell(p): '''HEADERVALUE : HEADER SHELL ''' p[0] = ast.OptionNode(ast.HeaderNode(p[1]), ast.ShellNode(p[2])) def p_body_value(p): '''BODYVALUE : BODY VALUE ''' p[0] = ast.OptionNode(ast.BodyNode(p[1]), ast.ValueNode(p[2])) def p_body_shell(p): '''BODYVALUE : BODY SHELL ''' p[0] = ast.OptionNode(ast.BodyNode(p[1]), ast.ShellNode(p[2])) def p_error(p): print("Syntax Error") print("ESL format: {URL} {METHOD} {OPTIONS}") print("{URL}: https://example.com|examples.com|/api/endpoints") print("{METHOD}: GET|get|POST|post|DELETE|delete|PUT|put") print("{OPTIONS}: --hContent-Type=application/json") print("{OPTIONS}: --qper_page=1") print("{OPTIONS}: --busername=xxxx") def parse(text): parser = yacc.yacc(debug=True) ast = parser.parse(text, ESLLexer().build()) return ast if __name__ == '__main__': ast = parse("/api/cmdb/peoples/ get --qhost_ip=!(ifconfig eth0) --qhost_name=bj-sdf --hContent-Type=abcd --bslkjsdf=123") # Test it print(ast.left) print(ast.method) for option in ast.right.options: key = option.key value = option.value
python
from discord.ext import commands import config class Bot(commands.Bot): async def invoke(self, ctx): if self.user.mentioned_in(ctx.message): # Mention was processed in on_message. return if ctx.invoked_with: await ctx.send(config.response) async def on_message(self, message): # bot? if message.author.bot: return # mention? if self.user.mentioned_in(message): await message.channel.send(config.response) return # invoke command await self.process_commands(message)
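# Usage sketch (added for illustration, not part of the original file): one way
# this Bot subclass might be started with discord.py 2.x. `config.prefix` and
# `config.token` are assumptions; the code above only requires `config.response`.
if __name__ == "__main__":
    import discord

    intents = discord.Intents.default()
    intents.message_content = True  # privileged intent; lets on_message read text
    bot = Bot(command_prefix=getattr(config, "prefix", "!"), intents=intents)
    bot.run(config.token)  # hypothetical: assumes config also defines a token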
python
#!/usr/bin/env python3 import altair as alt import pandas import selenium def vegaGraphics( cmdTag, id1, id2, parameters, sql, transformedData, verbose,): """Create interactive charts for specified data""" # making function more explicit cmdTag = cmdTag id1 = id1 id2 = id2 parameters = parameters sql = sql transformedData = transformedData verbose = verbose if verbose >= 1: print( "Creating Vega Graphics" ) transformedData = transformedData.rename( columns = { id1 : "id1", id2 : "id2", sql : "sql", cmdTag : "cmdTag", parameters : "parameters"}) dataInfo = transformedData.copy() data = transformedData[["total_duration", "cmdTag", "id1", "id2", "sql", "parameters"]].copy() data = data.sort_values(by = ["total_duration"], ascending = True, inplace = False).dropna().reset_index(drop = True) data["length"] = data["sql"].str.len() + data["parameters"].str.len() alt.data_transformers.disable_max_rows() brush = alt.selection_interval() # -----> create the scatter plot graph line = alt.Chart(data.reset_index()).mark_point().encode( x = alt.X( "length:Q", axis = alt.Axis(title = "Query Length")), y=alt.Y( "total_duration:Q", axis = alt.Axis(title = "Latency (ms)")), color = alt.condition( brush, "cmdTag:N", alt.value("lightgray")), shape = "cmdTag:N", tooltip = ["index:O", "total_duration:Q", "length:Q", "log_time_with_tz:N", "sql:N", "parameters:N", "cmdTag:N", "id1:N", "id2:N"] ).properties( width = 500, height = 500, title = "Einherjar Queries" ).add_selection( brush ).interactive() # -----> display the mean via a line across our chart rule = alt.Chart(data).mark_rule(color = "red").encode( y = "median(total_duration):Q", size = alt.value(2) ) alt.Chart(data).configure_title( fontSize = 30 ) # -----> display number of interations per table insert dog = dataInfo[["inserted_data", "cmdTag"]].dropna() bars1 = alt.Chart(dog).mark_bar().encode( y = "inserted_into:N", color = "cmdTag:N", x = "count(inserted_into):Q" ).transform_filter( brush ) # -----> display number of interations per table select cat = dataInfo[["selected_from", "cmdTag"]].dropna() bars2 = alt.Chart(cat).mark_bar().encode( y = "selected_from:N", color = "cmdTag:N", x = "count(selected_from):Q" ).transform_filter( brush ) # -----> add the line and rule charts to the base chart chart = line + rule chart = chart & bars1 & bars2 chart.save("results/data.json") chart.save("results/data.html") if verbose >= 1: print( "Vega Graphics have been completed" )
python
from ark.thread_handler import ThreadHandler from factory import Factory import time class GuiTasks(object): @classmethod def loop(cls): time.sleep(1) GuiTasks.get_active_threads() @classmethod def get_active_threads(cls): GUI = Factory.get('GUI') max_threads = len(ThreadHandler.activethreads) active_threads = 0 for key,timestamp in ThreadHandler.activethreads.items(): if timestamp > (time.time()-30): active_threads += 1 GUI.active_threads['text'] = "{} / {}".format(active_threads,max_threads)
python
import inject
from flask import Flask, Response, send_from_directory, send_file


class StaticRoute:
    @staticmethod
    @inject.autoparams()
    def init(flask: Flask) -> None:
        @flask.route("/static/<path:path>")
        def send_static(path: str) -> Response:
            return send_from_directory("static", path)

        @flask.route("/")
        def index() -> Response:
            return send_file("static/html/index.html")
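# Wiring sketch (added for illustration): how StaticRoute.init() might be used
# with the `inject` library. The binder configuration below reflects typical
# python-inject usage and is an assumption, not taken from the original project.
if __name__ == "__main__":
    app = Flask(__name__)

    def _configure(binder: inject.Binder) -> None:
        # Bind the Flask instance so @inject.autoparams() can supply it.
        binder.bind(Flask, app)

    inject.configure(_configure)
    StaticRoute.init()  # the `flask` parameter is injected automatically
    app.run(debug=True)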
python
# Exercise 31: Making Decisions print "You enter a dark room with two doors. Do you go through door #1 or door #2?" door = raw_input("> ") if door == "1": print "There's a giant bear here eating a cheese cake. What do you do?" print "1. Take the cake." print "2. Scream at the bear." print "3. Turn back quietly" print "4. Look around" bear = raw_input("> ") if bear == "1": print "The bear eats your face off. Good job!" elif bear == "2": print "The bear eats your legs off. Good job!" elif bear == "3": print "One plank creaked and bear eats you. Good job!" elif bear == "4": print "There have rifle. Will you get it?" print "1. Yes!" print "2. No!" rifle = raw_input("> ") if rifle == "1": print "Did you want to shoot the bear?" print "1. Yes, of course!" print "2. No!" choice = raw_input("> ") if choice == "1": print """ The rifle isn't loaded! You look around and see bullets on the table. You are going to get them, but the bear see you and eat you!!! Good job! :D """ elif choice == "2": print "While you thinking what to do the bear see you and eat you! Good job!" else: print "You can't choice other, for that you die! Good Job!" elif rifle == "2": print """ This is stupid decision and what will do now? Okey, just die. Good job! """ else: print "You can't choce other, for that you die! Good Job!" else: print "Well, doing %s is probably better. Bear runs away." % bear elif door == "2": print "You stare into the endless abyss at Cthulhu's retina." print "1. Blueberries." print "2. Yellow jacket clothespins." print "3. Understanding revolvers yelling melodies." insanity = raw_input("> ") if insanity == "1" or insanity == "2": print "Your body survives powered by a mind of jello. Good job!" else: print "The insanity rots your eyes into a pool of muck. Good job!" else: print "You stumble around and fall on a knife and die. Good job!" # Study Drills: # 1. Make new parts of the game and change what decisions people # can make. Expand the game out as much as you can before it get # ridiculous. # 2. Write a copletely new game. Maybe you don't like this one, so # make your own. This is your computer, do what you want.
python
# Generated by Django 3.1.1 on 2020-10-08 02:15 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('rameniaapp', '0009_auto_20201002_0243'), ] operations = [ migrations.CreateModel( name='Edit', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('image', models.ImageField(blank=True, upload_to='')), ('change', models.JSONField(blank=True, null=True)), ('editor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('noodle', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='rameniaapp.noodle')), ], ), ]
python
#!/usr/bin/env python # -*- coding: utf-8; py-indent-offset:4 -*- ############################################################################### # # Copyright (C) 2015, 2016, 2017 Daniel Rodriguez # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################### from __future__ import (absolute_import, division, print_function, unicode_literals) tableau20 = [ 'steelblue', # 0 'lightsteelblue', # 1 'darkorange', # 2 'peachpuff', # 3 'green', # 4 'lightgreen', # 5 'crimson', # 6 'lightcoral', # 7 'mediumpurple', # 8 'thistle', # 9 'saddlebrown', # 10 'rosybrown', # 11 'orchid', # 12 'lightpink', # 13 'gray', # 14 'lightgray', # 15 'olive', # 16 'palegoldenrod', # 17 'mediumturquoise', # 18 'paleturquoise', # 19 ] tableau10 = [ 'blue', # 'steelblue', # 0 'darkorange', # 1 'green', # 2 'crimson', # 3 'mediumpurple', # 4 'saddlebrown', # 5 'orchid', # 6 'gray', # 7 'olive', # 8 'mediumturquoise', # 9 ] tableau10_light = [ 'lightsteelblue', # 0 'peachpuff', # 1 'lightgreen', # 2 'lightcoral', # 3 'thistle', # 4 'rosybrown', # 5 'lightpink', # 6 'lightgray', # 7 'palegoldenrod', # 8 'paleturquoise', # 9 ] tab10_index = [3, 0, 2, 1, 2, 4, 5, 6, 7, 8, 9] class PlotScheme(object): def __init__(self): # to have a tight packing on the chart wether only the x axis or also # the y axis have (see matplotlib) self.ytight = False # y-margin (top/bottom) for the subcharts. This will not overrule the # option plotinfo.plotymargin self.yadjust = 0.0 # Each new line is in z-order below the previous one. change it False # to have lines paint above the previous line self.zdown = True # Rotation of the date labes on the x axis self.tickrotation = 15 # How many "subparts" takes a major chart (datas) in the overall chart # This is proportional to the total number of subcharts self.rowsmajor = 5 # How many "subparts" takes a minor chart (indicators/observers) in the # overall chart. This is proportional to the total number of subcharts # Together with rowsmajor, this defines a proportion ratio betwen data # charts and indicators/observers charts self.rowsminor = 1 # Distance in between subcharts self.plotdist = 0.0 # Have a grid in the background of all charts self.grid = True # Default plotstyle for the OHLC bars which (line -> line on close) # Other options: 'bar' and 'candle' self.style = 'line' # Default color for the 'line on close' plot self.loc = 'black' # Default color for a bullish bar/candle (0.75 -> intensity of gray) self.barup = '0.75' # Default color for a bearish bar/candle self.bardown = 'red' # Level of transparency to apply to bars/cancles (NOT USED) self.bartrans = 1.0 # Wether the candlesticks have to be filled or be transparent self.barupfill = True self.bardownfill = True # Wether the candlesticks have to be filled or be transparent self.fillalpha = 0.20 # Wether to plot volume or not. 
Note: if the data in question has no # volume values, volume plotting will be skipped even if this is True self.volume = True # Wether to overlay the volume on the data or use a separate subchart self.voloverlay = True # Scaling of the volume to the data when plotting as overlay self.volscaling = 0.33 # Pushing overlay volume up for better visibiliy. Experimentation # needed if the volume and data overlap too much self.volpushup = 0.00 # Default colour for the volume of a bullish day self.volup = '#aaaaaa' # 0.66 of gray # Default colour for the volume of a bearish day self.voldown = '#cc6073' # (204, 96, 115) # Transparency to apply to the volume when overlaying self.voltrans = 0.50 # Transparency for text labels (NOT USED CURRENTLY) self.subtxttrans = 0.66 # Default font text size for labels on the chart self.subtxtsize = 9 # Transparency for the legend (NOT USED CURRENTLY) self.legendtrans = 0.25 # Wether indicators have a leged displaey in their charts self.legendind = True # Location of the legend for indicators (see matplotlib) self.legendindloc = 'upper left' # Location of the legend for datafeeds (see matplotlib) self.legenddataloc = 'upper left' # Plot the last value of a line after the Object name self.linevalues = True # Plot a tag at the end of each line with the last value self.valuetags = True # Default color for horizontal lines (see plotinfo.plothlines) self.hlinescolor = '0.66' # shade of gray # Default style for horizontal lines self.hlinesstyle = '--' # Default width for horizontal lines self.hlineswidth = 1.0 # Default color scheme: Tableau 10 self.lcolors = tableau10 # strftime Format string for the display of ticks on the x axis self.fmt_x_ticks = None # strftime Format string for the display of data points values self.fmt_x_data = None def color(self, idx): colidx = tab10_index[idx % len(tab10_index)] return self.lcolors[colidx]
python
#!/usr/bin/env python #Creates an instance in /home/pi/.config/lxsession/LXDE-pi/autostart which will autolaunch the server on the pi user account. import time print "Copy the path of the shortcut file by right clicking it and clicking 'copy path(s)'." print "Paste the path when prompted by right clicking in the terminal and clicking 'paste'." dspath = raw_input("Paste the full path to the server shortcut: ") atspath = "/home/pi/.config/lxsession/LXDE-pi/autostart" desktopentry = open(dspath, "r") desktopcnt = desktopentry.readlines() desktopentry.close() workingline = "failsafe" for line in desktopcnt: if line[0:4] == "Exec": workingline = line if workingline == "failsafe": print "no Exec line was found in the file you specified." print "The program will terminate" time.sleep(5) exit() workingline = workingline.strip() workingline = workingline[6:len(workingline)] autostartline = "@"+workingline+"\n" readcurrent = open(atspath, "r") readcnt = readcurrent.readlines() readcurrent.close() memory = [] for line in readcnt: if len(line) > 2: memory.append(line) memory.insert(0, autostartline) print memory overwritecurrent = open(atspath, "w") lenmem = len(memory) for x in range(lenmem): overwritecurrent.write("%s" %(memory[x])) overwritecurrent.close() print "Autostart entry created." print "Program will terminate" time.sleep(5) exit()
python
class Book():
    '''
    Creates a book object that can be used to populate a web page
    Inputs:
    - title: the title of the book [str]
    - author: the author of the book [str]
    - series: the series the book belongs to or None [str]
    - review_text: a short blurb about the book [str]
    - image_url: a place to find the cover image of the book [str]
    '''
    def __init__(self, title, author, series, review_text, image_url):
        self.title = title
        self.author = author
        self.series = series
        self.review_text = review_text
        self.image_url = image_url

    def create_book_info(self):
        if self.series is None:
            self.series = 'This is a stand alone book.'
        else:
            self.series = 'This book is part of the series {}'.format(self.series)
        return {
            'title': self.title,
            'author': self.author,
            'series': self.series,
            'review_text': self.review_text,
            'image_url': self.image_url
        }


class Movie():
    '''
    Creates a movie object that can be used to populate a web page
    Inputs:
    - title: the title of the movie [str]
    - image_url: a place to find the poster image of the movie [str]
    - trailer_url: a link to the movie trailer on YouTube [str]
    '''
    def __init__(self, title, image_url, trailer_url):
        self.title = title
        self.poster_image_url = image_url
        self.trailer_youtube_url = trailer_url

    def create_movie_info(self):
        return {
            'title': self.title,
            'image_url': self.poster_image_url,
            'trailer_url': self.trailer_youtube_url
        }
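# Usage sketch (added for illustration): the titles and URLs below are made up.
if __name__ == "__main__":
    book = Book("Example Title", "Example Author", None,
                "A short blurb.", "https://example.com/cover.jpg")
    info = book.create_book_info()
    print(info['series'])   # -> "This is a stand alone book."

    movie = Movie("Example Movie",
                  "https://example.com/poster.jpg",
                  "https://www.youtube.com/watch?v=example")
    print(movie.create_movie_info()['trailer_url'])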
python
# -*- coding: utf-8 -*- from .base import Smoother __all__ = ['Smoother']
python
from os.path import join, dirname from textx import metamodel_for_language def test_example(): mm = metamodel_for_language('questionnaire') questionnaire = mm.model_from_file(join(dirname(__file__), 'example.que')) assert len(questionnaire.questions) == 6 assert questionnaire.questions[3].text == 'Author name' assert questionnaire.questions[2].type.__class__.__name__ == 'Free' assert questionnaire.questions[0].type.__class__.__name__ == 'Choice' assert questionnaire.questions[5].text == \ 'This question is to test multiline feature and indenting.' opt = questionnaire.questions[5].type.options assert len(opt) == 2 assert opt[0].num == 1 assert opt[0].text == 'Working' # Multiline assert opt[1].text == \ 'Not working. This is also to test multiline in choices.'
python
# -*- coding: UTF-8 -*- import threading import json import re from datetime import datetime from flask import current_app from flask_jwt import current_identity, jwt_required from flask_restful import Resource, request from marshmallow import EXCLUDE, ValidationError from sqlalchemy.exc import SQLAlchemyError from common.utils import paginate_parse, pretty_response from common.tasks import analysis_dataset, analysis_dataset_block, fetch_collection, delete_collection from models.dataset import DatasetModel, DatasetSchema from models.blockset import BlocksetModel, BlocksetSchema class DatasetList(Resource): @jwt_required() def get(self): """ Query all instances """ if current_identity.roles not in ['super']: return pretty_response(403) title = request.args.get('title', '') page = request.args.get('page', 1, type=int) per_page = request.args.get('per_page', 10, type=int) paginate = DatasetModel.query.filter(DatasetModel.title.like('%' + title + '%')).paginate( page, per_page, max_per_page=100) data = paginate_parse(paginate) data['items'] = DatasetSchema(many=True).dump(paginate.items) return pretty_response(200, data) @jwt_required() def post(self): """ Insert multi-instances """ if current_identity.roles not in ['super']: return pretty_response(403) jsondata = request.get_json() if DatasetModel.query.filter_by(title=jsondata['title']).first(): return pretty_response(40002) headers = jsondata.get('header', []) catalog = jsondata.get('catalog', None) if '数值' not in headers \ or (catalog == 'block' and '板块' not in headers) \ or (catalog == 'point' and '标题' not in headers): return pretty_response(40001) try: dataset_instance = DatasetSchema().load(jsondata, unknown=EXCLUDE) dataset_instance.add(dataset_instance) blocksets = BlocksetModel.query.all() data = json.loads( re.sub(r'[\s+]', '', json.dumps(jsondata.get('data', [])))) if catalog == 'block': # 导入板块数据 t = threading.Thread(target=analysis_dataset_block, args=( 'T' + dataset_instance.uuid, data, blocksets, headers)) t.start() else: # 导入集合数据 t = threading.Thread(target=analysis_dataset, args=( 'T' + dataset_instance.uuid, data, blocksets, headers)) t.start() dataset_dump = DatasetSchema().dump(dataset_instance) return pretty_response(200, dataset_dump) except ValidationError as e: current_app.logger.error(e.messages) return pretty_response(40003) except SQLAlchemyError as e: current_app.logger.error(e) return pretty_response(50001) def put(self): """ Update multi-instances """ return pretty_response(405) def delete(self): """ Batch-delete instances """ return pretty_response(405) class Dataset(Resource): @jwt_required() def get(self, uuid): """ Query specific instance """ if current_identity.roles not in ['super']: return pretty_response(403) dataset_instance = DatasetModel.query.get_or_404(uuid) dataset_dump = DatasetSchema().dump(dataset_instance) result = fetch_collection('T' + dataset_instance.uuid, []) dataset_dump['data'] = result return pretty_response(200, dataset_dump) @jwt_required() def post(self, uuid): """ Update specific instance """ if current_identity.roles not in ['super']: return pretty_response(403) dataset_instance = DatasetModel.query.get_or_404(uuid) jsondata = request.get_json() if not jsondata: return pretty_response(40001) catalog = jsondata.get('catalog', '') match = jsondata.get('match', {}) pipeline = [] aggregate_items = [] aggregate_max = 0 if catalog == 'block': pipeline = [{ '$match': match }, { '$group': {'_id': "$板块", 'value': {dataset_instance.mode: '$数值'}} }] result = fetch_collection('T' + 
dataset_instance.uuid, pipeline) blockset_list = BlocksetModel.query.all() for blockset in blockset_list: temp = { 'title': blockset.title, 'area': blockset.area, 'centroid': json.loads(blockset.centroid), 'coordinates': json.loads(blockset.coordinates), 'org_value': 0, 'value': 0, } for item in result: if blockset.title == item.get('_id', ''): if not item.get('value', None): break item_value = item.get('value', 0) temp['org_value'] = item_value temp['value'] = round( item_value / float(blockset.area), 4) if dataset_instance.inc_area else item_value if temp['value'] > aggregate_max: aggregate_max = temp['value'] break aggregate_items.append(temp) else: pipeline = [{ '$match': match }] result = fetch_collection('T' + dataset_instance.uuid, pipeline) for item in result: aggregate_item = { 'title': item.get('标题', ''), 'address': item.get('地址', ''), 'lng': item.get('经度', ''), 'lat': item.get('纬度', ''), 'value': item.get('数值', 0), } if aggregate_item['value'] > aggregate_max: aggregate_max = aggregate_item['value'] aggregate_items.append(aggregate_item) return pretty_response(200, {'max': aggregate_max, 'items': aggregate_items}) @jwt_required() def put(self, uuid): """ Update specific instance """ if current_identity.roles not in ['super']: return pretty_response(403) dataset_instance = DatasetModel.query.get_or_404(uuid) try: jsondata = request.get_json() DatasetSchema().load(jsondata, unknown=EXCLUDE) for key, val in jsondata.items(): setattr(dataset_instance, key, val) dataset_instance.updatetime = datetime.now() dataset_instance.update() dataset_dump = DatasetSchema().dump(dataset_instance) return pretty_response(200, dataset_dump) except ValidationError as e: current_app.logger.error(e.messages) return pretty_response(40003) except SQLAlchemyError as e: current_app.logger.error(e) return pretty_response(50001) @jwt_required() def delete(self, uuid): """ Delete specific instance """ if current_identity.roles not in ['super']: return pretty_response(403) dataset_instance = DatasetModel.query.get_or_404(uuid) delete_collection('T' + dataset_instance.uuid) try: dataset_instance.delete(dataset_instance) return pretty_response(20003) except SQLAlchemyError as e: current_app.logger.error(e) pretty_response(50001) class DatasetFree(Resource): def put(self, uuid): """ Update specific instance """ dataset_instance = DatasetModel.query.get_or_404(uuid) try: jsondata = request.get_json() DatasetSchema().load(jsondata, unknown=EXCLUDE) for key, val in jsondata.items(): setattr(dataset_instance, key, val) dataset_instance.updatetime = datetime.now() dataset_instance.update() dataset_dump = DatasetSchema().dump(dataset_instance) return pretty_response(200, dataset_dump) except ValidationError as e: current_app.logger.error(e.messages) return pretty_response(40003) except SQLAlchemyError as e: current_app.logger.error(e) return pretty_response(50001)
python
""" Discovering structure in heatmap data ===================================== _thumb: .4, .2 """ import pandas as pd import seaborn as sns sns.set(font="monospace") # Load the brain networks example dataset df = sns.load_dataset("brain_networks", header=[0, 1, 2], index_col=0) # Select a subset of the networks used_networks = [1, 5, 6, 7, 8, 11, 12, 13, 16, 17] used_columns = (df.columns.get_level_values("network") .astype(int) .isin(used_networks)) df = df.loc[:, used_columns] # Create a custom palette to identify the networks network_pal = sns.cubehelix_palette(len(used_networks), light=.9, dark=.1, reverse=True, start=1, rot=-2) network_lut = dict(zip(map(str, used_networks), network_pal)) # Convert the palette to vectors that will be drawn on the side of the matrix networks = df.columns.get_level_values("network") network_colors = pd.Series(networks, index=df.columns).map(network_lut) # Create a custom colormap for the heatmap values cmap = sns.diverging_palette(h_neg=210, h_pos=350, s=90, l=30, as_cmap=True) # Draw the full plot sns.clustermap(df.corr(), row_colors=network_colors, linewidths=.5, col_colors=network_colors, figsize=(13, 13), cmap=cmap)
python
from django.db import models class Position(models.Model): w = models.CharField(max_length=128, null=True, blank=True) x = models.CharField(max_length=128, null=True, blank=True) y = models.CharField(max_length=128, null=True, blank=True) z = models.CharField(max_length=128, null=True, blank=True) time_received = models.DateField()
python
# -*- coding: UTF-8 -*- # Copyright 2016-2018 Rumma & Ko Ltd # License: BSD, see LICENSE for more details. """ A library of `invoke <http://docs.pyinvoke.org/en/latest/index.html>`__ tasks. See :doc:`/invlib`. .. autosummary:: :toctree: tasks utils """ from __future__ import print_function from __future__ import absolute_import from __future__ import unicode_literals import os import six from importlib import import_module from invoke import Collection from unipath import Path import atelier def setup_from_tasks( globals_dict, main_package=None, settings_module_name=None, **kwargs): """ This is the function you must call from your :xfile:`tasks.py` file in order to activate the tasks defined by atelier. """ if '__file__' not in globals_dict: raise Exception( "No '__file__' in %r. " "First parameter to must be `globals()`" % globals_dict) tasks_file = Path(globals_dict['__file__']) if not tasks_file.exists(): raise Exception("No such file: %s" % tasks_file) # print("20180428 setup_from_tasks() : {}".format(root_dir)) from atelier.invlib import tasks from atelier.projects import get_project_from_tasks prj = get_project_from_tasks(tasks_file.parent) atelier.current_project = prj if kwargs: prj.config.update(kwargs) if settings_module_name is not None: os.environ['DJANGO_SETTINGS_MODULE'] = settings_module_name from django.conf import settings prj.config.update( languages=[lng.name for lng in settings.SITE.languages]) if isinstance(main_package, six.string_types): main_package = import_module(main_package) if main_package: prj.set_main_package(main_package) self = Collection.from_module(tasks) prj.set_namespace(self) return self
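# Usage sketch (added for illustration): the docstring above says this function
# must be called from a project's tasks.py. A minimal tasks.py, with the package
# name "mypackage" as a placeholder assumption, would look roughly like:
#
#     from atelier.invlib import setup_from_tasks
#     ns = setup_from_tasks(globals(), "mypackage")
#
# invoke then finds the atelier-defined tasks through the returned Collection,
# conventionally bound to the name ``ns``.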
python
from flask import request, jsonify, current_app, make_response, session import random from info.libs.yuntongxun import sms from . import passport_blue from info.utils.response_code import RET from info.utils.captcha.captcha import captcha from info import redis_store,constants,db # 导入模型类 from info.models import User import re from datetime import datetime """ json.loads:把json字符串转成字典 json.dumps: 把字典转成json字符串 json.load/json.dump(操作的是文件对象) var data={ "mobile":mobile, "image_code":imagecode, ... } a='123'; json的概念:本质字符串,基于键值对的字符串;轻量级的数据交互格式; json的作用:实现跨语言,跨平台的数据交互; xml 格式: 作用是用来传输数据;都是闭合标签 XML: xmltodic模块,xmltodict.parse()/unparse() 微信, html用来展示数据; <xml> <mobile>12223234</mobile> <image_code>12223234</image_code> </xml> JSON { "mobile":mobile, "image_code":imagecode, } JSON.Stringify(data) 前端把对象转成json字符串; """ @passport_blue.route('/image_code') def generate_image_code(): """ 1.获取前端生成的uuid,/image_code?image_code_id=uuid request.args.get('image_code_id') 2. 判断参数是否存在,如果不存在uuid,直接return 3.调用工具生成图片验证码, 4.存储redis图片验证码的text文本,构造redis数据实例,用来存储业务相关的数据比如 :图片验证码 5. 返回图片给浏览器, 状态码: return jsonify(errno=666,errmsg='uuid未获取到') 1. 自定义的状态码: 用来实现前后端的数据交互. $.ajax({ url:'/image_code, type:'get' data:data, contentType:'application/json' success:function(resp){ if (resp == 666){ alert(成功) }else{ alert(失败) } } }) :return: """ # 获取参数 image_code_id=request.args.get('image_code_id') # 校验参数是否存在,如果UUID不存在,返回错误信息 if not image_code_id: return jsonify(errno=RET.PARAMERR,errmsg='参数缺失') # 调用工具captcha生成图片验证码 name,text,image=captcha.generate_captcha() # 保存图片验证码的文本 try: redis_store.setex('ImageCode_'+image_code_id,constants.IMAGE_CODE_REDIS_EXPIRES,text) except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.DBERR,errmsg='保存图片验证码失败') else: response=make_response(image) # 默认的响应报文Content-Type:text/html,应该修改默认的响应报文 response.headers['Content-Type']='image/jpg' return response @passport_blue.route('/sms_code',methods=['POST']) def send_sms_code(): """ 发送短信验证码 获取参数---校验参数---业务处理(查询数据)---返回结果 1、获取post请求的三个参数;前端使用ajax传入的参数,前端如何传入json? mobile/image_code/image_code_id request.json.get() 2、检查参数的完整性 3、检查手机号的格式是否符合要求,使用正则 4、比较图片验证码,从redis数据库中获取真实的图片验证码 get() 5、判断图片验证码是否过期 6、需要先删除Redis中真实存在的图片验证码,因为图片验证码只能获取一次,比较一次. 7、比较图片验证码,如果图片验证码正确 **检查手机号是否注册过??? 8、生成短信的随机数,六位数的随机数 random 9、保存短信随机数到Redis数据库中, 10、调用云通讯接口,发送短信,保存发送结果 11、返回发送结果 :return: """ mobile=request.json.get('mobile') image_code=request.json.get('image_code') image_code_id=request.json.get('image_code_id') # 检查参数的完整性 if not all([mobile,image_code,image_code_id]): return jsonify(errno=RET.PARAMERR,errmsg='参数不完整') # 检查手机号的格式,13012345678 if not re.match(r'1[3456789]\d{9}$',mobile): return jsonify(errno=RET.PARAMERR,errmsg='手机号格式错误') # 尝试从redis数据库中获取真实的图片验证码 try: real_image_code=redis_store.get('ImageCode_'+image_code_id) except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.DBERR,errmsg='获取图片验证码数据失败') # 判断图片验证码是否过期 if not real_image_code: return jsonify(errno=RET.NODATA,errmsg='图片验证码已过期') # 删除Redis数据库中的图片验证码 try: redis_store.delete('ImageCode_'+image_code_id) except Exception as e: current_app.logger.error(e) # 比较图片验证码是否一致,忽略大小写 if real_image_code.lower() != image_code.lower(): return jsonify(errno=RET.DATAERR,errmsg='图片验证码错误') # 确认用户是否注册过? 
try: # User.query.filter_by(mobile=mobile).first() user=User.query.filter(User.mobile==mobile).first() except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.DBERR,errmsg='查询用户数据失败') else: # 判断查询结果是否存在 if user is not None: return jsonify(errno=RET.DATAEXIST,errmsg='用户已存在') #生成6位数短信随机数,使用随机数模块 sms_code='%06d' % random.randint (0, 999999) print(sms_code) try: redis_store.setex('SMSCode_'+mobile,constants.SMS_CODE_REDIS_EXPIRES,sms_code) except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.DBERR,errmsg='保存短信数据失败') # 调用云通讯扩展,发送短信 try: ccp=sms.CCP() result=ccp.send_template_sms(mobile,[sms_code,constants.SMS_CODE_REDIS_EXPIRES/60],1) except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.THIRDERR,errmsg='发送短信异常') # 判断发送是否成功 if result==0: return jsonify(errno=RET.OK,errmsg='发送成功') else: return jsonify(errno=RET.THIRDERR,errmsg='发送失败') @passport_blue.route('/register',methods=['POST']) def register(): """ 用户注册 1、获取参数,mobile,sms_code,password 2、检查参数的完整性 3、检查手机号的格式 4、检查短信验证码,尝试从Redis数据库中获取真实的短信验证码 5、判断获取结果是否过期 6、先比较短信验证码是否一致 7、删除Redis数据库中的短信验证码 8、构造模型类对象 user=User() user.password=password 9、提交数据到数据库中,mysql 10、把用户基本信息缓存到Redis数据库中 session['user_id']=user.id session['mobile']=mobile session['nick_name']=mobile 11、返回结果 :return: """ mobile=request.json.get('mobile') sms_code=request.json.get('sms_code') password=request.json.get('password') # 检查参数完整性 if not all([mobile,sms_code,password]): return jsonify(errno=RET.PARAMERR,errmsg='参数缺失') # 检查手机号格式 if not re.match(r'1[3456789]\d{9}$',mobile): return jsonify(errno=RET.PARAMERR,errmsg='手机号格式错误') # 尝试从Redis中获取真实的短信验证码 try: real_sms_code=redis_store.get('SMSCode_'+mobile) except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.DBERR,errmsg='查询短信验证码失败') # 判断查询结果 if not real_sms_code: return jsonify(errno=RET.NODATA,errmsg='短信验证码已过期') # 比较短信验证码是否正确 if real_sms_code !=str(sms_code): return jsonify(errno=RET.DATAERR,errmsg='短信验证码不一致') # 删除redis数据库中存储的短信验证码 try: redis_store.delete('SMSCode_'+mobile) except Exception as e: current_app.logger.error(e) # 构造模型类对象 user=User() user.mobile=mobile user.nick_name=mobile # 调用了模型类中的generate_password_hash实现了密码 加密储存,sha256 user.password=password # 提交用户注册信息数据到mysql数据库中 try: db.session.add(user) db.session.commit() except Exception as e: current_app.logger.error(e) # 存储数据如果发生异常,需要进行回滚 db.session.rollback() return jsonify(errno=RET.DBERR,errmsg='保存用户数据失败') # 返回用户信息到Redis数据库中 session['user_id']=user.id session['mobile']=mobile session['nick_name']=mobile # 返回结果 return jsonify(errno=RET.OK,errmsg='注册成功') @passport_blue.route("/login",methods=['POST']) def login(): """ 用户登录 1、获取参数:mobile,password 2、检查参数完整性 3、检查手机号的格式 4、根据手机号查询数据库,确认用户user存在 5、调用模型类检查密码是否正确的方法 6、记录用户的登录时间 user.last_login=datetime.now() 7、提交数据库,如果发生异常需要回滚 8、缓存用户信息session,昵称要换成user.nick_name 8、返回结果 :return: """ # 获取参数 mobile=request.json.get('mobile') password=request.json.get('password') # 检查参数的完整性 if not all([mobile,password]): return jsonify(errno=RET.PARAMERR,errmsg='参数缺失') # 检查手机号格式 if not re.match(r'1[3456789]\d{9}$',mobile): return jsonify(errno=RET.PARAMERR,errmsg='手机号格式错误') # 根据手机号查询数据库,确认用户已注册. try: user=User.query.filter_by(mobile=mobile).first() except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.DBERR,errmsg='查询用户数据失败') # 判断用户是否注册,以及密码是否正确. 
if user is None or not user.check_password(password): return jsonify(errno=RET.DATAERR,errmsg='用户名或密码错误') # 记录用户的登录时间 user.last_login=datetime.now() # 提交数据到数据库中 try: db.session.add(user) db.session.commit() except Exception as e: current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR,errmsg='保存数据失败') # 缓存用户信息到redis数据库中 session['user_id']=user.id session['mobile']=mobile # 缓存的用户昵称和注册时要有区别,因为登录可以登录多次,昵称有可能会修改 session['nick_name']=user.nick_name # 返回结果 return jsonify(errno=RET.OK,errmsg='ok') @passport_blue.route("/logout") def logout(): """ 如果是前后端分离,以及符合RESTful风格,(表现层状态转换),退出的请求方法为delete get/post/put/delete 获取/新建/修改/删除 退出登录 1、本质是清除服务器缓存的用户信息 :return: """ session.pop('user_id',None) session.pop('mobile',None) session.pop('nick_name',None) return jsonify(errno=RET.OK,errmsg='OK') pass
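# --- Hedged standalone sketch (not part of the blueprint above) ---
# Illustrates the two small pieces the handlers rely on: the 11-digit mobile-number
# regex and the zero-padded six-digit SMS code. The sample number is made up.
import random
import re

sample_mobile = '13012345678'
print(bool(re.match(r'1[3456789]\d{9}$', sample_mobile)))  # True for a valid 11-digit number
print('%06d' % random.randint(0, 999999))                  # e.g. '047301', always 6 characters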
python
from mpf.tests.MpfGameTestCase import MpfGameTestCase from mpf.core.rgb_color import RGBColor class TestBlinkenlight(MpfGameTestCase): def get_config_file(self): return 'config.yaml' def get_platform(self): return 'smart_virtual' def get_machine_path(self): return 'tests/machine_files/blinkenlight/' def test_add_color_to_one_blinkenlight(self): self.post_event('start_mode1') self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight1.num_colors') self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight2.num_colors') self.post_event('add_color_to_first_blinkenlight') self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight1.num_colors') self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight2.num_colors') def test_add_color_to_two_blinkenlights(self): self.post_event('start_mode1') self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight1.num_colors') self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight2.num_colors') self.post_event('add_color_to_all_blinkenlights') self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight1.num_colors') self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight2.num_colors') def test_remove_color_from_one_blinkenlight(self): self.post_event('start_mode1') self.post_event('add_color_to_second_blinkenlight') self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight1.num_colors') self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight2.num_colors') self.post_event('remove_color_from_first_blinkenlight') self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight1.num_colors') self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight2.num_colors') self.post_event('remove_color_from_second_blinkenlight') self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight1.num_colors') self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight2.num_colors') def test_remove_all_colors_from_all_blinkenlights(self): self.post_event('start_mode1') self.post_event('start_mode2') self.post_event('add_color_to_first_blinkenlight') self.post_event('add_color_to_second_blinkenlight') self.post_event('add_color_to_third_blinkenlight') self.post_event('add_color_to_all_blinkenlights') self.post_event('mode2_add_color_to_first_blinkenlight') self.assertPlaceholderEvaluates(3, 'device.blinkenlights.my_blinkenlight1.num_colors') self.assertPlaceholderEvaluates(2, 'device.blinkenlights.my_blinkenlight2.num_colors') self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight3.num_colors') self.post_event('remove_all_colors_from_all_blinkenlights') self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight1.num_colors') self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight2.num_colors') self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight3.num_colors') def test_remove_mode_colors_from_one_blinkenlight(self): self.post_event('start_mode1') self.post_event('start_mode2') self.post_event('add_color_to_first_blinkenlight') self.post_event('mode2_add_color_to_first_blinkenlight') self.post_event('mode2_add_color2_to_first_blinkenlight') self.assertPlaceholderEvaluates(3, 'device.blinkenlights.my_blinkenlight1.num_colors') self.post_event('mode2_remove_mode_colors_from_first_blinkenlight') self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight1.num_colors') def 
test_remove_mode_colors_when_mode_ends(self): self.post_event('start_mode1') self.post_event('start_mode2') self.post_event('add_color_to_first_blinkenlight') self.post_event('add_color_to_second_blinkenlight') self.post_event('mode2_add_color_to_first_blinkenlight') self.post_event('mode2_add_color2_to_first_blinkenlight') self.post_event('mode2_add_color_to_second_blinkenlight') self.assertPlaceholderEvaluates(3, 'device.blinkenlights.my_blinkenlight1.num_colors') self.assertPlaceholderEvaluates(2, 'device.blinkenlights.my_blinkenlight2.num_colors') self.post_event('stop_mode2') self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight1.num_colors') self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight2.num_colors') def test_flashing_cycle(self): self.post_event('start_mode1') self.post_event('add_color_to_all_blinkenlights') self.post_event('add_color_to_first_blinkenlight') self.post_event('add_color_to_second_blinkenlight') self.post_event('add_color_to_third_blinkenlight') self.assertPlaceholderEvaluates(2, 'device.blinkenlights.my_blinkenlight1.num_colors') self.assertPlaceholderEvaluates(2, 'device.blinkenlights.my_blinkenlight2.num_colors') self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight3.num_colors') blinkenlight1 = self.machine.blinkenlights['my_blinkenlight1'] blinkenlight2 = self.machine.blinkenlights['my_blinkenlight2'] blinkenlight3 = self.machine.blinkenlights['my_blinkenlight3'] blue = RGBColor('blue') green = RGBColor('green') red = RGBColor('red') yellow = RGBColor('yellow') purple = RGBColor('purple') cyan = RGBColor('cyan') off = RGBColor('off') self.assertEqual(blue, blinkenlight1.light._color) self.assertEqual(green, blinkenlight2.light._color) self.assertEqual(purple, blinkenlight3.light._color) self.advance_time_and_run(1) self.assertEqual(red, blinkenlight1.light._color) self.assertEqual(green, blinkenlight2.light._color) self.assertEqual(off, blinkenlight3.light._color) self.advance_time_and_run(1) self.assertEqual(off, blinkenlight1.light._color) self.assertEqual(yellow, blinkenlight2.light._color) self.assertEqual(purple, blinkenlight3.light._color) self.advance_time_and_run(1) self.assertEqual(blue, blinkenlight1.light._color) self.assertEqual(yellow, blinkenlight2.light._color) self.assertEqual(off, blinkenlight3.light._color) self.advance_time_and_run(1) self.assertEqual(red, blinkenlight1.light._color) self.assertEqual(green, blinkenlight2.light._color) self.assertEqual(purple, blinkenlight3.light._color) self.advance_time_and_run(1) self.assertEqual(off, blinkenlight1.light._color) self.assertEqual(green, blinkenlight2.light._color) self.assertEqual(off, blinkenlight3.light._color) self.advance_time_and_run(1) self.assertEqual(blue, blinkenlight1.light._color) self.assertEqual(yellow, blinkenlight2.light._color) self.assertEqual(purple, blinkenlight3.light._color) self.post_event("remove_color_from_third_blinkenlight") self.advance_time_and_run(1) self.assertEqual(red, blinkenlight1.light._color) self.assertEqual(yellow, blinkenlight2.light._color) self.assertEqual(off, blinkenlight3.light._color) self.advance_time_and_run(1) self.assertEqual(off, blinkenlight1.light._color) self.assertEqual(green, blinkenlight2.light._color) self.assertEqual(off, blinkenlight3.light._color) self.advance_time_and_run(1) self.assertEqual(blue, blinkenlight1.light._color) self.assertEqual(green, blinkenlight2.light._color) self.assertEqual(off, blinkenlight3.light._color) def test_priority_order(self): 
self.post_event('start_mode1') self.post_event('start_mode2') blinkenlight1 = self.machine.blinkenlights['my_blinkenlight1'] red = RGBColor('red') orange = RGBColor('orange') off = RGBColor('off') self.post_event('add_color_to_first_blinkenlight') self.post_event('mode2_add_color_to_first_blinkenlight') self.assertEqual(orange, blinkenlight1.light._color) self.advance_time_and_run(1) self.assertEqual(red, blinkenlight1.light._color) self.advance_time_and_run(1) self.assertEqual(off, blinkenlight1.light._color) self.post_event('remove_all_colors_from_all_blinkenlights') self.advance_time_and_run(1) self.post_event('mode2_add_color_to_first_blinkenlight') self.post_event('add_color_to_first_blinkenlight') self.assertEqual(orange, blinkenlight1.light._color) self.advance_time_and_run(1) self.assertEqual(red, blinkenlight1.light._color) self.advance_time_and_run(1) self.assertEqual(off, blinkenlight1.light._color) def test_replace_existing_color(self): self.post_event('start_mode1') blinkenlight1 = self.machine.blinkenlights['my_blinkenlight1'] blue = RGBColor('blue') darkred = RGBColor('darkred') off = RGBColor('off') self.post_event('add_color_to_all_blinkenlights') self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight1.num_colors') self.assertEqual(blue, blinkenlight1.light._color) self.advance_time_and_run(1.5) self.assertEqual(off, blinkenlight1.light._color) self.advance_time_and_run(1.5) self.assertEqual(blue, blinkenlight1.light._color) self.advance_time_and_run(1.5) self.assertEqual(off, blinkenlight1.light._color) self.advance_time_and_run(1.5) self.post_event('add_color_to_first_blinkenlight_with_duplicate_key') self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight1.num_colors') self.assertEqual(darkred, blinkenlight1.light._color) self.advance_time_and_run(1.5) self.assertEqual(off, blinkenlight1.light._color) self.advance_time_and_run(1.5) self.assertEqual(darkred, blinkenlight1.light._color) self.advance_time_and_run(1.5) self.assertEqual(off, blinkenlight1.light._color) def test_show_with_tokens(self): self.post_event('start_mode2') blinkenlight = self.machine.blinkenlights['my_blinkenlight2'] gray = RGBColor('gray') off = RGBColor('off') self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight2.num_colors') self.post_event('play_blinkenlight_token_show') self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight2.num_colors') self.assertEqual(gray, blinkenlight.light._color) self.advance_time_and_run(2) self.assertEqual(off, blinkenlight.light._color) self.advance_time_and_run(2) self.assertEqual(gray, blinkenlight.light._color) self.advance_time_and_run(2) self.assertEqual(off, blinkenlight.light._color)
python
import mysql.connector

mydb = mysql.connector.connect(
    host='localhost',
    user="root",
    #passwd = "ant904",
    database="spl"
    #auth_plugin='mysql_native_password'
)
myCursor = mydb.cursor()

qusTimeList = [
    "0:00:03", "0:00:02", "0:00:05", "0:00:06", "0:00:08", "0:00:02",
    "0:00:03", "0:00:03", "0:00:04", "0:00:05", "0:00:08",
]

gameTimeList = [
    "0:00:13", "0:00:19", "0:00:24", "0:00:08", "0:00:09", "0:00:13",
    "0:00:08", "0:00:09", "0:00:13", "0:00:14", "0:00:12",
]

#myCursor.execute("CREATE database test222")
sql = "INSERT into controlGroup(questionTime , gameTime) VALUES (%s, %s)"
val = (qusTimeList[9], gameTimeList[9])
myCursor.execute(sql, val)

mydb.commit()
myCursor.close()
mydb.close()
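# Hedged sketch: the same insert done for every (questionTime, gameTime) pair in a
# single executemany() call. Assumes the same local MySQL server, `spl` database and
# `controlGroup` table used above; it will not run without them.
mydb = mysql.connector.connect(host='localhost', user="root", database="spl")
myCursor = mydb.cursor()
sql = "INSERT INTO controlGroup(questionTime, gameTime) VALUES (%s, %s)"
myCursor.executemany(sql, list(zip(qusTimeList, gameTimeList)))  # inserts all 11 rows at once
mydb.commit()
myCursor.close()
mydb.close()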
python
from toee import * def OnBeginSpellCast( spell ): print "Vampiric Touch OnBeginSpellCast" print "spell.target_list=", spell.target_list print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level game.particles( "sp-necromancy-conjure", spell.caster ) def OnSpellEffect( spell ): print "Vampiric Touch OnSpellEffect" dice = dice_new("1d6") dice.number = min(10, (spell.caster_level) / 2) spell.duration = 600 target = spell.target_list[0] if not (target.obj == spell.caster): attack_successful = spell.caster.perform_touch_attack( target.obj , 1) if attack_successful & D20CAF_HIT: old_hp = target.obj.stat_level_get( stat_hp_current ) target.obj.spell_damage_weaponlike( spell.caster, D20DT_NEGATIVE_ENERGY, dice, D20DAP_UNSPECIFIED, 100, D20A_CAST_SPELL, spell.id, attack_successful, 0 ) new_hp = target.obj.stat_level_get( stat_hp_current ) damage = old_hp - new_hp if damage > (old_hp + 10): damage = old_hp + 10 #spell.caster.condition_add_with_args( 'Temporary_Hit_Points', spell.id, spell.duration, damage ) spell.caster.condition_add_with_args( 'sp-Vampiric Touch', spell.id, spell.duration, damage ) spell.caster.float_mesfile_line( 'mes\\spell.mes', 20005, 0 ) else: #target.obj.float_mesfile_line( 'mes\\spell.mes', 30021 ) game.particles( 'Fizzle', target.obj ) spell.target_list.remove_target( target.obj ) game.particles( 'sp-Vampiric Touch', spell.caster ) def OnBeginRound( spell ): print "Vampiric Touch OnBeginRound" def OnEndSpellCast( spell ): print "Vampiric Touch OnEndSpellCast"
python
from django.contrib import admin

from .models import District, Quarter, Community

admin.site.register(District)
admin.site.register(Quarter)
admin.site.register(Community)
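# Hedged sketch: the decorator form of the same registration, shown for Community.
# The plain registration above is undone first so this runs without AlreadyRegistered;
# no model fields are assumed, only the standard list_per_page option is set.
admin.site.unregister(Community)

@admin.register(Community)
class CommunityAdmin(admin.ModelAdmin):
    list_per_page = 50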
python
# Copyright (C) 2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions # and limitations under the License. # pylint: disable=W1203,C0411,C0413,no-value-for-parameter import argparse import json import logging import os import subprocess import sys import tempfile from mmcv.utils import Config import yaml from eval import main as evaluate sys.path.append(f'{os.path.abspath(os.path.dirname(__file__))}/../../') from tools.misc import train, get_work_dir def parse_args(): """ Parses input args. """ parser = argparse.ArgumentParser() parser.add_argument('config', help='A path to model training configuration file (.py).') parser.add_argument('gpu_num', type=int, help='A number of GPUs to use in training.') parser.add_argument('out', help='A path to output file where models metrics will be saved (.yml).') parser.add_argument('--update_config', help='Update configuration file by parameters specified here.' 'Use quotes if you are going to change several params.', default='') parser.add_argument('--show-dir', '--show_dir', dest='show_dir', help='A directory where images with drawn detected objects will be saved.') return parser.parse_args() def is_clustering_needed(cfg): if cfg.total_epochs > 0: return False if not hasattr(cfg.model, 'bbox_head') or not cfg.model.bbox_head.type == 'SSDHead': return False if not cfg.model.bbox_head.anchor_generator.type == 'SSDAnchorGeneratorClustered': return False return True def cluster(cfg, config_path, update_config): mmdetection_tools = f'{os.path.dirname(__file__)}/../../../../external/mmdetection/tools' logging.info('Clustering started...') widths = cfg.model.bbox_head.anchor_generator.widths n_clust = 0 for w in widths: n_clust += len(w) if isinstance(w, (list, tuple)) else 1 n_clust = ' --n_clust ' + str(n_clust) group_as = '' if isinstance(widths[0], (list, tuple)): group_as = ' --group_as ' + ' '.join([str(len(w)) for w in widths]) config = ' --config ' + config_path tmp_file = tempfile.NamedTemporaryFile(delete=False) out = f' --out {tmp_file.name}' if 'pipeline' in cfg.data.train: img_shape = [t for t in cfg.data.train.pipeline if t['type'] == 'Resize'][0][ 'img_scale'] else: img_shape = [t for t in cfg.data.train.dataset.pipeline if t['type'] == 'Resize'][0][ 'img_scale'] img_shape = f' --image_size_wh {img_shape[0]} {img_shape[1]}' subprocess.run(f'python {mmdetection_tools}/cluster_boxes.py' f'{config}' f'{n_clust}' f'{group_as}' f'{update_config}' f'{img_shape}' f'{out}'.split(' '), check=True) with open(tmp_file.name) as src_file: content = json.load(src_file) widths, heights = content['widths'], content['heights'] if not update_config: update_config = ' --update_config' update_config += f' model.bbox_head.anchor_generator.widths={str(widths).replace(" ", "")}' update_config += f' model.bbox_head.anchor_generator.heights={str(heights).replace(" ", "")}' logging.info('... clustering completed.') return update_config def main(): """ Main function. 
""" logging.basicConfig(level=logging.INFO) args = parse_args() logging.info(f'Commandline:\n{" ".join(sys.argv)}') cfg = Config.fromfile(args.config) update_config = f' --update_config {args.update_config}' if args.update_config else '' if is_clustering_needed(cfg): update_config = cluster(cfg, args.config, update_config) logging.info('Training started ...') training_info = train(args.config, args.gpu_num, update_config) logging.info('... training completed.') work_dir = get_work_dir(cfg, args.update_config) logging.info('Evaluation started ...') evaluate(os.path.join(work_dir, "config.py"), os.path.join(work_dir, "latest.pth"), args.out, '', args.show_dir) logging.info('... evaluation completed.') with open(args.out, 'a+') as dst_file: yaml.dump(training_info, dst_file) if __name__ == '__main__': main()
python
import os import codecs from io import StringIO from pytest import fixture from rave import filesystem class DummyProvider: def __init__(self, files): self.files = files; def list(self): return self.files def has(self, filename): return filename in self.list() def open(self, filename, *args, **kwargs): if not self.has(filename): raise filesystem.FileNotFound(filename) if not self.isfile(filename): raise filesystem.NotAFile(filename) return DummyFile(self, filename) def isfile(self, filename): return self.has(filename) and '.' in filename def isdir(self, filename): return self.has(filename) and not self.isfile(filename) class FaultyProvider(DummyProvider): def __init__(self, files, faulty_files, err=filesystem.FileNotFound): super().__init__(files) self.faulty_files = faulty_files self.error_class = err def open(self, filename, *args, **kwargs): if filename in self.faulty_files: raise self.error_class(filename) return super().open(filename, *args, **kwargs) class DummyFile(filesystem.File): def __init__(self, parent, filename, content='merry saltmas'): self.parent = parent self.filename = filename self._buffer = StringIO(content) self._closed = False def close(self): if self._closed: raise filesystem.FileClosed(self.filename) self._closed = True def opened(self): return not self._closed def readable(self): return True def writable(self): return True def seekable(self): return True def read(self, amount=None): if self.closed: raise filesystem.FileClosed(self.filename) return self._buffer.read(amount) def write(self, buffer): if self.closed: raise filesystem.FileClosed(self.filename) return self._buffer.write(buffer) def seek(self, offset, mode=os.SEEK_CUR): return self._buffer.seek(offset, mode) def tell(self): return self._buffer.tell() class DummyTransformer: CONSUME = False RELATIVE = False def __init__(self, filename, handle): self.filename = filename self.handle = handle self.files = [ self.filename + '.rot13' ] def list(self): return self.files def has(self, filename): return filename in self.list() def open(self, filename, *args, **kwargs): if not self.has(filename): raise filesystem.FileNotFound(filename) return ROT13File(self, filename, self.handle) def isfile(self, filename): return self.has(filename) def isdir(self, filename): return False def relative(self): return self.RELATIVE def consumes(self): return self.CONSUME def valid(self): return True class FaultyTransformer: def __init__(self, filename, handle): raise FileNotFound(filename) class InvalidTransformer(DummyTransformer): def valid(self): return False class ROT13File(filesystem.File): def __init__(self, parent, filename, handle): self.parent = parent self.filename = filename self.handle = handle def close(self): return self.handle.close() def opened(self): return self.handle.opened() def readable(self): return self.handle.readable() def writable(self): return self.handle.writable() def seekable(self): return self.handle.seekable() def read(self, amount=None): return codecs.encode(self.handle.read(amount), 'rot13') def write(self, buffer): return self.handle.write(codecs.encode(buffer, 'rot13')) def seek(self, offset, mode=os.SEEK_CUR): return self.handle.seek(offset, mode) def tell(self): return self.handle.tell() @fixture def fs(): return filesystem.FileSystem() @fixture def dummyfs(): fs = filesystem.FileSystem() fs.mount('/x', DummyProvider({ '/a.txt', '/b.png' })) return fs @fixture def nestedfs(): fs = filesystem.FileSystem() fs.mount('/x', DummyProvider({ '/y', '/y/c.txt', '/y/p.png', '/y/z' })) return fs @fixture 
def parentlessfs(): fs = filesystem.FileSystem() fs.mount('/x', DummyProvider({ '/z/k.txt' })) return fs @fixture def doublefs(): fs = filesystem.FileSystem() fs.mount('/x', DummyProvider({ '/a.txt', '/b.png' })) fs.mount('/y', DummyProvider({ '/c.exe', '/d.jpg' })) return fs @fixture def mergedfs(): fs = filesystem.FileSystem() fs.mount('/x', DummyProvider({ '/a.txt', '/b.png' })) fs.mount('/x', DummyProvider({ '/c.exe', '/d.jpg' })) return fs @fixture def transfs(): fs = dummyfs() fs.transform('\.txt$', DummyTransformer) return fs
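# Small self-contained illustration (not a fixture) of the rot13 codec that ROT13File
# uses for its reads and writes: the transform is its own inverse.
import codecs

assert codecs.encode('merry saltmas', 'rot13') == 'zreel fnygznf'
assert codecs.encode('zreel fnygznf', 'rot13') == 'merry saltmas'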
python
"""Forward measurements from Xiaomi Mi plant sensor via MQTT. See https://github.com/ChristianKuehnel/plantgateway for more details. """ ############################################## # # This is open source software licensed under the Apache License 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # ############################################## from enum import Enum import os import logging import json import time from datetime import datetime from typing import List, Optional import yaml import paho.mqtt.client as mqtt from miflora.miflora_poller import MiFloraPoller, MI_BATTERY, MI_LIGHT, MI_CONDUCTIVITY, MI_MOISTURE, MI_TEMPERATURE, MI_FWVERSION from btlewrap.bluepy import BluepyBackend from plantgw import __version__ class MQTTAttributes(Enum): """Attributes sent in the json dict.""" BATTERY = 'battery' TEMPERATURE = 'temperature' BRIGHTNESS = 'brightness' MOISTURE = 'moisture' CONDUCTIVITY = 'conductivity' TIMESTAMP = 'timestamp' FIRMWARE = 'firmware' # unit of measurement for the different attributes UNIT_OF_MEASUREMENT = { MQTTAttributes.BATTERY: '%', MQTTAttributes.TEMPERATURE: '°C', MQTTAttributes.BRIGHTNESS: 'lux', MQTTAttributes.MOISTURE: '%', MQTTAttributes.CONDUCTIVITY: 'µS/cm', MQTTAttributes.TIMESTAMP: 's', MQTTAttributes.FIRMWARE: '', } # home assistant device classes for the different attributes DEVICE_CLASS = { MQTTAttributes.BATTERY: 'battery', MQTTAttributes.TEMPERATURE: 'temperature', MQTTAttributes.BRIGHTNESS: 'illuminance', MQTTAttributes.MOISTURE: None, MQTTAttributes.CONDUCTIVITY: None, MQTTAttributes.TIMESTAMP: 'timestamp', MQTTAttributes.FIRMWARE: None, } # pylint: disable-msg=too-many-instance-attributes class Configuration: """Stores the program configuration.""" def __init__(self, config_file_path): with open(config_file_path, 'r') as config_file: config = yaml.load(config_file, Loader=yaml.FullLoader) self._configure_logging(config) self.interface = 0 if 'interface' in config: self.interface = config['interface'] self.mqtt_port = 8883 # type: int self.mqtt_user = None # type: Optional[str] self.mqtt_password = None # type: Optional[str] self.mqtt_ca_cert = None # type: Optional[str] self.mqtt_client_id = None # type: Optional[str] self.mqtt_trailing_slash = True # type:bool self.mqtt_timestamp_format = None # type: Optional[str] self.mqtt_discovery_prefix = None # type: Optional[str] self.sensors = [] # type: List[SensorConfig] if 'port' in config['mqtt']: self.mqtt_port = config['mqtt']['port'] if 'user' in config['mqtt']: self.mqtt_user = config['mqtt']['user'] if 'password' in config['mqtt']: self.mqtt_password = config['mqtt']['password'] if 'ca_cert' in config['mqtt']: self.mqtt_ca_cert = config['mqtt']['ca_cert'] if 'client_id' in config['mqtt']: self.mqtt_client_id = config['mqtt']['client_id'] if 'trailing_slash' in config['mqtt'] and not config['mqtt']['trailing_slash']: self.mqtt_trailing_slash = False if 'timestamp_format' in config['mqtt']: self.mqtt_timestamp_format = config['mqtt']['timestamp_format'] self.mqtt_server = config['mqtt']['server'] self.mqtt_prefix = config['mqtt']['prefix'] for sensor_config in config['sensors']: fail_silent = 'fail_silent' in sensor_config self.sensors.append(SensorConfig(sensor_config['mac'], sensor_config.get('alias', None), fail_silent, sensor_config.get('cache_timeout', 600), sensor_config.get('cache_retries', 3))) if 'discovery_prefix' in config['mqtt']: self.mqtt_discovery_prefix = config['mqtt']['discovery_prefix'] @staticmethod def _configure_logging(config): timeform = '%a, %d %b %Y %H:%M:%S' logform = 
'%(asctime)s %(levelname)-8s %(message)s' loglevel = logging.INFO if 'debug' in config: loglevel = logging.DEBUG if 'logfile' in config: logfile = os.path.abspath(os.path.expanduser(config['logfile'])) logging.basicConfig(filename=logfile, level=loglevel, datefmt=timeform, format=logform) else: logging.basicConfig(level=loglevel, datefmt=timeform, format=logform) class SensorConfig: """Stores the configuration of a sensor.""" def __init__(self, mac: str, alias: str = None, fail_silent: bool = False, cache_timeout: int = 600, cache_retries: int = 3): if mac is None: msg = 'mac of sensor must not be None' logging.error(msg) raise Exception('mac of sensor must not be None') self.mac = mac self.alias = alias self.fail_silent = fail_silent self.cache_timeout = cache_timeout self.cache_retries = cache_retries def get_topic(self) -> str: """Get the topic name for the sensor.""" if self.alias is not None: return self.alias return '0x' + self.short_mac def __str__(self) -> str: if self.alias: result = self.alias else: result = self.mac if self.fail_silent: result += ' (fail silent)' return result @property def short_mac(self): """Get the sensor mac without ':' in it.""" return self.mac.replace(':', '') @staticmethod def get_name_string(sensor_list) -> str: """Convert a list of sensor objects to a nice string.""" return ', '.join([str(sensor) for sensor in sensor_list]) class PlantGateway: """Main class of the module.""" def __init__(self, config_file_path: str = '~/.plantgw.yaml'): config_file_path = os.path.abspath(os.path.expanduser(config_file_path)) self.config = Configuration(config_file_path) # type: Configuration logging.info('PlantGateway version %s', __version__) logging.info('loaded config file from %s', config_file_path) self.mqtt_client = None self.connected = False # type: bool def start_client(self): """Start the mqtt client.""" if not self.connected: self._start_client() def stop_client(self): """Stop the mqtt client.""" if self.connected: self.mqtt_client.disconnect() self.connected = False self.mqtt_client.loop_stop() logging.info('Disconnected MQTT connection') def _start_client(self): self.mqtt_client = mqtt.Client(self.config.mqtt_client_id) if self.config.mqtt_user is not None: self.mqtt_client.username_pw_set(self.config.mqtt_user, self.config.mqtt_password) if self.config.mqtt_ca_cert is not None: self.mqtt_client.tls_set(self.config.mqtt_ca_cert, cert_reqs=mqtt.ssl.CERT_REQUIRED) def _on_connect(client, _, flags, return_code): self.connected = True logging.info("MQTT connection returned result: %s", mqtt.connack_string(return_code)) self.mqtt_client.on_connect = _on_connect self.mqtt_client.connect(self.config.mqtt_server, self.config.mqtt_port, 60) self.mqtt_client.loop_start() def _publish(self, sensor_config: SensorConfig, poller: MiFloraPoller): self.start_client() state_topic = self._get_state_topic(sensor_config) data = { MQTTAttributes.BATTERY.value: poller.parameter_value(MI_BATTERY), MQTTAttributes.TEMPERATURE.value: '{0:.1f}'.format(poller.parameter_value(MI_TEMPERATURE)), MQTTAttributes.BRIGHTNESS.value: poller.parameter_value(MI_LIGHT), MQTTAttributes.MOISTURE.value: poller.parameter_value(MI_MOISTURE), MQTTAttributes.CONDUCTIVITY.value: poller.parameter_value(MI_CONDUCTIVITY), MQTTAttributes.FIRMWARE.value: poller.parameter_value(MI_FWVERSION), MQTTAttributes.TIMESTAMP.value: datetime.now().isoformat(), } for key, value in data.items(): logging.debug("%s: %s", key, value) if self.config.mqtt_timestamp_format is not None: data['timestamp'] = 
datetime.now().strftime(self.config.mqtt_timestamp_format) json_payload = json.dumps(data) self.mqtt_client.publish(state_topic, json_payload, qos=1, retain=True) logging.info('sent data to topic %s', state_topic) logging.info('payload: %s', data) def _get_state_topic(self, sensor_config: SensorConfig) -> str: prefix_fmt = '{}/{}' if self.config.mqtt_trailing_slash: prefix_fmt += '/' prefix = prefix_fmt.format(self.config.mqtt_prefix, sensor_config.get_topic()) return prefix def process_mac(self, sensor_config: SensorConfig): """Get data from one Sensor.""" logging.info('Getting data from sensor %s', sensor_config.get_topic()) poller = MiFloraPoller(sensor_config.mac, BluepyBackend, sensor_config.cache_timeout, sensor_config.cache_retries) self.announce_sensor(sensor_config) self._publish(sensor_config, poller) def process_all(self): """Get data from all sensors.""" next_list = self.config.sensors timeout = 1 # initial timeout in seconds max_retry = 6 # number of retries retry_count = 0 while retry_count < max_retry and next_list: # if this is not the first try: wait some time before trying again if retry_count > 0: logging.info('try %d of %d: could not process sensor(s) %s. Waiting %d sec for next try', retry_count, max_retry, SensorConfig.get_name_string(next_list), timeout) time.sleep(timeout) timeout *= 2 # exponential backoff-time current_list = next_list retry_count += 1 next_list = [] for sensor in current_list: try: self.process_mac(sensor) # pylint: disable=bare-except, broad-except except Exception as exception: next_list.append(sensor) # if it failed, we'll try again in the next round msg = "could not read data from {} ({}) with reason: {}".format( sensor.mac, sensor.alias, str(exception)) if sensor.fail_silent: logging.error(msg) logging.warning('fail_silent is set for sensor %s, so not raising an exception.', sensor.alias) else: logging.exception(msg) # print(msg) # return sensors that could not be processed after max_retry return next_list def announce_sensor(self, sensor_config: SensorConfig): """Announce the sensor via Home Assistant MQTT Discovery. see https://www.home-assistant.io/docs/mqtt/discovery/ """ if self.config.mqtt_discovery_prefix is None: return self.start_client() self_name = 'plantgateway' device_name = '0x{}'.format(sensor_config.short_mac) for attribute in MQTTAttributes: unique_id = '{}_{}_{}'.format(self_name, device_name, attribute.value) topic = '{}/sensor/{}_{}/{}/config'.format(self.config.mqtt_discovery_prefix, self_name, device_name, attribute.value) payload = { 'state_topic': self._get_state_topic(sensor_config), 'json_attributes_topic': self._get_state_topic(sensor_config), 'unit_of_measurement': UNIT_OF_MEASUREMENT[attribute], 'value_template': '{{value_json.'+attribute.value+'}}', 'unique_id': unique_id, 'device': { 'identifiers': [ '{}_{}'.format(self_name, device_name), ], 'name': device_name, 'sw_version': 'plantgw dev', 'model': "MiFlora compatible plant humidity, brightness, conductivity, temperature sensor", 'manufacturer': 'to be deternmined', } } if sensor_config.alias is not None: payload['name'] = '{}_{}'.format(sensor_config.alias, attribute.value) else: payload['name'] = '{}_{}'.format(device_name, attribute.value) if DEVICE_CLASS[attribute] is not None: payload['device_class'] = DEVICE_CLASS[attribute] json_payload = json.dumps(payload) self.mqtt_client.publish(topic, json_payload, qos=1, retain=False) logging.info('sent sensor config to topic %s', topic) logging.info('payload: %s', payload)
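# Hedged standalone sketch of the retry pattern used in PlantGateway.process_all(): an
# exponential backoff where the wait doubles after every failed round. The numbers mirror
# the constants above (initial 1 s timeout, 6 retries); the actual sleep is skipped here.
def backoff_schedule(initial=1, max_retry=6):
    timeout, waits = initial, []
    for _ in range(max_retry - 1):
        waits.append(timeout)
        timeout *= 2
    return waits

print(backoff_schedule())  # [1, 2, 4, 8, 16]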
python
#
# This file contains the Python code from Program 6.2 of
# "Data Structures and Algorithms
# with Object-Oriented Design Patterns in Python"
# by Bruno R. Preiss.
#
# Copyright (c) 2003 by Bruno R. Preiss, P.Eng. All rights reserved.
#
# http://www.brpreiss.com/books/opus7/programs/pgm06_02.txt
#
class StackAsArray(Stack):

    def __init__(self, size = 0):
        super(StackAsArray, self).__init__()
        self._array = Array(size)

    def purge(self):
        while self._count > 0:
            self._array[self._count] = None
            self._count -= 1
    #...
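# Hedged, self-contained mini version (not Preiss's code, which is elided above at "#..."):
# the same idea with a plain Python list standing in for the book's fixed-size Array class.
class SimpleArrayStack(object):
    def __init__(self, size=0):
        self._array = [None] * size  # fixed capacity; no overflow check in this sketch
        self._count = 0

    def push(self, obj):
        self._array[self._count] = obj
        self._count += 1

    def pop(self):
        self._count -= 1
        result = self._array[self._count]
        self._array[self._count] = None
        return result

s = SimpleArrayStack(4)
s.push('a'); s.push('b')
print(s.pop())  # 'b'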
python
print('----->CHALLENGE 48<-----')
print('I will show you the sum of all odd numbers that are multiples of 3 in the range 1 to 500!')
soma = 0
for c in range(0, 501):
    if c > 0 and c % 2 != 0 and c % 3 == 0:
        soma += c
print(soma)
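# Equivalent one-liner as a quick cross-check of the loop above: sum the odd multiples
# of 3 in the same range with a generator expression.
print(sum(c for c in range(1, 501) if c % 2 != 0 and c % 3 == 0))  # 20667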
python
################################################################# # Name: randDLA.py # # Authors: Michael Battaglia # # Function: Program simulates diffusion limited aggregation # # using Monte Carlo Methods. # ################################################################# #essential modules import numpy as np import matplotlib.pyplot as plt #function: 2D diffusion limited random walk def randDLA(lims, sink=False, source=False, periodic=True, N=False): """ sink: position vector for aggregation sink if False, then boundary is sink source: position vector for particle source if False, then particles randomly appear on Free spaces lims: vector of dimension lengths if False, then periodic boundaries N: number of participating particles if False, then spawn particles until source or boundary is taken """ if sink is False: if periodic: #there will be no aggregate print("No aggregate can form") return float("NaN") if N is False: if source is False: if not periodic: #aggregate will never end print("No end condition for aggregate") return float("NaN") #initialize list of occupied positions occupied_pos = [] anchored=np.zeros(lims,dtype=int) #generate particles generate = True while generate: if source: #specified source pos = pos_0 else: #random source particle pos = np.array([np.random.randint(0,lim) for lim in lims],dtype=int) if not anchored[pos[0]][pos[1]]: #take each step if position is not in a stuck position while not isStuck(pos, lims, sink, periodic, anchored): #take a random step in a random direction with a random orientation step = np.zeros(len(pos)) step[np.random.randint(0,len(pos))] = 1-2*np.random.randint(0,2) pos = pos + step #impose periodic boundary pos = np.mod(pos,lims).astype(int) if len(occupied_pos)==0: print("Position:", pos) occupied_pos.append(pos) anchored[pos[0],pos[1]] = 1 print("Anchored:",len(occupied_pos)) print("Anchor pos:",pos) if N: #generate until N particles if len(occupied_pos) == N: #generated N particles generate = False else: #generate until if source: #source covered triggers end if all(pos == source): #occupied source generate = False if periodic: #boundary covered triggers end if any(pos==lims-1) or any(pos==0): #occupied boundary generate = False #return list of occupied positions return anchored, np.array(occupied_pos) #function: check if particle is stuck (to edge, or other particle) def isStuck(pos, lims, sink, periodic, anchored): xp = pos[0] yp = pos[1] if not periodic: #not periodic, gets stuck on wall if any(pos==lims-1) or any(pos==0): #if the particle has reached a wall return True if all(pos==sink): #if particle hits sink return True if anchored[xp-1:xp+2,yp-1:yp+2].any(): #if particle is adjacent to an anchored particle return True else: #particle is free return False #function: animated plot of 2D random walk def D2plot(pos, animate=0.01): if animate: for i in range(len(pos)): plt.cla() plt.title('diffusion limited aggregation') plt.scatter(pos.T[0][:i+1],pos.T[1][:i+1]) plt.xlabel('x position') plt.ylabel('y position') plt.draw() plt.pause(animate) else: plt.title('diffusion limited aggregation') plt.scatter(pos.T[0],pos.T[1]) plt.xlabel('x position') plt.ylabel('y position') plt.show() #function: evaluate fractal dimension def fracDim(image): print(image.shape) cen = (np.array([image.shape[0],image.shape[1]])-1)/2 r = range(1, min(cen)+1) m = np.zeros(len(r)) for i in range(len(r)): subimage = image[cen[0]-r[i]:cen[0]+r[i]+1,cen[1]-r[i]:cen[1]+r[i]+1] m[i] = subimage.sum() plt.title("fractal dimension") plt.plot(r,m) plt.ylabel('mass') 
plt.xlabel('radius') plt.yscale('log') plt.xscale('log') plt.show() #function: main if __name__ == '__main__': #size of box L = np.array([201,201]) #central position central = (L-1)/2 #take a random walk until aggregation reaches edge image, pos = randDLA(L, central) #plot path D2plot(pos, animate=True) #plot fractal dimension fracDim(image)
python
#import
import random
import os
import numpy

dic = {}
with open("points3D.txt", "r") as n:
    for line in n:
        a = line.split(" ")
        temp = []
        temp.append(float(a[1]))
        temp.append(float(a[2]))
        temp.append(float(a[3]))
        dic[a[0]] = temp[:]
print(dic["1"])
#end
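# Hedged alternative sketch: build the same {id: [x, y, z]} mapping in one dict
# comprehension. The length and '#' checks are assumptions, added in case the file
# contains header/comment lines or blank lines.
with open("points3D.txt", "r") as n:
    points = {
        parts[0]: [float(parts[1]), float(parts[2]), float(parts[3])]
        for parts in (line.split(" ") for line in n)
        if len(parts) >= 4 and not parts[0].startswith("#")
    }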
python
def getone(coll, key, default=None):
    try:
        value = coll[key]
    except (IndexError, KeyError, TypeError):
        # A list of exception classes does not catch anything; a tuple is required.
        return default
    else:
        return value
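# Usage examples: getone() works uniformly for lists, dicts and even None.
print(getone([10, 20, 30], 1))        # 20
print(getone([10, 20, 30], 5, 0))     # 0       (IndexError swallowed)
print(getone({'a': 1}, 'b', 'none'))  # 'none'  (KeyError swallowed)
print(getone(None, 'x', 'none'))      # 'none'  (TypeError swallowed)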
python
#!/usr/bin/env python2.7 # Copyright 2019 The Fuchsia Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from __future__ import print_function import argparse import itertools import json import os import sys SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPTS_DIR = os.path.dirname(SCRIPT_DIR) FUCHSIA_ROOT = os.path.dirname(SCRIPTS_DIR) # The maximum number of size percentage points a binary is allowed to drop. # A greater amount will raise a flag. MAX_SIZE_DECREASE = 10 # The maximum number of size percentage points a binary is allowed to gain. # A greater amount will raise a flag. MAX_SIZE_INCREASE = 1 class Type(object): AUX = 'aux' IMAGE = 'image' TESTS = 'tests' @classmethod def all(cls): return [cls.AUX, cls.IMAGE, cls.TESTS] class Origin(object): LEGACY = 'legacy' MIGRATED = 'migrated' @classmethod def all(cls): return [cls.LEGACY, cls.MIGRATED] class Manifest(object): '''Lists the contents of a manifest file''' def __init__(self, origin, type, contents): self.origin = origin self.type = type self.contents = contents def __repr__(self): return 'M[%s-%s]' % (self.origin, self.type) class CustomJSONEncoder(json.JSONEncoder): '''A JSON encoder that handles sets and sorts lists.''' def default(self, object): if isinstance(object, FileDataSet) or isinstance(object, FileData): return object.to_json() return json.JSONEncoder.default(self, object) class FileData(object): '''Represents a file referred to in a manifest.''' def __init__(self, path, size=None): self.path = path self.size = size if size else os.path.getsize(path) def __eq__(self, other): return self.path == other.path def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return len(hash(self.path)) def __repr__(self): return 'F[' + self.path + ']' def to_json(self): return { 'path': self.path, 'size': self.size, } @classmethod def from_json(cls, input): return FileData(input['path'], input['size']) class FileDataSet(object): '''Represents a set of files.''' def __init__(self): # map { name --> FileData } self.files = {} def add(self, name, file): if name == 'lib/libdriver.so': # libdriver is a complicated hydra whose many heads we don't need to # worry about here. 
return if name in self.files and file != self.files[name]: print('Error: different file under path ' + name + ':') print(' - ' + str(file)) print(' - ' + str(self.files[name])) return self.files[name] = file def filenames(self): return set(self.files.keys()) def get_file(self, name): return self.files[name] def __len__(self): return len(self.files) def to_json(self): return self.files @classmethod def from_json(cls, input): result = FileDataSet() for name, data in input.iteritems(): result.add(name, FileData.from_json(data)) return result class Summary(object): '''Data for a particular state of the build.''' def __init__(self): # map { type --> FileDataSet } self.objects = {} def add_objects(self, type, objects): dataset = self.objects.setdefault(type, FileDataSet()) for name, path in objects.iteritems(): dataset.add(name, FileData(path)) def get_objects(self, type): return self.objects[type] def __repr__(self): items = ['%s=%s' % (t, len(o)) for (t, o) in self.objects.iteritems()] return 'S[' + ', '.join(items) + ']' def to_json(self, output): json.dump(self.objects, output, cls=CustomJSONEncoder, indent=2, sort_keys=True, separators=(',', ': ')) @classmethod def from_json(cls, input): result = Summary() data = json.load(input) for type in Type.all(): result.objects[type] = FileDataSet.from_json(data[type]) return result def generate_summary(manifests, base_dir): '''Generates a summary based on the manifests found in the build.''' result = Summary() for type in Type.all(): for manifest in filter(lambda m: m.type == type, manifests): contents = manifest.contents.copy() contents = dict([(n, os.path.join(base_dir, p)) for (n, p) in contents.iteritems()]) result.add_objects(type, contents) return result def report(manifest, is_error, message): type = 'Error' if is_error else 'Warning' print('%s%s%s' % (type.ljust(10), manifest.ljust(8), message)) def compare_summaries(reference, current): '''Compares summaries for two states of the build.''' match = True for type in Type.all(): reference_objects = reference.get_objects(type) current_objects = current.get_objects(type) reference_names = reference_objects.filenames() current_names = current_objects.filenames() # Missing and new files. if reference_names != current_names: match = False removed = reference_names - current_names if removed: for element in removed: report(type, True, 'element removed: ' + element) added = current_names - reference_names if added: for element in added: report(type, True, 'element removed: ' + element) # Size changes. 
for name in reference_names & current_names: reference_size = reference_objects.get_file(name).size current_size = current_objects.get_file(name).size if current_size == reference_size: continue is_diff_positive = current_size > reference_size diff_percentage = 100 * (current_size - reference_size) / reference_size is_error = False if (diff_percentage < -MAX_SIZE_DECREASE or diff_percentage > MAX_SIZE_INCREASE): match = False is_error = True report(type, is_error, 'size change for ' + name + ': ' + ('+' if is_diff_positive else '-') + str(abs(diff_percentage)) + '%') return match def main(): parser = argparse.ArgumentParser( description='Performs verifications after moving an element from ' 'ZN to GN.') parser.add_argument('--build-dir', help='path to the GN build dir', default=os.path.join(FUCHSIA_ROOT, 'out', 'default')) parser.add_argument('--summary', help='path to the summary file to generate') parser.add_argument('--reference', help='path to the summary file to compare against') args = parser.parse_args() if not args.summary and not args.reference: print('At least one of --summary or --reference needs to be set.') parser.print_help() return 1 # Load up manifests from the current build. manifests = [] for origin in Origin.all(): for type in Type.all(): path = os.path.join(args.build_dir, 'obj', 'build', 'unification', 'images', '%s-%s.unification.manifest' % (origin, type)) with open(path, 'r') as manifest_file: contents = dict(map(lambda line: line.strip().split('=', 1), manifest_file.readlines())) manifests.append(Manifest(origin, type, contents)) # Generate a summary for the current build. summary = generate_summary(manifests, args.build_dir) # If applicable, save the current build's summary. if args.summary: dirname = os.path.dirname(args.summary) if not os.path.exists(dirname): os.makedirs(dirname) with open(args.summary, 'w') as output_file: summary.to_json(output_file) # If applicable, compare the current summary to a previously-saved one. if args.reference: with open(args.reference, 'r') as input_file: reference = Summary.from_json(input_file) if not compare_summaries(reference, summary): print('Error: summaries do not match!') return 1 return 0 if __name__ == '__main__': sys.exit(main())
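# Hedged usage note (script name and paths are placeholders): a typical flow captures a
# baseline summary from a known-good build, then compares a later build against it:
#   ./verify_element_move.py --summary /tmp/baseline.json
#   ./verify_element_move.py --reference /tmp/baseline.json
# main() prints the parser help and returns an error if neither flag is provided.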
python
#!/usr/bin/env python import vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Create renderer stuff # ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create pipeline # cow = vtk.vtkBYUReader() cow.SetGeometryFileName("" + str(VTK_DATA_ROOT) + "/Data/Viewpoint/cow.g") cowMapper = vtk.vtkPolyDataMapper() cowMapper.SetInputConnection(cow.GetOutputPort()) cowActor = vtk.vtkActor() cowActor.SetMapper(cowMapper) cowActor.GetProperty().SetDiffuseColor(0.9608,0.8706,0.7020) cowAxesSource = vtk.vtkAxes() cowAxesSource.SetScaleFactor(10) cowAxesSource.SetOrigin(0,0,0) cowAxesMapper = vtk.vtkPolyDataMapper() cowAxesMapper.SetInputConnection(cowAxesSource.GetOutputPort()) cowAxes = vtk.vtkActor() cowAxes.SetMapper(cowAxesMapper) ren1.AddActor(cowAxes) cowAxes.VisibilityOff() # Add the actors to the renderer, set the background and size # ren1.AddActor(cowActor) ren1.SetBackground(0.1,0.2,0.4) renWin.SetSize(320,240) ren1.ResetCamera() ren1.GetActiveCamera().Azimuth(0) ren1.GetActiveCamera().Dolly(1.4) ren1.ResetCameraClippingRange() cowAxes.VisibilityOn() renWin.Render() # render the image # # prevent the tk window from showing up then start the event loop # def RotateX (__vtk__temp0=0,__vtk__temp1=0): cowActor.SetOrientation(0,0,0) ren1.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() i = 1 while i <= 6: cowActor.RotateX(60) renWin.Render() renWin.Render() i = i + 1 renWin.EraseOn() def RotateY (__vtk__temp0=0,__vtk__temp1=0): cowActor.SetOrientation(0,0,0) ren1.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() i = 1 while i <= 6: cowActor.RotateY(60) renWin.Render() renWin.Render() i = i + 1 renWin.EraseOn() def RotateZ (__vtk__temp0=0,__vtk__temp1=0): cowActor.SetOrientation(0,0,0) ren1.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() i = 1 while i <= 6: cowActor.RotateZ(60) renWin.Render() renWin.Render() i = i + 1 renWin.EraseOn() def RotateXY (__vtk__temp0=0,__vtk__temp1=0): cowActor.SetOrientation(0,0,0) cowActor.RotateX(60) ren1.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() i = 1 while i <= 6: cowActor.RotateY(60) renWin.Render() renWin.Render() i = i + 1 renWin.EraseOn() RotateX() RotateY() RotateZ() RotateXY() renWin.EraseOff() # --- end of script --
python
from collections import defaultdict

with open('day10/input.txt', 'r') as file:
    data = sorted([int(x.strip()) for x in file.readlines()])

data = [0] + data
data.append(data[-1] + 3)

jolt_1, jolt_3 = 0, 0
for i in range(len(data)):
    current = data[i - 1]
    if (data[i] - current) == 1:
        jolt_1 += 1
    elif (data[i] - current) == 3:
        jolt_3 += 1

jumps = [1, 2, 3]
routes = defaultdict(int)  # default value is 0
routes[0] = 1
for i in data[1:]:
    routes[i] = sum([routes[i - j] for j in jumps])

print(f"Result 1: {jolt_1 * jolt_3}\nResult 2: {routes[data[-1]]}")
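# Hedged mini-example of the same counting recurrence on a toy adapter list [1, 2, 3]
# with the wall (0) and the device (max + 3) added, exactly as above: each joltage can
# be reached from the ones 1, 2 or 3 below it.
toy = [0, 1, 2, 3, 6]
toy_routes = defaultdict(int)
toy_routes[0] = 1
for i in toy[1:]:
    toy_routes[i] = sum(toy_routes[i - j] for j in jumps)
print(toy_routes[toy[-1]])  # 4 possible arrangements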
python
from django.contrib.auth.hashers import make_password from rest_framework import serializers from .models import User from rest_framework_simplejwt.serializers import TokenObtainPairSerializer from rest_framework import response, status class RegisterSerializer(serializers.ModelSerializer): class Meta: model = User fields = ('username', 'email', 'password') def validate_password(self, value: str) -> str: return make_password(value) class MyTokenObtainPairSerializer(TokenObtainPairSerializer): @classmethod def get_token(cls, user): token = super(MyTokenObtainPairSerializer, cls).get_token(user) return token class UsersSerializers(serializers.ModelSerializer): class Meta: model = User fields = ('id', 'username', 'email', 'phone_number') class UserUpdateSerializer(serializers.ModelSerializer): class Meta: model = User fields = ('username', 'email', 'phone_number')
python
import tensorflow as tf # GPU版Tensor Flowを、特定のGPUで実行する GPU_INDEX = 2 tf.config.set_soft_device_placement(True) tf.debugging.set_log_device_placement(True) gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: try: logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") print(gpus) print(logical_gpus) except RuntimeError as e: print(e) try: with tf.device('/device:GPU:{}'.format(GPU_INDEX)): # GPUの番号を指定する # MNIST mnist = tf.keras.datasets.mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(512, activation=tf.nn.relu), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ]) model.compile( optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'] ) model.fit(x_train, y_train, epochs=5) model.evaluate(x_test, y_test) except RuntimeError as e: print(e)
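# Hedged alternative (left commented out because visible devices must be configured
# before the GPUs above are first initialized): restrict the process to one physical
# GPU up front, so the model code can use the default device instead of tf.device(...).
#
# if gpus:
#     tf.config.experimental.set_visible_devices(gpus[GPU_INDEX], 'GPU')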
python
import pytest from package_one.module_one import IntegerAdder @pytest.fixture def adder(): print("Test set-up!") yield IntegerAdder() print("Test tear-down") def test_integer_adder(adder): assert adder.add(1, 2) == 3 """ In case you'd like to declare a fixture that executes only once per module, then declare a fixture like this: @pytest.fixture(scope="module") """ @pytest.mark.parametrize( "operand_one, operand_two, expected_result", [ (1, 2, 3), (10, 20, 30), (-5, -10, -15) ] ) def test_integer_adder_complex( adder, operand_one, operand_two, expected_result ): assert adder.add(operand_one, operand_two) == expected_result
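# Hedged usage note (the file path is a placeholder): running this module with
#   pytest -v test_integer_adder.py
# collects four tests: test_integer_adder plus the three parametrized cases
# test_integer_adder_complex[1-2-3], [10-20-30] and [-5--10--15].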
python
def snail(array):
    results = []
    while len(array) > 0:
        results += array[0]
        del array[0]
        if len(array) > 0:
            for i in array:
                results += [i[-1]]
                del i[-1]
            if array[-1]:
                results += array[-1][::-1]
                del array[-1]
            for i in reversed(array):
                results += [i[0]]
                del i[0]
    return results
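# Usage example: the classic 3x3 case, read clockwise from the top-left corner.
print(snail([[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]))  # [1, 2, 3, 6, 9, 8, 7, 4, 5]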
python
import os from google.appengine.ext.webapp import template from base_controller import CacheableHandler from models.event import Event class EventWizardHandler(CacheableHandler): CACHE_VERSION = 1 CACHE_KEY_FORMAT = "event_wizard" def __init__(self, *args, **kw): super(EventWizardHandler, self).__init__(*args, **kw) self.cache_expiration = 60 * 60 def _render(self, *args, **kw): path = os.path.join(os.path.dirname(__file__), "../templates/eventwizard.html") selected_event_key = self.request.get('event', '') if selected_event_key and Event.validate_key_name(selected_event_key): selected_event = Event.get_by_id(selected_event_key) if selected_event: self.template_values['selected_event'] = selected_event return template.render(path, self.template_values) class ReactEventWizardHandler(CacheableHandler): CACHE_VERSION = 1 CACHE_KEY_FORMAT = "event_wizard_react" def __init__(self, *args, **kw): super(ReactEventWizardHandler, self).__init__(*args, **kw) self.cache_expiration = 60 * 60 def _render(self, *args, **kw): path = os.path.join(os.path.dirname(__file__), "../templates/react-eventwizard.html") return template.render(path, self.template_values)
python
""" MIT License Copyright (c) 2021 martinpflaum Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ #%% import torchvision import math import torch import torch.nn as nn import numpy as np import lightly import pandas as pd from data.data_utils import get_train_val_test_split,open_image,\ post_load_default,post_load_scene_depth,post_load_normal,\ post_load_lightning,load_edge_tensor,load_pickle,save_pickle from perceptual_loss import PerceptualLoss from torch.utils.data import Dataset class GaussianNoise: """ this is from lightly https://docs.lightly.ai Applies random Gaussian noise to a tensor. The intensity of the noise is dependent on the mean of the pixel values. See https://arxiv.org/pdf/2101.04909.pdf for more information. """ def __call__(self, sample: torch.Tensor) -> torch.Tensor: mu = sample.mean() snr = np.random.randint(low=4, high=8) sigma = mu / snr noise = torch.normal(torch.zeros(sample.shape), sigma) return sample + noise def depth_calc_std_mean(img_data_set_root): train,val,test = get_train_val_test_split("./data_splits/train_test_split.csv") out = [] for name in train: scene_depth = post_load_scene_depth(open_image(name,"scene_depth",img_data_set_root)).reshape(-1) out += [scene_depth] out = torch.cat(out).reshape(-1) return torch.std_mean(out, unbiased=False) def get_all(file_name): df = pd.read_csv(file_name) df = df[["train_val_test"]] split = np.array(df) return split class BrainDatasetSceneDepth(Dataset): def __init__(self,img_data_set_root,indicies) : super().__init__() self.img_data_set_root = img_data_set_root self.indicies = indicies self.size = len(indicies) def __len__(self): #print("get_len") return self.size def __getitem__(self, index): name = self.indicies[index] return post_load_scene_depth(open_image(name,"scene_depth",self.img_data_set_root)) img_data_set_root="D:/ImageDatasetBig" indicies = get_all("./data_splits/train_test_split.csv") indicies #%% dset = BrainDatasetSceneDepth(img_data_set_root,indicies) #%% num_workers = 0 batch_size = 128 seed = 1 epochs = 50 input_size = 64 # dimension of the embeddings num_ftrs = 512 # dimension of the output of the prediction and projection heads out_dim = proj_hidden_dim = 512 # the prediction head uses a bottleneck architecture pred_hidden_dim = 128 # use 2 layers in the projection head num_mlp_layers = 2 mean,std = torch.tensor(0),torch.tensor(1) mean,std = depth_calc_std_mean(img_data_set_root) mean,std = mean.item(),std.item() mean,std = (mean,mean,mean),(std,std,std) transform = torchvision.transforms.Compose([ 
torchvision.transforms.Grayscale(num_output_channels=3), torchvision.transforms.RandomResizedCrop(size=(64,64), scale=(0.2, 1.0)), torchvision.transforms.RandomHorizontalFlip(p=0.5), torchvision.transforms.RandomVerticalFlip(p=0.5), torchvision.transforms.GaussianBlur(21), torchvision.transforms.ToTensor(), torchvision.transforms.Normalize(mean,std), GaussianNoise(), ]) collate_fn = lightly.data.BaseCollateFunction(transform) torch.manual_seed(0) np.random.seed(0) # set the path to the dataset path_to_data = 'C:/Users/Martin/Downloads/test' dataset_train_simsiam = lightly.data.LightlyDataset( input_dir=path_to_data ) dataloader_train_simsiam = torch.utils.data.DataLoader( dataset_train_simsiam, batch_size=batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True, num_workers=num_workers ) resnet = torchvision.models.resnet18() backbone = nn.Sequential(*list(resnet.children())[:-1]) # create the SimSiam model using the backbone from above model = lightly.models.SimSiam( backbone, num_ftrs=num_ftrs, proj_hidden_dim=pred_hidden_dim, pred_hidden_dim=pred_hidden_dim, out_dim=out_dim, num_mlp_layers=num_mlp_layers ) # SimSiam uses a symmetric negative cosine similarity loss criterion = lightly.loss.SymNegCosineSimilarityLoss() # scale the learning rate lr = 0.05 * batch_size / 256 # use SGD with momentum and weight decay optimizer = torch.optim.SGD( model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4 ) device = 'cuda' if torch.cuda.is_available() else 'cpu' model.to(device) avg_loss = 0. avg_output_std = 0. for e in range(epochs): for (x0, x1), _, _ in dataloader_train_simsiam: # move images to the gpu x0 = x0.to(device) x1 = x1.to(device) # run the model on both transforms of the images # the output of the simsiam model is a y containing the predictions # and projections for each input x y0, y1 = model(x0, x1) # backpropagation loss = criterion(y0, y1) loss.backward() optimizer.step() optimizer.zero_grad() # calculate the per-dimension standard deviation of the outputs # we can use this later to check whether the embeddings are collapsing output, _ = y0 output = output.detach() output = torch.nn.functional.normalize(output, dim=1) output_std = torch.std(output, 0) output_std = output_std.mean() # use moving averages to track the loss and standard deviation w = 0.9 avg_loss = w * avg_loss + (1 - w) * loss.item() avg_output_std = w * avg_output_std + (1 - w) * output_std.item() # the level of collapse is large if the standard deviation of the l2 # normalized output is much smaller than 1 / sqrt(dim) collapse_level = max(0., 1 - math.sqrt(out_dim) * avg_output_std) # print intermediate results print(f'[Epoch {e:3d}] ' f'Loss = {avg_loss:.2f} | ' f'Collapse Level: {collapse_level:.2f} / 1.00') model = PerceptualLoss(model.backbone.cpu(),mean,std).cpu() save_pickle(model,"perceptual_loss.pth") # %%
python
__author__ = "Jeremy Nelson" import csv import datetime import json import urllib2 from json_ld.utilities.creator import JSONLinkedDataCreator class JohnPeabodyHarringtonJSONLinkedDataCreator(JSONLinkedDataCreator): CC_URI = 'http://id.loc.gov/authorities/names/n84168445' LOC_URI = 'http://id.loc.gov/authorities/names/no2008011986' def __init__(self, creator_id=None, csv_filename=None): """Initializes instance of John Peabody Harrington JSON-LD creator Parameters: creator_id -- LOC ID of creator, defaults to Colorado College csv_filename -- Filename of CSV file, defaults to None """ if creator_id is None: creator_id = self.CC_URI super(JohnPeabodyHarringtonJSONLinkedDataCreator, self).__init__( **{'creator_id': creator_id}) self.title_prefix = 'John P. Harrington Papers 1907-1959 (some earlier)' jph_csv_reader = csv.DictReader(open(csv_filename, 'rb')) for row in jph_csv_reader: self.records.append(row) def __generate_topics__(self, lcsh_subjects, work_dict): """Internal function generates a list of topics from a list of LCSH uri Parameters: lcsh_subjects -- list of http://id.loc.gov subject uri work_dict -- Dictionary of properties for the Creative Work """ if len(lcsh_subjects) > 0: work_dict['bf:subject'] = [] for subject_uri in lcsh_subjects: uri = subject_uri.replace('"','').strip() if not self.topics.has_key(uri): loc_uri = json.load( urllib2.urlopen('{0}.json'.format(uri))) loc_key = u"<{0}>".format(uri) self.topics[uri] = { '@type': 'bf:Topic', 'prov:Generation': self.__generate_provenance__(), 'bf:label': loc_uri[loc_key].get( u'<http://www.w3.org/2004/02/skos/core#prefLabel>', [{'value':uri},])[0].get('value'), 'bf:identifier': uri, 'bf:hasAuthority': self.LOC_URI} lcc_classification = loc_uri[loc_key].get( u'<http://www.loc.gov/mads/rdf/v1#classification>', None) if lcc_classification is not None: class_value = lcc_classification[0].get('value') if not work_dict.has_key('bf:class-lcc'): work_dict['bf:class-lcc'] = [class_value, ] else: work_dict['bf:class-lcc'].append(class_value) work_dict['bf:subject'].append(self.topics[uri]) return work_dict def generate(self): "Linked Data Cataloging for John Peabody Harrington Collection" for row in self.records: work_dict = self.__generate_work__( creative_work_class='bf:Manuscript') instance_dict = self.__generate_instance__('online resource') instance_dict['bf:publication'] = { 'providerName': 'National Anthropological Archives', 'identifier': 'http://id.loc.gov/authorities/names/n50065490'} if len(row.get('Part')) > 0: title_prefix = '{0} {1}'.format(self.title_prefix, row.get('Part')) else: title_prefix = self.title_prefix title_str = '{0} Microfilm {1}, Reel {2}'.format( title_prefix, row.get('Microfilm #'), row.get('Reel #')) title_parts = row.get('Title').replace('"','').split(",") if len(title_parts) > 1: sub_titles = [] for sub in title_parts: sub = sub.strip() sub_titles.append(sub) title_str = '{0} "{1}'.format(title_str, '", "'.join(sub_titles)) title_str += '"' elif len(title_parts) == 1: title_str = "{0} {1}".format(title_str, title_parts[0]) work_dict['bf:title'] = {'@type': 'bf:TitleEntity', 'bf:titleValue': title_str, 'bf:label': title_str} instance_dict['schema:contentUrl'] = '/pdf/{0}'.format( row.get('Filename')) work_dict['bf:hasInstance'] = [instance_dict,] work_dict['rda:dateOfPublicationManifestation'] = row.get('Publication Date') subjects = row.get('LCSH').split(",") work_dict = self.__generate_topics__(subjects, work_dict) self.works.append(work_dict)
python
from .algo.algo_endpoints import AlgoEndpoints from .graph.graph_endpoints import GraphEndpoints from .query_runner.query_runner import QueryRunner class IndirectEndpoints(AlgoEndpoints, GraphEndpoints): def __init__(self, query_runner: QueryRunner, namespace: str): super().__init__(query_runner, namespace)
python
#-*- coding:utf-8 -*- import generate_chat import seq2seq_model import tensorflow as tf import numpy as np import os os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "-1" if __name__ == '__main__': _, _, source_vocab_size = generate_chat.get_vocabs(generate_chat.vocab_encode_file) _, _, target_vocab_size = generate_chat.get_vocabs(generate_chat.vocab_decode_file) train_set = generate_chat.read_data(generate_chat.train_encode_vec_file, generate_chat.train_decode_vec_file) test_set = generate_chat.read_data(generate_chat.test_encode_vec_file, generate_chat.test_decode_vec_file) train_bucket_sizes = [len(train_set[i]) for i in range(len(generate_chat._buckets))] train_total_size = float(sum(train_bucket_sizes)) train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size for i in range(len(train_bucket_sizes))] with tf.Session() as sess: model = seq2seq_model.Seq2SeqModel(source_vocab_size, target_vocab_size, generate_chat._buckets, generate_chat.units_num, generate_chat.num_layers, generate_chat.max_gradient_norm, generate_chat.batch_size, generate_chat.learning_rate, generate_chat.learning_rate_decay_factor, use_lstm=True) ckpt = tf.train.get_checkpoint_state('.') if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path): print("Reading model parameters from %s" % ckpt.model_checkpoint_path) model.saver.restore(sess, ckpt.model_checkpoint_path) else: print("Created model with fresh parameters.") sess.run(tf.global_variables_initializer()) loss = 0.0 step = 0 previous_losses = [] run = True while run: random_number_01 = np.random.random_sample() bucket_id = min([i for i in range(len(train_buckets_scale)) if train_buckets_scale[i] > random_number_01]) encoder_inputs, decoder_inputs, target_weights = model.get_batch(train_set, bucket_id) _, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, False) print("step:%d,loss:%f" % (step, step_loss)) loss += step_loss / 2000 step += 1 if step % 1000 == 0: print("step:%d,per_loss:%f" % (step, loss)) if len(previous_losses) > 2 and loss > max(previous_losses[-3:]): sess.run(model.learning_rate_decay_op) previous_losses.append(loss) model.saver.save(sess, "./../../datas/model/share/rebot/chatbot.ckpt", global_step=model.global_step) loss = 0.0 if step % 5000 == 0: for bucket_id in range(len(generate_chat._buckets)): if len(test_set[bucket_id]) == 0: continue encoder_inputs, decoder_inputs, target_weights = model.get_batch(test_set, bucket_id) _, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, True) print("bucket_id:%d,eval_loss:%f" % (bucket_id, eval_loss))
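# --- Illustrative aside (not part of the original script above) ---
# A standalone sketch of how the bucket id is drawn in the training loop:
# train_buckets_scale holds cumulative data proportions, so taking the first index
# whose cumulative value exceeds a uniform random number samples each bucket in
# proportion to how much data it holds. The proportions below are made up.
import numpy as np

train_buckets_scale = [0.3, 0.7, 1.0]  # e.g. 30% / 40% / 30% of the pairs per bucket
counts = [0, 0, 0]
for _ in range(10000):
    random_number_01 = np.random.random_sample()
    bucket_id = min([i for i in range(len(train_buckets_scale))
                     if train_buckets_scale[i] > random_number_01])
    counts[bucket_id] += 1
print(counts)  # roughly [3000, 4000, 3000]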
python
import tkinter as tk from src.ui.core import SortableTable from src.library.model import PlaylistModel class Table(SortableTable): def __init__(self, parent, logger, library): SortableTable.__init__(self, parent, logger) self.library = library self.add_column('Playlist Name', sortable=True) self.init_treeview() self.on_playback_event = None self.context_view_switcher = None def get_unsorted_item_list(self): return self.library.session.query(PlaylistModel).all() def create_column_values_for(self, item): return (item.name,) def compare_items(self, a, b): from src.utility import compare_strings multiplier = -1 if self.sort_in_reverse else 1 return compare_strings(a.name, b.name) * multiplier def set_on_playback_event(self, on_playback_event): self.on_playback_event = on_playback_event def dispatch_playback_event(self, event): if self.on_playback_event is not None: self.on_playback_event(event) def display_context_menu(self, event): context_menu = tk.Menu(master=self.frame, tearoff=0) context_menu.add_command(label='Set as Queue', command=self.play_playlist) context_menu.add_command(label='Set as Queue (Shuffled)', command=self.play_playlist_shuffled) context_menu.add_command(label='More Info', command=lambda: self.view_playlist_info(event.x_root, event.y_root)) context_menu.add_command(label='Delete', command=self.delete_playlist) context_menu.post(event.x_root, event.y_root) def play_playlist(self): from src.backend.event import PlayPlaylist self.dispatch_playback_event(PlayPlaylist(self.get_selected_item(), shuffled=False)) def play_playlist_shuffled(self): from src.backend.event import PlayPlaylist self.dispatch_playback_event(PlayPlaylist(self.get_selected_item(), shuffled=True)) def view_playlist_info(self, x, y): from ...info.playlist_info import PlaylistInfo if self.context_view_switcher is not None: info = PlaylistInfo(self.context_view_switcher, self.logger, self.library) info.set_item(self.get_selected_item()) self.context_view_switcher.open_page(info) def delete_playlist(self): item = self.get_selected_item() self.library.session.delete(item) self.library.session.commit() self.refresh()
python
from slicegan import preprocessing, util import torch import torch.nn as nn import torch.backends.cudnn as cudnn import torch.optim as optim import time import matplotlib import wandb # 1. Start a new run wandb.init(project='SuperRes', name='SliceGAN train', entity='tldr-group') def train(pth, imtype, datatype, real_data, Disc, Gen, nc, l, nz, sf): """ train the generator :param pth: path to save all files, imgs and data :param imtype: image type e.g nphase, colour or gray :param datatype: training data format e.g. tif, jpg ect :param real_data: path to training data :param Disc: :param Gen: :param nc: channels :param l: image size :param nz: latent vector size :param sf: scale factor for training data :return: """ if len(real_data) == 1: real_data *= 3 isotropic = True else: isotropic = False print('Loading Dataset...') dataset_xyz = preprocessing.batch(real_data, datatype, l, sf) ## Constants for NNs matplotlib.use('Agg') ngpu = 1 num_epochs = 30 # batch sizes batch_size = 32 D_batch_size = 8 # optimiser params for G and D lrg = 0.0001 lrd = 0.0001 beta1 = 0 beta2 = 0.9 Lambda = 10 critic_iters = 5 cudnn.benchmark = True workers = 0 lz = 4 ##Dataloaders for each orientation device = torch.device("cuda:0" if(torch.cuda.is_available() and ngpu > 0) else "cpu") print(device, " will be used.\n") # D trained using different data for x, y and z directions dataloaderx = torch.utils.data.DataLoader(dataset_xyz[0], batch_size=batch_size, shuffle=True, num_workers=workers) dataloadery = torch.utils.data.DataLoader(dataset_xyz[1], batch_size=batch_size, shuffle=True, num_workers=workers) dataloaderz = torch.utils.data.DataLoader(dataset_xyz[2], batch_size=batch_size, shuffle=True, num_workers=workers) # Create the Genetator network netG = Gen().to(device) if ('cuda' in str(device)) and (ngpu > 1): netG = nn.DataParallel(netG, list(range(ngpu))) optG = optim.Adam(netG.parameters(), lr=lrg, betas=(beta1, beta2)) # Define 1 Discriminator and optimizer for each plane in each dimension netDs = [] optDs = [] for i in range(3): netD = Disc() netD = (nn.DataParallel(netD, list(range(ngpu)))).to(device) netDs.append(netD) optDs.append(optim.Adam(netDs[i].parameters(), lr=lrd, betas=(beta1, beta2))) disc_real_log = [] disc_fake_log = [] gp_log = [] Wass_log = [] print("Starting Training Loop...") # For each epoch start = time.time() for epoch in range(num_epochs): # sample data for each direction for i, (datax, datay, dataz) in enumerate(zip(dataloaderx, dataloadery, dataloaderz), 1): dataset = [datax, datay, dataz] ### Initialise ### Discriminator ## Generate fake image batch with G noise = torch.randn(D_batch_size, nz, lz,lz,lz, device=device) fake_data = netG(noise).detach() # for each dim (d1, d2 and d3 are used as permutations to make 3D volume into a batch of 2D images) for dim, (netD, optimizer, data, d1, d2, d3) in enumerate( zip(netDs, optDs, dataset, [2, 3, 4], [3, 2, 2], [4, 4, 3])): if isotropic: netD = netDs[0] optimizer = optDs[0] netD.zero_grad() ##train on real images real_data = data[0].to(device) out_real = netD(real_data).view(-1).mean() ## train on fake images # perform permutation + reshape to turn volume into batch of 2D images to pass to D fake_data_perm = fake_data.permute(0, d1, 1, d2, d3).reshape(l * D_batch_size, nc, l, l) out_fake = netD(fake_data_perm).mean() gradient_penalty = util.calc_gradient_penalty(netD, real_data, fake_data_perm[:batch_size], batch_size, l, device, Lambda, nc) disc_cost = out_fake - out_real + gradient_penalty disc_cost.backward() optimizer.step() #logs 
for plotting wandb.log({'out real': out_real.item()}) wandb.log({'out fake': out_fake.item()}) wandb.log({'wass': out_real.item() - out_fake.item()}) ### Generator Training if i % int(critic_iters) == 0: netG.zero_grad() errG = 0 noise = torch.randn(batch_size, nz, lz,lz,lz, device=device) fake = netG(noise) for dim, (netD, d1, d2, d3) in enumerate( zip(netDs, [2, 3, 4], [3, 2, 2], [4, 4, 3])): if isotropic: #only need one D netD = netDs[0] # permute and reshape to feed to disc fake_data_perm = fake.permute(0, d1, 1, d2, d3).reshape(l * batch_size, nc, l, l) output = netD(fake_data_perm) errG -= output.mean() # Calculate gradients for G errG.backward() optG.step() # Output training stats & show imgs if i % 25 == 0: netG.eval() with torch.no_grad(): torch.save(netG.state_dict(), pth + '_Gen.pt') wandb.save(pth + '_Gen.pt') torch.save(netD.state_dict(), pth + '_Disc.pt') noise = torch.randn(1, nz,lz,lz,lz, device=device) img = netG(noise) ###Print progress ## calc ETA steps = len(dataloaderx) util.calc_eta(steps, time.time(), start, i, epoch, num_epochs) ###save example slices util.test_plotter(img, 5, imtype, pth) # plotting graphs # util.graph_plot([disc_real_log, disc_fake_log], ['real', 'perp'], pth, 'LossGraph') # util.graph_plot([Wass_log], ['Wass Distance'], pth, 'WassGraph') # util.graph_plot([gp_log], ['Gradient Penalty'], pth, 'GpGraph') netG.train()
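# --- Illustrative aside (not part of the original training code above) ---
# A small, self-contained sketch of the permute + reshape trick that turns a generated
# 3D volume into a batch of 2D slices for the discriminators. The shapes (batch=2,
# nc=3, l=64) are made up for illustration; (d1, d2, d3) = (2, 3, 4) slices along the
# first spatial axis, and the other two permutations in the loop handle y and z.
import torch

batch, nc, l = 2, 3, 64
fake_volume = torch.randn(batch, nc, l, l, l)
d1, d2, d3 = 2, 3, 4
slices = fake_volume.permute(0, d1, 1, d2, d3).reshape(l * batch, nc, l, l)
print(slices.shape)  # torch.Size([128, 3, 64, 64])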
python
from typing import List from ..error import GraphQLError from ..language import DocumentNode from ..type import GraphQLSchema __all__ = ["find_deprecated_usages"] def find_deprecated_usages( schema: GraphQLSchema, ast: DocumentNode ) -> List[GraphQLError]: # pragma: no cover """Get a list of GraphQLError instances describing each deprecated use. .. deprecated:: 3.1.3 Please use ``validate`` with ``NoDeprecatedCustomRule`` instead:: from graphql import validate, NoDeprecatedCustomRule errors = validate(schema, document, [NoDeprecatedCustomRule]) """ from ..validation import validate, NoDeprecatedCustomRule return validate(schema, ast, [NoDeprecatedCustomRule])
python
from .target_generators import HeatmapGenerator from .target_generators import ScaleAwareHeatmapGenerator from .target_generators import JointsGenerator __all__ = ['HeatmapGenerator', 'ScaleAwareHeatmapGenerator', 'JointsGenerator']
python
import re from typing import Annotated, Any, Optional import pytest from arti import ( Annotation, Artifact, Fingerprint, PartitionDependencies, Producer, StoragePartitions, ) from arti import producer as producer_decorator # Avoid shadowing from arti.internal.models import Model from arti.internal.utils import frozendict from arti.producers import ValidateSig from arti.types import Collection, Int64, Struct from arti.versions import String as StringVersion from arti.views import python as python_views from tests.arti.dummies import A1, A2, A3, A4, P1, P2, DummyStorage Int64Artifact = Artifact.from_type(Int64()) class DummyProducer(Producer): a1: A1 @staticmethod def build(a1: dict) -> tuple[Annotated[dict, A2], Annotated[dict, A3]]: # type: ignore pass @staticmethod def map(a1: StoragePartitions) -> PartitionDependencies: pass def check_model_matches(a: Model, b: Model, *, exclude: set[str]) -> None: assert a.dict(exclude=exclude) == b.dict(exclude=exclude) def test_Producer() -> None: a1 = A1() producer = DummyProducer(a1=a1) assert producer.a1 == a1 assert len(list(producer)) == 2 expected_output_classes = [A2, A3] for i, output in enumerate(producer): assert isinstance(output, expected_output_classes[i]) def test_producer_decorator() -> None: @producer_decorator() def dummy_producer(a1: Annotated[dict, A1]) -> Annotated[dict, A2]: # type: ignore return {} assert dummy_producer.__name__ == "dummy_producer" assert dummy_producer._input_artifact_types_ == frozendict(a1=A1) assert len(dummy_producer._output_metadata_) == 1 assert dummy_producer._output_metadata_[0][0] == A2 assert dummy_producer(a1=A1()).annotations == Producer.__fields__["annotations"].default assert dummy_producer(a1=A1()).version == Producer.__fields__["version"].default class MyAnnotation(Annotation): pass def mapper() -> PartitionDependencies: return PartitionDependencies() @producer_decorator( annotations=(MyAnnotation(),), map=mapper, name="test", version=StringVersion(value="test") ) def dummy_producer2(a1: Annotated[dict, A1]) -> Annotated[dict, A2]: # type: ignore return {} assert dummy_producer2.__name__ == "test" assert dummy_producer2.map == mapper assert dummy_producer2(a1=A1()).annotations == (MyAnnotation(),) assert dummy_producer2(a1=A1()).version == StringVersion(value="test") def test_producer_input_metadata() -> None: @producer_decorator() def dummy_producer( a1: Annotated[dict, A1], *, a: int, b: Annotated[int, "non-Artifact"] # type: ignore ) -> Annotated[dict, A2]: # type: ignore return {} assert dummy_producer._input_artifact_types_ == frozendict( a1=A1, a=Int64Artifact, b=Int64Artifact ) def test_Producer_partitioned_input_validation() -> None: class A(Artifact): type = Collection(element=Struct(fields={"x": Int64()}), partition_by=("x",)) class P(Producer): a: A @staticmethod def build(a: list[dict]) -> Annotated[dict, A2]: # type: ignore pass assert P._input_artifact_types_ == frozendict(a=A) assert P._build_input_views_ == frozendict(a=python_views.List) with pytest.raises(ValueError, match="dict.* cannot be used to represent Collection"): class SingularInput(Producer): a: A @staticmethod def build(a: dict) -> Annotated[dict, A2]: # type: ignore pass with pytest.raises( ValueError, match=re.escape("list[int] cannot be used to represent Collection") ): class IncompatibleInput(Producer): a: A @staticmethod def build(a: list[int]) -> Annotated[dict, A]: # type: ignore pass def test_Producer_output_metadata() -> None: assert DummyProducer._output_metadata_ == ((A2, python_views.Dict), (A3, 
python_views.Dict)) class ImplicitArtifact(Producer): a1: A1 @classmethod def build(cls, a1: dict) -> tuple[int, Annotated[dict, A2]]: # type: ignore pass assert ImplicitArtifact._output_metadata_ == ( (Artifact.from_type(Int64()), python_views.Int), (A2, python_views.Dict), ) class ExplicitView(Producer): a1: A1 @staticmethod def build(a1: dict) -> Annotated[dict, A2, python_views.Dict]: # type: ignore pass assert ExplicitView._output_metadata_ == ((A2, python_views.Dict),) with pytest.raises( ValueError, match=re.escape("DupView.build 1st return (A2) - multiple Views set") ): class DupView(Producer): a1: A1 @staticmethod def build(a1: dict) -> Annotated[dict, A2, python_views.Dict, python_views.Int]: # type: ignore pass with pytest.raises(ValueError, match="DupArtifact.build 1st return - multiple Artifacts set"): class DupArtifact(Producer): a1: A1 @staticmethod def build(a1: dict) -> Annotated[dict, A1, A2]: # type: ignore pass def test_Producer_string_annotation() -> None: # This may be from `x: "Type"` or `from __future__ import annotations`. class StrAnnotation(Producer): a1: "A1" @staticmethod def build(a1: "dict") -> "Annotated[dict, A2]": # type: ignore pass assert isinstance(StrAnnotation(a1=A1()).out(), A2) def test_Producer_fingerprint() -> None: p1 = P1(a1=A1()) assert p1.fingerprint == Fingerprint.from_string( f'P1:{{"a1": {p1.a1.fingerprint.key}, "version": {p1.version.fingerprint.key}}}' ) def test_Producer_compute_input_fingerprint() -> None: p1 = P1(a1=A1(storage=DummyStorage(key="test"))) assert p1.compute_input_fingerprint( frozendict(a1=StoragePartitions()) ) == Fingerprint.from_string(p1._class_key_).combine(p1.version.fingerprint) storage_partition = p1.a1.storage.generate_partition().copy( update={"content_fingerprint": Fingerprint.from_int(10)} ) assert p1.compute_input_fingerprint( frozendict(a1=StoragePartitions([storage_partition])) ) == Fingerprint.from_string(p1._class_key_).combine( p1.version.fingerprint, storage_partition.content_fingerprint ) with pytest.raises( ValueError, match=re.escape("Mismatched dependency inputs; expected {'a1'}, got {'junk'}") ): p1.compute_input_fingerprint(frozendict(junk=StoragePartitions())) def test_Producer_out() -> None: a1, a2, a3, a4 = A1(), A2(), A3(), A4() # single return Producer p1 = P1(a1=a1) a2_ = p1.out(a2) # multi return Producer p2 = P2(a2=a2) a3_, a4_ = p2.out(a3, a4) for (producer, inp, out, type_, position) in ( (p1, a2, a2_, A2, 0), (p2, a3, a3_, A3, 0), (p2, a4, a4_, A4, 1), ): assert inp is not out assert isinstance(out, type_) assert out.producer_output is not None assert out.producer_output.producer == producer assert out.producer_output.position == position check_model_matches(inp, out, exclude={"producer_output"}) assert list(p1) == [a2_] assert list(p2) == [a3_, a4_] def test_Producer_map_artifacts() -> None: class P(Producer): a1: A1 @staticmethod def build(a1: dict) -> Annotated[dict, A2]: # type: ignore pass @staticmethod def map(a1: StoragePartitions) -> PartitionDependencies: pass assert P._map_input_metadata_ == frozendict(a1=A1) with pytest.raises( ValueError, match="BadMapParam.map a1 param - type hint must be `StoragePartitions`", ): class BadMapParam(P): @staticmethod def map(a1: list) -> PartitionDependencies: # type: ignore pass def test_Producer_validate_output() -> None: positive, negative = (True, "Positive"), (False, "Negative") def is_positive(i: int) -> tuple[bool, str]: return positive if i >= 0 else negative @producer_decorator(validate_outputs=is_positive) def p(x: int) -> int: 
return x assert p.validate_outputs(p.build(1)) == positive assert p.validate_outputs(p.build(-1)) == negative def test_Producer_validate_output_hint_validation() -> None: def validate_any(i: Any) -> tuple[bool, str]: return bool(i), "" def validate_vargs_any(*vals: Any) -> tuple[bool, str]: return bool(vals), "" def validate_int(i: int) -> tuple[bool, str]: return bool(i), "" for validate_outputs in list[ValidateSig]( [ lambda x: (True, ""), validate_any, validate_vargs_any, validate_int, ] ): @producer_decorator(validate_outputs=validate_outputs) def single_return_build(x: int) -> int: return x assert single_return_build.validate_outputs(5) with pytest.raises(ValueError, match="i param - type hint must be `Any` or "): def accepts_vargs_float(*i: float) -> tuple[bool, str]: return bool(i), "" @producer_decorator(validate_outputs=accepts_vargs_float) def bad_vargs(x: int) -> int: return x with pytest.raises(ValueError, match="validate_output - must match the `.build` return"): @producer_decorator(validate_outputs=validate_int) def too_few_arg(x: int) -> tuple[int, int]: return x, x + 1 with pytest.raises(ValueError, match="validate_output i param - must not have a default."): @producer_decorator(validate_outputs=lambda i=5: (True, "")) def bad_default(x: int) -> int: return x with pytest.raises( ValueError, match="validate_output i param - must be usable as a positional argument." ): def validate_kwarg(*, i: int) -> tuple[bool, str]: return bool(i), "" @producer_decorator(validate_outputs=validate_kwarg) def kwarg_only(x: int) -> int: return x with pytest.raises( ValueError, match="validate_output i param - type hint must match the 1st `.build` return" ): def accepts_float(i: float) -> tuple[bool, str]: return bool(i), "" @producer_decorator(validate_outputs=accepts_float) def mismatched_hint(x: int) -> int: return x def test_Producer_build_outputs_check() -> None: class A(Artifact): type = Int64() class B(Artifact): type = Int64() class C(Artifact): type = Collection(element=Struct(fields={"a": Int64()}), partition_by=("a",)) class D(Artifact): type = Collection(element=Struct(fields={"a": Int64(), "b": Int64()}), partition_by=("b",)) class NoPartitioning(Producer): @staticmethod def build() -> tuple[Annotated[int, A], Annotated[int, B]]: pass class MatchingPartitioning(Producer): @staticmethod def build() -> tuple[Annotated[list[dict], C], Annotated[list[dict], C]]: # type: ignore pass @staticmethod def map() -> PartitionDependencies: return PartitionDependencies() for first_output in [Annotated[int, A], Annotated[list[dict], C]]: # type: ignore with pytest.raises( ValueError, match="all output Artifacts must have the same partitioning scheme" ): class MixedPartitioning(Producer): @staticmethod def build() -> tuple[first_output, Annotated[list[dict], D]]: # type: ignore pass with pytest.raises( ValueError, match=r"BadProducer.map - must be implemented when the `build` outputs are partitioned", ): class BadProducer(Producer): # noqa: F811 @staticmethod def build() -> Annotated[list[dict], C]: # type: ignore pass def test_Producer_bad_signature() -> None: # noqa: C901 # pylint: disable=function-redefined # Ensure no error if _abstract_ class OkProducer(Producer): _abstract_ = True with pytest.raises(ValueError, match="BadProducer.build - must be implemented"): class BadProducer(Producer): pass with pytest.raises( ValueError, match=r"BadProducer.build - the following parameter\(s\) must be defined as a field: {'a1'}", ): class BadProducer(Producer): # type: ignore # noqa: F811 @classmethod 
def build(cls, a1: dict) -> Annotated[dict, A2]: # type: ignore pass with pytest.raises( ValueError, match=r"BadProducer.map - the following parameter\(s\) must be defined as a field: {'a1'}", ): class BadProducer(Producer): # type: ignore # noqa: F811 @classmethod def build(cls) -> Annotated[dict, A2]: # type: ignore pass @classmethod def map(cls, a1: StoragePartitions) -> PartitionDependencies: pass with pytest.raises( ValueError, match=r"BadProducer - the following fields aren't used in `.build` or `.map`: {'a2'}", ): class BadProducer(Producer): # type: ignore # noqa: F811 a1: A1 a2: A2 @classmethod def build(cls, a1: dict) -> Annotated[dict, A3]: # type: ignore pass with pytest.raises(ValueError, match="must have a type hint"): class BadProducer(Producer): # type: ignore # noqa: F811 a1: A1 @classmethod def build(cls, a1): # type: ignore pass with pytest.raises(ValueError, match="type hint must be an Artifact subclass"): class BadProducer(Producer): # type: ignore # noqa: F811 a1: str @classmethod def build(cls, a1: str) -> tuple[A2, A3]: pass with pytest.raises(ValueError, match="must not have a default"): class BadProducer(Producer): # type: ignore # noqa: F811 a1: A1 @classmethod def build(cls, a1: dict = A1()): # type: ignore pass with pytest.raises(ValueError, match="must be usable as a keyword argument"): class BadProducer(Producer): # type: ignore # noqa: F811 a1: A1 @classmethod def build(cls, a1: dict, /): # type: ignore pass with pytest.raises(ValueError, match="must be usable as a keyword argument"): class BadProducer(Producer): # type: ignore # noqa: F811 a1: A1 @classmethod def build(cls, *a1: dict): # type: ignore pass with pytest.raises(ValueError, match="must be usable as a keyword argument"): class BadProducer(Producer): # type: ignore # noqa: F811 a1: A1 @classmethod def build(cls, **a1: dict): # type: ignore pass with pytest.raises(ValueError, match="a return value must be set"): class BadProducer(Producer): # type: ignore # noqa: F811 a1: A1 @classmethod def build(cls, a1: dict): # type: ignore pass with pytest.raises(ValueError, match="missing return signature"): class BadProducer(Producer): # type: ignore # noqa: F811 a1: A1 @classmethod def build(cls, a1: dict) -> None: # type: ignore pass with pytest.raises( ValueError, match="BadProducer.a1 - field must not have a default nor be Optional." ): class BadProducer(Producer): # type: ignore # noqa: F811 a1: A1 = None # type: ignore @classmethod def build(cls, a1: dict): # type: ignore pass with pytest.raises( ValueError, match="BadProducer.a1 - field must not have a default nor be Optional." 
): class BadProducer(Producer): # type: ignore # noqa: F811 a1: Optional[A1] @classmethod def build(cls, a1: dict): # type: ignore pass with pytest.raises( ValueError, match=r"BadProducer.a1 - field must not have a default nor be Optional.", ): class BadProducer(Producer): # type: ignore # noqa: F811 a1: A1 = A1() @classmethod def build(cls, a1: dict) -> A2: # type: ignore pass with pytest.raises(ValueError, match=r"str.* cannot be used to represent Struct"): class BadProducer(Producer): # type: ignore # noqa: F811 @classmethod def build(cls) -> Annotated[str, A2]: pass with pytest.raises( ValueError, match=r"BadProducer.build - must be a @classmethod or @staticmethod", ): class BadProducer(Producer): # type: ignore # noqa: F811 def build(cls) -> Annotated[dict, A2]: # type: ignore pass with pytest.raises( ValueError, match=r"BadProducer.map - must be a @classmethod or @staticmethod", ): class BadProducer(Producer): # type: ignore # noqa: F811 @classmethod def build(cls) -> Annotated[dict, A2]: # type: ignore pass def map(cls) -> PartitionDependencies: pass def test_Producer_bad_init() -> None: with pytest.raises(ValueError, match="cannot be instantiated directly"): Producer() with pytest.raises(ValueError, match="extra fields not permitted"): DummyProducer(junk=5) with pytest.raises(ValueError, match="field required"): DummyProducer() with pytest.raises(ValueError, match="expected an instance of"): DummyProducer(a1=5) with pytest.raises(ValueError, match="expected an instance of"): DummyProducer(a1=A2()) def test_Producer_bad_out() -> None: producer = DummyProducer(a1=A1()) with pytest.raises(ValueError, match="expected 2 arguments of"): producer.out(1) # type: ignore with pytest.raises( ValueError, match=r"DummyProducer.out\(\) 1st argument - expected instance of" ): producer.out(1, 2) # type: ignore with pytest.raises( ValueError, match=r"DummyProducer.out\(\) 2nd argument - expected instance of" ): producer.out(A2(), A2()) output = producer.out(A2(), A3()) with pytest.raises(ValueError, match="is produced by"): producer.out(*output)
python
from pathlib import Path from fhir.resources.valueset import ValueSet as _ValueSet from oops_fhir.utils import ValueSet from oops_fhir.r4.code_system.feeding_device_codes import ( FeedingDeviceCodes as FeedingDeviceCodes_, ) from oops_fhir.r4.code_system.snomed_ct import SNOMEDCT __all__ = ["FeedingDeviceCodes"] _resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json")) class FeedingDeviceCodes(ValueSet): """ Feeding Device Codes Materials used or needed to feed the patient. Status: draft - Version: 4.0.1 http://hl7.org/fhir/ValueSet/feeding-device """ # TODO: fix this template issue1 pass class Meta: resource = _resource
python
import time import os import numpy as np from perform.constants import REAL_TYPE class RomSpaceMapping: """Base class for mapping to/from the state/latent space.""" def __init__(self, sol_domain, rom_domain, rom_model): rom_dict = rom_domain.rom_dict model_idx = rom_model.model_idx self.latent_dim = rom_model.latent_dim self.sol_shape = rom_model.sol_shape # all mappings require scaling by default, specific methods may include additional scalings model_dir = rom_dict["model_dir"] self.cent_prof = self.load_feature_scaling( os.path.join(model_dir, rom_dict["cent_profs"][model_idx]), default="zeros" ) self.norm_fac_prof = self.load_feature_scaling( os.path.join(model_dir, rom_dict["norm_fac_profs"][model_idx]), default="ones" ) self.norm_sub_prof = self.load_feature_scaling( os.path.join(model_dir, rom_dict["norm_sub_profs"][model_idx]), default="zeros" ) if callable(getattr(rom_domain.rom_method, "load_extra_scalings", None)): rom_domain.rom_method.load_extra_scalings(model_idx, sol_domain, rom_domain) # specific mapping loading functions implemented by child classes self.load_mapping() # TODO: initialize decoder Jacobian memory once def load_feature_scaling(self, scaling_input, default="zeros"): """Load a normalization or centering profile from NumPy binary. Args: scaling_input: String path to scaling profile NumPy binary. default: String indicating default profile if loading fails due to size mismatch or load failure. Returns: scaling_prof: NumPy array of scaling profile loaded (or default, if load failed). """ try: # Load single complete standardization profile from file scaling_prof = np.load(scaling_input) assert scaling_prof.shape == self.sol_shape return scaling_prof except AssertionError: print("Standardization profile at " + scaling_input + " did not match solution shape") if default == "zeros": print("WARNING: standardization load failed or not specified, defaulting to zeros") time.sleep(1.0) scaling_prof = np.zeros(self.sol_shape, dtype=REAL_TYPE) elif default == "ones": print("WARNING: standardization load failed or not specified, defaulting to ones") time.sleep(1.0) scaling_prof = np.ones(self.sol_shape, dtype=REAL_TYPE) else: raise ValueError("Invalid default: " + str(default)) return scaling_prof def scale_profile( self, arr_in, normalize=True, norm_fac_prof=None, norm_sub_prof=None, center=True, cent_prof=None, inverse=False ): """(De-)centers and/or (de-)normalizes solution profile. Depending on argument flags, centers and/or normalizes solution profile, or de-normalizes and/or de-centers solution profile. If inverse is False: arr = (arr_in - cent_prof - norm_sub_prof) / norm_fac_prof If inverse is True: arr = arr_in * norm_fac_prof + norm_sub_prof + cent_prof Args: arr_in: NumPy array of solution profile to be scaled. normalize: Boolean flag indicating whether arr_in should be (de-)normalized. norm_fac_prof: NumPy array of divisive normalization profile. norm_sub_prof: NumPy array of subtractive normalization profile. center: Boolean flag indicating whether arr_in should be (de-)centered. cent_prof: NumPy array of centering profile. inverse: If True, de-normalize and de-center. If False, center and normalize. Returns: (De)-centered and/or (de)-normalized copy of arr_in. """ arr = arr_in.copy() assert normalize or center, "Must either (de-)center or (de-)normalize."
if normalize: assert norm_fac_prof is not None, "Must provide normalization division factor to normalize" assert norm_sub_prof is not None, "Must provide normalization subtractive factor to normalize" if center: assert cent_prof is not None, "Must provide centering profile to center" # de-normalize and de-center if inverse: if normalize: arr = self.normalize(arr, norm_fac_prof, norm_sub_prof, denormalize=True) if center: arr = self.center(arr, cent_prof, decenter=True) # center and normalize else: if center: arr = self.center(arr, cent_prof, decenter=False) if normalize: arr = self.normalize(arr, norm_fac_prof, norm_sub_prof, denormalize=False) return arr def center(self, arr_in, cent_prof, decenter=False): """(De)center input vector according to provided centering profile. Args: arr_in: NumPy array to be (de-)centered. cent_prof: NumPy array of centering profile. decenter: If True, decenter profile. If False, center profile. Returns: (De-)centered copy of arr_in. """ if decenter: arr = arr_in + cent_prof else: arr = arr_in - cent_prof return arr def normalize(self, arr_in, norm_fac_prof, norm_sub_prof, denormalize=False): """(De)normalize input vector according to subtractive and divisive normalization profiles. Args: arr_in: NumPy array to be (de-)normalized. norm_fac_prof: NumPy array of divisive normalization profile. norm_sub_prof: NumPy array of subtractive normalization profile. denormalize: If True, denormalize profile. If False, normalize profile. Returns: (De-)normalized copy of arr_in. """ if denormalize: arr = arr_in * norm_fac_prof + norm_sub_prof else: arr = (arr_in - norm_sub_prof) / norm_fac_prof return arr def encode_decode_series(self, sol_series_in): """Compute encoding and decoding of a list of solution arrays""" if isinstance(sol_series_in, np.ndarray): sol_series_in = [sol_series_in] code_series_out = [] sol_series_out = [] for sol in sol_series_in: code_series_out.append(self.encode_sol(sol)) sol_series_out.append(self.decode_sol(code_series_out[-1])) return code_series_out, sol_series_out def encode_sol(self, sol_in): sol = self.scale_profile( sol_in, normalize=True, norm_fac_prof=self.norm_fac_prof, norm_sub_prof=self.norm_sub_prof, center=True, cent_prof=self.cent_prof, inverse=False, ) code = self.apply_encoder(sol) return code def decode_sol(self, code_in): """Compute full decoding of solution, including de-centering and de-normalization. Maps low-dimensional code to full-dimensional state, and de-centers and de-normalizes. Note that the apply_decoder is implemented within child classes, as these are specific to a given mapping. Args: code_in: low-dimensional code to be decoded. Returns: Full-dimensional solution NumPy array resulting from decoding and de-scaling. """ sol = self.apply_decoder(code_in) sol = self.scale_profile( sol, normalize=True, norm_fac_prof=self.norm_fac_prof, norm_sub_prof=self.norm_sub_prof, center=True, cent_prof=self.cent_prof, inverse=True, ) return sol
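# --- Illustrative aside (independent of the PERFORM classes above) ---
# A numerical sketch of the scaling applied by scale_profile(): the forward transform
# is (arr - cent_prof - norm_sub_prof) / norm_fac_prof and inverse=True undoes it, so
# an encode/decode round trip recovers the original profile. Values are arbitrary.
import numpy as np

sol = np.array([[300.0, 310.0], [1.0e5, 1.2e5]])
cent = np.array([[305.0, 305.0], [1.1e5, 1.1e5]])
sub = np.zeros_like(sol)
fac = np.array([[10.0, 10.0], [2.0e4, 2.0e4]])

scaled = (sol - cent - sub) / fac
recovered = scaled * fac + sub + cent
print(np.allclose(recovered, sol))  # True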
python
from dl.nn.Module import Module
import dl.graph.op as OP
from dl.graph import variable


class DropoutLayer(Module):
    """
    Dropout layer object.
    """

    def __init__(self, rate: float):
        """
        Dropout layer object.

        Parameters
        ----------
        rate: Dropout rate.
        """
        super().__init__()
        self.op = OP.Dropout(rate)

    def forward(self, x) -> variable.Variable:
        """
        Process the dropout operation. See details at dl.graph.op.Dropout

        Parameters
        ----------
        x: Input

        Returns
        -------
        out: output
        """
        return self.op(x)

    def eval(self):
        """
        Set the layer to evaluation mode. In this mode, dropout will not be performed.

        Returns
        -------
        out: None
        """
        self.op.eval = True

    def train(self):
        """
        Set the layer to training mode. In this mode, dropout will be performed.

        Returns
        -------
        out: None
        """
        self.op.eval = False
python
import torch.distributed as dist

from .trainer import Trainer
from ..util import DDP


def average_gradients(model):
    """ Gradient averaging. """
    size = float(dist.get_world_size())
    for param in model.parameters():
        if param.grad is not None:
            dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
            param.grad.data /= size


class DistTrainer(Trainer):
    """
    Distributed trainer for multi-gpu training. (not finished yet)
    """

    def run_step(self, model, batch, mode='train'):
        output, loss, loss_stats = model.module.forward_train(batch)
        loss = loss.mean()
        if mode == 'train':
            self.optimizer.zero_grad()
            loss.backward()
            average_gradients(model)
            self.optimizer.step()
        return output, loss, loss_stats

    def set_device(self, batch_per_gpu, rank, device):
        """
        Set model device for Distributed-Data-Parallel
        :param batch_per_gpu: batch size of each gpu
        :param rank: distributed training process rank
        :param device: cuda
        """
        self.rank = rank
        self.model = DDP(batch_per_gpu,
                         module=self.model.cuda(),
                         device_ids=[rank],
                         output_device=rank)
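# --- Illustrative aside (single-process sketch, no process group is created) ---
# What average_gradients() computes: all_reduce with SUM followed by dividing by the
# world size is just the mean of the per-worker gradients, so every rank steps with
# the same averaged gradient. The two "worker" gradients below are made up.
import torch

world_size = 2
grad_rank0 = torch.tensor([0.2, -0.4, 1.0])
grad_rank1 = torch.tensor([0.6, 0.0, -1.0])

summed = grad_rank0 + grad_rank1  # what dist.all_reduce(..., op=SUM) yields on every rank
averaged = summed / world_size    # the division applied after the all_reduce
print(torch.allclose(averaged, torch.stack([grad_rank0, grad_rank1]).mean(dim=0)))  # True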
python
from .answer import Answer, CalculatedAnswer, DragText, NumericalAnswer from .enums import * from .questions import (QCalculated, QCalculatedMultichoice, QCalculatedSimple, QCloze, QDescription, QDragAndDropImage, QDragAndDropMarker, QDragAndDropText, QEssay, QMatching, QMissingWord, QMultichoice, QNumerical, QRandomMatching, QShortAnswer, QTrueFalse) __author__ = "Lucas Wolfgang" __version__ = "0.0.1" __all__ = ["GUI", "main", "Answer", "DragText", "NumericalAnswer", "CalculatedAnswer", "QDescription", "QCalculated", "QCalculatedSimple", "QCalculatedMultichoice", "QCloze", "QDragAndDropText", "QDragAndDropImage", "QDragAndDropMarker", "QEssay", "QMatching", "QRandomMatching", "QMissingWord", "QMultichoice", "QNumerical", "QShortAnswer", "QTrueFalse"]
python
import warnings from collections import OrderedDict import pandas as pd from . import dtypes, utils from .alignment import align from .variable import IndexVariable, Variable, as_variable from .variable import concat as concat_vars def concat( objs, dim=None, data_vars="all", coords="different", compat="equals", positions=None, indexers=None, mode=None, concat_over=None, fill_value=dtypes.NA, join="outer", ): """Concatenate xarray objects along a new or existing dimension. Parameters ---------- objs : sequence of Dataset and DataArray objects xarray objects to concatenate together. Each object is expected to consist of variables and coordinates with matching shapes except for along the concatenated dimension. dim : str or DataArray or pandas.Index Name of the dimension to concatenate along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. If dimension is provided as a DataArray or Index, its name is used as the dimension to concatenate along and the values are added as a coordinate. data_vars : {'minimal', 'different', 'all' or list of str}, optional These data variables will be concatenated together: * 'minimal': Only data variables in which the dimension already appears are included. * 'different': Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. * 'all': All data variables will be concatenated. * list of str: The listed data variables will be concatenated, in addition to the 'minimal' data variables. If objects are DataArrays, data_vars must be 'all'. coords : {'minimal', 'different', 'all' or list of str}, optional These coordinate variables will be concatenated together: * 'minimal': Only coordinates in which the dimension already appears are included. * 'different': Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. * 'all': All coordinate variables will be concatenated, except those corresponding to other dimensions. * list of str: The listed coordinate variables will be concatenated, in addition to the 'minimal' coordinates. compat : {'equals', 'identical'}, optional String indicating how to compare non-concatenated variables and dataset global attributes for potential conflicts. 'equals' means that all variable values and dimensions must be the same; 'identical' means that variable attributes and global attributes must also be equal. positions : None or list of integer arrays, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. 
fill_value : scalar, optional Value to use for newly missing values join : {'outer', 'inner', 'left', 'right', 'exact'}, optional String indicating how to combine differing indexes (excluding dim) in objects - 'outer': use the union of object indexes - 'inner': use the intersection of object indexes - 'left': use indexes from the first object with each dimension - 'right': use indexes from the last object with each dimension - 'exact': instead of aligning, raise `ValueError` when indexes to be aligned are not equal - 'override': if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. indexers, mode, concat_over : deprecated Returns ------- concatenated : type of objs See also -------- merge auto_combine """ # TODO: add ignore_index arguments copied from pandas.concat # TODO: support concatenating scalar coordinates even if the concatenated # dimension already exists from .dataset import Dataset from .dataarray import DataArray try: first_obj, objs = utils.peek_at(objs) except StopIteration: raise ValueError("must supply at least one object to concatenate") if dim is None: warnings.warn( "the `dim` argument to `concat` will be required " "in a future version of xarray; for now, setting it to " "the old default of 'concat_dim'", FutureWarning, stacklevel=2, ) dim = "concat_dims" if indexers is not None: # pragma: no cover warnings.warn( "indexers has been renamed to positions; the alias " "will be removed in a future version of xarray", FutureWarning, stacklevel=2, ) positions = indexers if mode is not None: raise ValueError( "`mode` is no longer a valid argument to " "xarray.concat; it has been split into the " "`data_vars` and `coords` arguments" ) if concat_over is not None: raise ValueError( "`concat_over` is no longer a valid argument to " "xarray.concat; it has been split into the " "`data_vars` and `coords` arguments" ) if isinstance(first_obj, DataArray): f = _dataarray_concat elif isinstance(first_obj, Dataset): f = _dataset_concat else: raise TypeError( "can only concatenate xarray Dataset and DataArray " "objects, got %s" % type(first_obj) ) return f(objs, dim, data_vars, coords, compat, positions, fill_value, join) def _calc_concat_dim_coord(dim): """ Infer the dimension name and 1d coordinate variable (if appropriate) for concatenating along the new dimension. """ from .dataarray import DataArray if isinstance(dim, str): coord = None elif not isinstance(dim, (DataArray, Variable)): dim_name = getattr(dim, "name", None) if dim_name is None: dim_name = "concat_dim" coord = IndexVariable(dim_name, dim) dim = dim_name elif not isinstance(dim, DataArray): coord = as_variable(dim).to_index_variable() dim, = coord.dims else: coord = dim dim, = coord.dims return dim, coord def _calc_concat_over(datasets, dim, data_vars, coords): """ Determine which dataset variables need to be concatenated in the result, and which can simply be taken from the first dataset. """ # Return values concat_over = set() equals = {} if dim in datasets[0]: concat_over.add(dim) for ds in datasets: concat_over.update(k for k, v in ds.variables.items() if dim in v.dims) def process_subset_opt(opt, subset): if isinstance(opt, str): if opt == "different": # all nonindexes that are not the same in each dataset for k in getattr(datasets[0], subset): if k not in concat_over: # Compare the variable of all datasets vs. the one # of the first dataset. 
Perform the minimum amount of # loads in order to avoid multiple loads from disk # while keeping the RAM footprint low. v_lhs = datasets[0].variables[k].load() # We'll need to know later on if variables are equal. computed = [] for ds_rhs in datasets[1:]: v_rhs = ds_rhs.variables[k].compute() computed.append(v_rhs) if not v_lhs.equals(v_rhs): concat_over.add(k) equals[k] = False # computed variables are not to be re-computed # again in the future for ds, v in zip(datasets[1:], computed): ds.variables[k].data = v.data break else: equals[k] = True elif opt == "all": concat_over.update( set(getattr(datasets[0], subset)) - set(datasets[0].dims) ) elif opt == "minimal": pass else: raise ValueError("unexpected value for %s: %s" % (subset, opt)) else: invalid_vars = [k for k in opt if k not in getattr(datasets[0], subset)] if invalid_vars: if subset == "coords": raise ValueError( "some variables in coords are not coordinates on " "the first dataset: %s" % (invalid_vars,) ) else: raise ValueError( "some variables in data_vars are not data variables " "on the first dataset: %s" % (invalid_vars,) ) concat_over.update(opt) process_subset_opt(data_vars, "data_vars") process_subset_opt(coords, "coords") return concat_over, equals def _dataset_concat( datasets, dim, data_vars, coords, compat, positions, fill_value=dtypes.NA, join="outer", ): """ Concatenate a sequence of datasets along a new or existing dimension """ from .dataset import Dataset if compat not in ["equals", "identical"]: raise ValueError( "compat=%r invalid: must be 'equals' " "or 'identical'" % compat ) dim, coord = _calc_concat_dim_coord(dim) # Make sure we're working on a copy (we'll be loading variables) datasets = [ds.copy() for ds in datasets] datasets = align( *datasets, join=join, copy=False, exclude=[dim], fill_value=fill_value ) concat_over, equals = _calc_concat_over(datasets, dim, data_vars, coords) def insert_result_variable(k, v): assert isinstance(v, Variable) if k in datasets[0].coords: result_coord_names.add(k) result_vars[k] = v # create the new dataset and add constant variables result_vars = OrderedDict() result_coord_names = set(datasets[0].coords) result_attrs = datasets[0].attrs result_encoding = datasets[0].encoding for k, v in datasets[0].variables.items(): if k not in concat_over: insert_result_variable(k, v) # check that global attributes and non-concatenated variables are fixed # across all datasets for ds in datasets[1:]: if compat == "identical" and not utils.dict_equiv(ds.attrs, result_attrs): raise ValueError("dataset global attributes not equal") for k, v in ds.variables.items(): if k not in result_vars and k not in concat_over: raise ValueError("encountered unexpected variable %r" % k) elif (k in result_coord_names) != (k in ds.coords): raise ValueError( "%r is a coordinate in some datasets but not " "others" % k ) elif k in result_vars and k != dim: # Don't use Variable.identical as it internally invokes # Variable.equals, and we may already know the answer if compat == "identical" and not utils.dict_equiv( v.attrs, result_vars[k].attrs ): raise ValueError("variable %s not identical across datasets" % k) # Proceed with equals() try: # May be populated when using the "different" method is_equal = equals[k] except KeyError: result_vars[k].load() is_equal = v.equals(result_vars[k]) if not is_equal: raise ValueError("variable %s not equal across datasets" % k) # we've already verified everything is consistent; now, calculate # shared dimension sizes so we can expand the necessary variables dim_lengths = 
[ds.dims.get(dim, 1) for ds in datasets] non_concat_dims = {} for ds in datasets: non_concat_dims.update(ds.dims) non_concat_dims.pop(dim, None) def ensure_common_dims(vars): # ensure each variable with the given name shares the same # dimensions and the same shape for all of them except along the # concat dimension common_dims = tuple(pd.unique([d for v in vars for d in v.dims])) if dim not in common_dims: common_dims = (dim,) + common_dims for var, dim_len in zip(vars, dim_lengths): if var.dims != common_dims: common_shape = tuple( non_concat_dims.get(d, dim_len) for d in common_dims ) var = var.set_dims(common_dims, common_shape) yield var # stack up each variable to fill-out the dataset (in order) for k in datasets[0].variables: if k in concat_over: vars = ensure_common_dims([ds.variables[k] for ds in datasets]) combined = concat_vars(vars, dim, positions) insert_result_variable(k, combined) result = Dataset(result_vars, attrs=result_attrs) result = result.set_coords(result_coord_names) result.encoding = result_encoding if coord is not None: # add concat dimension last to ensure that its in the final Dataset result[coord.name] = coord return result def _dataarray_concat( arrays, dim, data_vars, coords, compat, positions, fill_value=dtypes.NA, join="outer", ): arrays = list(arrays) if data_vars != "all": raise ValueError( "data_vars is not a valid argument when " "concatenating DataArray objects" ) datasets = [] for n, arr in enumerate(arrays): if n == 0: name = arr.name elif name != arr.name: if compat == "identical": raise ValueError("array names not identical") else: arr = arr.rename(name) datasets.append(arr._to_temp_dataset()) ds = _dataset_concat( datasets, dim, data_vars, coords, compat, positions, fill_value=fill_value, join=join, ) return arrays[0]._from_temp_dataset(ds, name)
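# --- Illustrative aside (usage sketch of the public API implemented above) ---
# Concatenating two small Datasets along a new dimension versus an existing one.
# The variable names and values are made up for illustration.
import xarray as xr

a = xr.Dataset({"temp": ("x", [1.0, 2.0])}, coords={"x": [10, 20]})
b = xr.Dataset({"temp": ("x", [3.0, 4.0])}, coords={"x": [10, 20]})
stacked = xr.concat([a, b], dim="time")   # new "time" dimension of length 2
print(stacked["temp"].shape)              # (2, 2)

c = xr.Dataset({"temp": ("x", [5.0])}, coords={"x": [30]})
extended = xr.concat([a, c], dim="x")     # grow the existing "x" dimension
print(extended["x"].values)               # [10 20 30]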
python
import re import os try: from urlparse import urlparse except: from urllib.parse import urlparse from .exceptions import FieldValidationException from .universal_forwarder_compatiblity import UF_MODE, make_splunkhome_path from .contrib.ipaddress import ip_network try: from .server_info import ServerInfo except ImportError: ServerInfo = None class Field(object): """ This is the base class that should be used to for field validators. Sub-class this and override to_python if you need custom validation. """ DATA_TYPE_STRING = 'string' DATA_TYPE_NUMBER = 'number' DATA_TYPE_BOOLEAN = 'boolean' def get_data_type(self): """ Get the type of the field. """ return Field.DATA_TYPE_STRING def __init__(self, name, title, description, none_allowed=False, empty_allowed=True, required_on_create=None, required_on_edit=None): """ Create the field. Arguments: name -- Set the name of the field (e.g. "database_server") title -- Set the human readable title (e.g. "Database server") description -- Set the human readable description of the field (e.g. "The IP or domain name of the database server") none_allowed -- Is a value of none allowed? empty_allowed -- Is an empty string allowed? required_on_create -- Is this field required when creating? required_on_edit -- Is this field required when editing? """ # Try to set required_on_create and required_on_edit to sane defaults if not defined if required_on_create is None and none_allowed: required_on_create = False elif required_on_create is None and not none_allowed: required_on_create = True if required_on_edit is None and required_on_create is not None: required_on_edit = required_on_create if name is None: raise ValueError("The name parameter cannot be none") if len(name.strip()) == 0: raise ValueError("The name parameter cannot be empty") if title is None: raise ValueError("The title parameter cannot be none") if len(title.strip()) == 0: raise ValueError("The title parameter cannot be empty") if description is None: raise ValueError("The description parameter cannot be none") if len(description.strip()) == 0: raise ValueError("The description parameter cannot be empty") self.name = name self.title = title self.description = description self.none_allowed = none_allowed self.empty_allowed = empty_allowed self.required_on_create = required_on_create self.required_on_edit = required_on_edit def to_python(self, value, session_key=None): """ Convert the field to a Python object. Should throw a FieldValidationException if the data is invalid. Arguments: value -- The value to convert session_key- The session key to access Splunk (if needed) """ if not self.none_allowed and value is None: raise FieldValidationException("The value for the '%s' parameter cannot be empty" % (self.name)) if not self.empty_allowed and len(str(value).strip()) == 0: raise FieldValidationException("The value for the '%s' parameter cannot be empty" % (self.name)) return value def to_string(self, value): """ Convert the field to a string value that can be returned. Should throw a FieldValidationException if the data is invalid. Arguments: value -- The value to convert """ return str(value) class BooleanField(Field): """ A validator that converts string versions of boolean to a real boolean. 
""" def to_python(self, value, session_key=None): Field.to_python(self, value, session_key) if value in [True, False]: return value elif str(value).strip().lower() in ["true", "1"]: return True elif str(value).strip().lower() in ["false", "0"]: return False raise FieldValidationException("The value of '%s' for the '%s' parameter is not a valid boolean" % (str(value), self.name)) def to_string(self, value): if value == True: return "1" elif value == False: return "0" return str(value) def get_data_type(self): return Field.DATA_TYPE_BOOLEAN class ListField(Field): """ A validator that converts a comma seperated string to an array. You can use the instance_class argument to convert individual items in the array to particular type. That way, you can have a list of Python objects that are already converted to the values you want. Consider this example that will include a list of parsed IP network ranges: list_field = ListField('name', 'title', 'description', instance_class=IPNetworkField) parsed_ip_ranges = list_field.to_python(u'10.0.0.0/28,1.2.3.4,10.0.1.0/28') """ def __init__(self, name, title, description, none_allowed=False, empty_allowed=True, required_on_create=None, required_on_edit=None, instance_class=None, trim_values=False): """ Create the field. Arguments: name -- Set the name of the field (e.g. "database_server") title -- Set the human readable title (e.g. "Database server") description -- Set the human readable description of the field (e.g. "The IP or domain name of the database server") none_allowed -- Is a value of none allowed? empty_allowed -- Is an empty string allowed? required_on_create -- Is this field required when creating? required_on_edit -- Is this field required when editing? instance_class -- The name of the class to use for constructing individual objects trim_values -- Trim whitespace off of the ends of the values in case that spaces between the list are not included """ super(ListField, self).__init__(name, title, description, none_allowed, empty_allowed, required_on_create, required_on_edit) self.instance_class = instance_class self.trim_values = trim_values # Create an instance for converting the values if self.instance_class is not None: self.instance = self.instance_class(self.name, self.title, self.description) else: self.instance = None def to_python(self, value, session_key=None): Field.to_python(self, value, session_key) # Convert the value into an array values_list = None if value is not None: values_list = value.split(",") else: values_list = [] # Trim the values if requested if self.trim_values: values_list = [value.strip() for value in values_list] # If we have no instances class, then just return the plain list if self.instance_class is None: return values_list # Otherwise, convert the instances accordingly else: # Convert the value instances_list = [] for instance_value in values_list: instances_list.append(self.instance.to_python(instance_value)) return instances_list def to_string(self, value): if value is not None: # Use the instance to_string if we have an instance if self.instance is not None: values_list = [] for individual_value in value: values_list.append(self.instance.to_string(individual_value)) return ",".join(values_list) # Otherwise, process it as a string else: return ",".join(value) return "" class StaticListField(Field): """ This allows you to specify a list of field values that are allowed. All other values will be rejected. 
""" _valid_values = None def __init__(self, name, title, description, none_allowed=False, empty_allowed=True, required_on_create=None, required_on_edit=None, valid_values=None): super(StaticListField, self).__init__(name, title, description, none_allowed, empty_allowed, required_on_create, required_on_edit) self.valid_values = valid_values @property def valid_values(self): return self._valid_values @valid_values.setter def valid_values(self, values): self._valid_values = values def to_python(self, value, session_key=None): Field.to_python(self, value, session_key) if value is None: return None elif value not in self.valid_values: raise FieldValidationException('The value of the "' + self.name + '" field is invalid, it must be one of:' + ','.join(self.valid_values)) else: return value class RegexField(Field): """ A validator that validates input matches a regular expression. """ def to_python(self, value, session_key=None): Field.to_python(self, value, session_key) if value is not None: try: return re.compile(value) except Exception as exception: raise FieldValidationException(str(exception)) else: return None def to_string(self, value): if value is not None: return value.pattern return "" class WildcardField(Field): """ Much like a regular expression field but takes wildcards. This will return a regular expression. """ def to_python(self, value, session_key=None): Field.to_python(self, value, session_key) if value is not None: try: regex_escaped = re.escape(value) regex_escaped = regex_escaped.replace('\*', ".*") return re.compile(regex_escaped) except Exception as exception: raise FieldValidationException(str(exception)) else: return None def to_string(self, value): if value is not None: return value.pattern return "" class IntegerField(Field): """ A validator that converts string input to an integer. """ def to_python(self, value, session_key=None): Field.to_python(self, value, session_key) if value is not None: try: return int(value) except ValueError as exception: raise FieldValidationException(str(exception)) else: return None def to_string(self, value): if value is not None: return str(value) return "" def get_data_type(self): return Field.DATA_TYPE_NUMBER class FloatField(Field): """ A validator that converts string input to a float. """ def to_python(self, value, session_key=None): Field.to_python(self, value, session_key) if value is not None: try: return float(value) except ValueError as exception: raise FieldValidationException(str(exception)) else: return None def to_string(self, value): if value is not None: return str(value) return "" def get_data_type(self): return Field.DATA_TYPE_NUMBER class RangeField(Field): """ A validator that converts string input to a pair of integers indicating a range. 
""" def __init__(self, name, title, description, low, high, none_allowed=False, empty_allowed=True, required_on_create=None, required_on_edit=None): super(RangeField, self).__init__(name, title, description, none_allowed, empty_allowed, required_on_create, required_on_edit) self.low = low self.high = high def to_python(self, value, session_key=None): Field.to_python(self, value, session_key) if value is not None: try: tmp = int(value) if tmp < self.low: raise FieldValidationException("The value of '%s' for the '%s' parameter must be greater than or equal to '%r'" % (str(value), self.name, self.low)) if tmp > self.high: raise FieldValidationException("The value of '%s' for the '%s' parameter must be less than or equal to '%r'" % (str(value), self.name, self.high)) return tmp except ValueError as exception: raise FieldValidationException(str(exception)) else: return None def to_string(self, value): if value is not None: return str(value) return "" def get_data_type(self): return Field.DATA_TYPE_NUMBER class URLField(Field): """ Represents a URL. The URL is converted to a Python object that was created via urlparse. """ require_https_on_cloud = False def __init__(self, name, title, description, none_allowed=False, empty_allowed=True, required_on_create=None, required_on_edit=None, require_https_on_cloud=False): super(URLField, self).__init__(name, title, description, none_allowed, empty_allowed, required_on_create, required_on_edit) self.require_https_on_cloud = require_https_on_cloud @classmethod def parse_url(cls, value, name): """ Parse a URL and generation an exception if it is invalid.BaseException Otherwise, return a parsed URL (via urlparse). """ parsed_value = urlparse(value) if parsed_value.hostname is None or len(parsed_value.hostname) <= 0: raise FieldValidationException("The value of '%s' for the '%s' parameter does not contain a host name" % (str(value), name)) if parsed_value.scheme not in ["http", "https"]: raise FieldValidationException("The value of '%s' for the '%s' parameter does not contain a valid protocol (only http and https are supported)" % (str(value), name)) return parsed_value def to_python(self, value, session_key=None): Field.to_python(self, value, session_key) parsed_value = URLField.parse_url(value.strip(), self.name) if self.require_https_on_cloud and parsed_value.scheme == "http" and session_key is not None and ServerInfo.is_on_cloud(session_key): raise FieldValidationException("The value of '%s' for the '%s' parameter must use encryption (be HTTPS not HTTP)" % (str(value), self.name)) return parsed_value def to_string(self, value): return value.geturl() class DurationField(Field): """ The duration field represents a duration as represented by a string such as 1d for a 24 hour period. The string is converted to an integer indicating the number of seconds. 
""" DURATION_RE = re.compile("(?P<duration>[0-9]+)\s*(?P<units>[a-z]*)", re.IGNORECASE) MINUTE = 60 HOUR = 60 * MINUTE DAY = 24 * HOUR WEEK = 7 * DAY UNITS = { 'w' : WEEK, 'week' : WEEK, 'd' : DAY, 'day' : DAY, 'h' : HOUR, 'hour' : HOUR, 'm' : MINUTE, 'min' : MINUTE, 'minute' : MINUTE, 's' : 1 } def to_python(self, value, session_key=None): Field.to_python(self, value, session_key) # Parse the duration duration_match = DurationField.DURATION_RE.match(value) # Make sure the duration could be parsed if duration_match is None: raise FieldValidationException("The value of '%s' for the '%s' parameter is not a valid duration" % (str(value), self.name)) # Get the units and duration match_dict = duration_match.groupdict() units = match_dict['units'] # Parse the value provided try: duration = int(match_dict['duration']) except ValueError: raise FieldValidationException("The duration '%s' for the '%s' parameter is not a valid number" % (match_dict['duration'], self.name)) # Make sure the units are valid if len(units) > 0 and units not in DurationField.UNITS: raise FieldValidationException("The unit '%s' for the '%s' parameter is not a valid unit of duration" % (units, self.name)) # Convert the units to seconds if len(units) > 0: return duration * DurationField.UNITS[units] else: return duration def to_string(self, value): return str(value) class DeprecatedField(Field): """ Represents a field that is no longer used. This should be used when you want the input to pass validation with arguments that are no longer used. """ def __init__(self, name, title, description, none_allowed=True, empty_allowed=True, required_on_create=False, required_on_edit=False): """ Create the field. Arguments: name -- Set the name of the field (e.g. "database_server") title -- Set the human readable title (e.g. "Database server") description -- Set the human readable description of the field (e.g. "The IP or domain name of the database server") none_allowed -- Is a value of none allowed? empty_allowed -- Is an empty string allowed? required_on_create -- Is this field required when creating? required_on_edit -- Is this field required when editing? """ super(DeprecatedField, self).__init__(name, title, description, none_allowed=none_allowed, empty_allowed=empty_allowed, required_on_create=required_on_create, required_on_edit=required_on_edit) def to_python(self, value, session_key=None): return None def to_string(self, value): return "" class FilePathField(Field): ''' Represents a path to file. ''' def __init__(self, name, title, description, none_allowed=False, empty_allowed=True, required_on_create=None, required_on_edit=None, validate_file_existence=True): """ Create the field. Arguments: name -- Set the name of the field (e.g. "database_server") title -- Set the human readable title (e.g. "Database server") description -- Set the human readable description of the field (e.g. "The IP or domain name of the database server") none_allowed -- Is a value of none allowed? empty_allowed -- Is an empty string allowed? required_on_create -- Is this field required when creating? required_on_edit -- Is this field required when editing? 
validate_file_existence -- If true, this field will generate an error if the file doesn't exist """ super(FilePathField, self).__init__(name, title, description, none_allowed, empty_allowed, required_on_create, required_on_edit) self.validate_file_existence = validate_file_existence def to_python(self, value, session_key=None): Field.to_python(self, value, session_key) # Don't bother validating if the parameter wasn't provided if value is None or len(value.strip()) == 0: return value # Resolve the file path as necessary resolved_path = None if value is not None: if os.path.isabs(value) or UF_MODE: resolved_path = value else: path = os.path.join(make_splunkhome_path([value])) resolved_path = path # Validate the file existence if requested if self.validate_file_existence and not os.path.isfile(resolved_path): raise FieldValidationException("The parameter '%s' is not a valid path; '%s' does not exist" % (self.name, resolved_path)) return resolved_path def to_string(self, value): return value class DomainNameField(Field): """ A validator that accepts domain names. """ def is_valid_hostname(self, dn): """ Determine if the given hostname is valid. See https://stackoverflow.com/questions/2532053/validate-a-hostname-string """ if dn.endswith('.'): dn = dn[:-1] if len(dn) < 1 or len(dn) > 253: return False ldh_re = re.compile('^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$', re.IGNORECASE) return all(ldh_re.match(x) for x in dn.split('.')) def to_python(self, value, session_key=None): Field.to_python(self, value, session_key) if value is not None: if not self.is_valid_hostname(value): raise FieldValidationException("The value of '%s' for the '%s' parameter is not a valid domain name" % (value, self.name)) return value else: return None class MultiValidatorField(Field): def __init__(self, name, title, description, none_allowed=False, empty_allowed=True, required_on_create=None, required_on_edit=None, validators=None, default_message=None): """ Create the field. Arguments: name -- Set the name of the field (e.g. "database_server") title -- Set the human readable title (e.g. "Database server") description -- Set the human readable description of the field (e.g. "The IP or domain name of the database server") none_allowed -- Is a value of none allowed? empty_allowed -- Is an empty string allowed? required_on_create -- Is this field required when creating? required_on_edit -- Is this field required when editing? 
validate_file_existence -- If true, this field will generate an error if the file doesn't exist """ super(MultiValidatorField, self).__init__(name, title, description, none_allowed, empty_allowed, required_on_create, required_on_edit) # Stop if no validators were supplied if validators is None or len(validators) == 0: raise Exception("A list of the validators is required for the MultiValidatorField to test against") # Here is where all of the instances of the validators will be stored self.validators = [] # Construct the validator instances for validator in validators: self.validators.append(validator(self.name, self.title, self.description, self.none_allowed, self.empty_allowed, self.required_on_create, self.required_on_edit)) # This will point to the last validator instance that accepted the last value self.last_used_validator = None # Persist the error message self.default_message = default_message def to_python(self, value, session_key=None): Field.to_python(self, value, session_key) if value is not None: messages =[] for validator in self.validators: try: python_value = validator.to_python(value, session_key) self.last_used_validator = validator return python_value except FieldValidationException as e: messages.append(str(e)) # Generate an exception since the field could not be validated if self.default_message is None: raise FieldValidationException(";".join(messages)) else: raise FieldValidationException(self.default_message) else: return None def to_string(self, value): if value is not None: return self.last_used_validator.to_string(value) return "" class IPNetworkField(Field): """ A validator that accepts IP addresses. """ def to_python(self, value, session_key=None): Field.to_python(self, value, session_key) if value is not None: # Convert the incoming string to bytes # For Python 2, str works fine since it is just bytes. Python 3 defaults to unicode which needs to be converted. try: unicode if not isinstance(value, unicode): value = unicode(value) # The interpreter is Python 2 except NameError: # The interpreter is Python 3, it is unicode already pass try: return ip_network(value, strict=False) except ValueError as exception: raise FieldValidationException(str(exception)) else: return None def to_string(self, value): if value is not None: # Get the main address if this is a single address if value.num_addresses == 1: return str(value.network_address) else: return str(value) return ""
python
import os import pandas as pd import pytest from probatus.feature_elimination import EarlyStoppingShapRFECV, ShapRFECV from probatus.utils import preprocess_labels from sklearn.linear_model import LogisticRegression from sklearn.metrics import get_scorer from sklearn.model_selection import RandomizedSearchCV, StratifiedGroupKFold, StratifiedKFold from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier @pytest.fixture(scope="function") def X(): """ Fixture for X. """ return pd.DataFrame( { "col_1": [1, 1, 1, 1, 1, 1, 1, 0], "col_2": [0, 0, 0, 0, 0, 0, 0, 1], "col_3": [1, 0, 1, 0, 1, 0, 1, 0], }, index=[1, 2, 3, 4, 5, 6, 7, 8], ) @pytest.fixture(scope="session") def catboost_classifier_class(): """This fixture allows to reuse the import of the CatboostClassifier class across different tests. It is equivalent to importing the package at the beginning of the file. Importing catboost multiple times results in a ValueError: I/O operation on closed file. """ from catboost import CatBoostClassifier return CatBoostClassifier @pytest.fixture(scope="function") def y(): """ Fixture for y. """ return pd.Series([1, 0, 1, 0, 1, 0, 1, 0], index=[1, 2, 3, 4, 5, 6, 7, 8]) @pytest.fixture(scope="function") def sample_weight(): """ Fixture for sample_weight. """ return pd.Series([1, 1, 1, 1, 1, 1, 1, 1], index=[1, 2, 3, 4, 5, 6, 7, 8]) @pytest.fixture(scope="function") def groups(): """ Fixture for groups. """ return pd.Series(["grp1", "grp1", "grp1", "grp1", "grp2", "grp2", "grp2", "grp2"], index=[1, 2, 3, 4, 5, 6, 7, 8]) def test_shap_rfe_randomized_search(X, y, capsys): """ Test with RandomizedSearchCV. """ clf = DecisionTreeClassifier(max_depth=1) param_grid = {"criterion": ["gini"], "min_samples_split": [1, 2]} search = RandomizedSearchCV(clf, param_grid, cv=2, n_iter=2) with pytest.warns(None) as record: shap_elimination = ShapRFECV(search, step=0.8, cv=2, scoring="roc_auc", n_jobs=4, random_state=1) report = shap_elimination.fit_compute(X, y) assert shap_elimination.fitted shap_elimination._check_if_fitted() assert report.shape[0] == 2 assert shap_elimination.get_reduced_features_set(1) == ["col_3"] _ = shap_elimination.plot(show=False) # Ensure that number of warnings was at least 2 for the verbose (2 generated by probatus + possibly more by SHAP) assert len(record) >= 2 # Check if there is any prints out, _ = capsys.readouterr() assert len(out) == 0 def test_shap_rfe(X, y, sample_weight, capsys): """ Test with ShapRFECV. """ clf = DecisionTreeClassifier(max_depth=1, random_state=1) with pytest.warns(None) as record: shap_elimination = ShapRFECV( clf, random_state=1, step=1, cv=2, scoring="roc_auc", n_jobs=4, ) shap_elimination = shap_elimination.fit( X, y, sample_weight=sample_weight, approximate=True, check_additivity=False ) assert shap_elimination.fitted shap_elimination._check_if_fitted() report = shap_elimination.compute() assert report.shape[0] == 3 assert shap_elimination.get_reduced_features_set(1) == ["col_3"] _ = shap_elimination.plot(show=False) # Ensure that number of warnings was 0 assert len(record) == 0 # Check if there is any prints out, _ = capsys.readouterr() assert len(out) == 0 def test_shap_rfe_group_cv(X, y, groups, sample_weight, capsys): """ Test ShapRFECV with StratifiedGroupKFold. 
""" clf = DecisionTreeClassifier(max_depth=1, random_state=1) cv = StratifiedGroupKFold(n_splits=2, shuffle=True, random_state=1) with pytest.warns(None) as record: shap_elimination = ShapRFECV( clf, random_state=1, step=1, cv=cv, scoring="roc_auc", n_jobs=4, ) shap_elimination = shap_elimination.fit( X, y, groups=groups, sample_weight=sample_weight, approximate=True, check_additivity=False ) assert shap_elimination.fitted shap_elimination._check_if_fitted() report = shap_elimination.compute() assert report.shape[0] == 3 assert shap_elimination.get_reduced_features_set(1) == ["col_3"] _ = shap_elimination.plot(show=False) # Ensure that number of warnings was 0 assert len(record) == 0 # Check if there is any prints out, _ = capsys.readouterr() assert len(out) == 0 def test_shap_pipeline_error(X, y, capsys): """ Test with ShapRFECV for pipelines. """ clf = Pipeline( [ ("scaler", StandardScaler()), ("dt", DecisionTreeClassifier(max_depth=1, random_state=1)), ] ) with pytest.raises(TypeError): shap_elimination = ShapRFECV( clf, random_state=1, step=1, cv=2, scoring="roc_auc", n_jobs=4, ) shap_elimination = shap_elimination.fit(X, y, approximate=True, check_additivity=False) def test_shap_rfe_linear_model(X, y, capsys): """ Test ShapRFECV with linear model. """ clf = LogisticRegression(C=1, random_state=1) with pytest.warns(None) as record: shap_elimination = ShapRFECV(clf, random_state=1, step=1, cv=2, scoring="roc_auc", n_jobs=4) shap_elimination = shap_elimination.fit(X, y) assert shap_elimination.fitted shap_elimination._check_if_fitted() report = shap_elimination.compute() assert report.shape[0] == 3 assert shap_elimination.get_reduced_features_set(1) == ["col_3"] _ = shap_elimination.plot(show=False) # Ensure that number of warnings was 0 assert len(record) == 0 # Check if there is any prints out, _ = capsys.readouterr() assert len(out) == 0 def test_shap_rfe_svm(X, y, capsys): """ Test with ShapRFECV with SVM. """ clf = SVC(C=1, kernel="linear", probability=True) with pytest.warns(None) as record: shap_elimination = ShapRFECV(clf, random_state=1, step=1, cv=2, scoring="roc_auc", n_jobs=4) shap_elimination = shap_elimination.fit(X, y) assert shap_elimination.fitted shap_elimination._check_if_fitted() report = shap_elimination.compute() assert report.shape[0] == 3 assert shap_elimination.get_reduced_features_set(1) == ["col_3"] _ = shap_elimination.plot(show=False) # Ensure that number of warnings was 0 assert len(record) == 0 # Check if there is any prints out, _ = capsys.readouterr() assert len(out) == 0 def test_shap_rfe_cols_to_keep(X, y, capsys): """ Test for shap_rfe_cv with feautures to keep parameter. """ clf = DecisionTreeClassifier(max_depth=1, random_state=1) with pytest.warns(None) as record: shap_elimination = ShapRFECV( clf, random_state=1, step=2, cv=2, scoring="roc_auc", n_jobs=4, min_features_to_select=1, ) shap_elimination = shap_elimination.fit(X, y, columns_to_keep=["col_2", "col_3"]) assert shap_elimination.fitted shap_elimination._check_if_fitted() report = shap_elimination.compute() assert report.shape[0] == 2 reduced_feature_set = set(shap_elimination.get_reduced_features_set(num_features=2)) assert reduced_feature_set == set(["col_2", "col_3"]) # Ensure that number of warnings was 0 assert len(record) == 0 # Check if there is any prints out, _ = capsys.readouterr() assert len(out) == 0 def test_shap_rfe_randomized_search_cols_to_keep(X, y, capsys): """ Test with ShapRFECV with column to keep param. 
""" clf = DecisionTreeClassifier(max_depth=1) param_grid = {"criterion": ["gini"], "min_samples_split": [1, 2]} search = RandomizedSearchCV(clf, param_grid, cv=2, n_iter=2) with pytest.warns(None) as record: shap_elimination = ShapRFECV(search, step=0.8, cv=2, scoring="roc_auc", n_jobs=4, random_state=1) report = shap_elimination.fit_compute(X, y, columns_to_keep=["col_2", "col_3"]) assert shap_elimination.fitted shap_elimination._check_if_fitted() assert report.shape[0] == 2 reduced_feature_set = set(shap_elimination.get_reduced_features_set(num_features=2)) assert reduced_feature_set == set(["col_2", "col_3"]) _ = shap_elimination.plot(show=False) # Ensure that number of warnings was at least 2 for the verbose (2 generated by probatus + possibly more by SHAP) assert len(record) >= 2 # Check if there is any prints out, _ = capsys.readouterr() assert len(out) == 0 def test_calculate_number_of_features_to_remove(): """ Test with ShapRFECV with n features to remove. """ assert 3 == ShapRFECV._calculate_number_of_features_to_remove( current_num_of_features=10, num_features_to_remove=3, min_num_features_to_keep=5 ) assert 3 == ShapRFECV._calculate_number_of_features_to_remove( current_num_of_features=8, num_features_to_remove=5, min_num_features_to_keep=5 ) assert 0 == ShapRFECV._calculate_number_of_features_to_remove( current_num_of_features=5, num_features_to_remove=1, min_num_features_to_keep=5 ) assert 4 == ShapRFECV._calculate_number_of_features_to_remove( current_num_of_features=5, num_features_to_remove=7, min_num_features_to_keep=1 ) def test_get_feature_shap_values_per_fold(X, y): """ Test with ShapRFECV with features per fold. """ clf = DecisionTreeClassifier(max_depth=1) shap_elimination = ShapRFECV(clf) (shap_values, train_score, test_score,) = shap_elimination._get_feature_shap_values_per_fold( X, y, clf, train_index=[2, 3, 4, 5, 6, 7], val_index=[0, 1], scorer=get_scorer("roc_auc"), ) assert test_score == 1 assert train_score > 0.9 assert shap_values.shape == (2, 3) @pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled") def test_complex_dataset(complex_data, complex_lightgbm): """ Test on complex dataset. """ X, y = complex_data param_grid = { "n_estimators": [5, 7, 10], "num_leaves": [3, 5, 7, 10], } search = RandomizedSearchCV(complex_lightgbm, param_grid, n_iter=1) shap_elimination = ShapRFECV(clf=search, step=1, cv=10, scoring="roc_auc", n_jobs=3, verbose=50) with pytest.warns(None) as record: report = shap_elimination.fit_compute(X, y) assert report.shape[0] == X.shape[1] assert len(record) >= 2 @pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled") def test_shap_rfe_early_stopping_lightGBM(complex_data, capsys): """ Test EarlyStoppingShapRFECV with a LGBMClassifier. 
""" from lightgbm import LGBMClassifier clf = LGBMClassifier(n_estimators=200, max_depth=3) X, y = complex_data with pytest.warns(None) as record: shap_elimination = EarlyStoppingShapRFECV( clf, random_state=1, step=1, cv=10, scoring="roc_auc", n_jobs=4, early_stopping_rounds=5, eval_metric="auc", ) shap_elimination = shap_elimination.fit(X, y, approximate=False, check_additivity=False) assert shap_elimination.fitted shap_elimination._check_if_fitted() report = shap_elimination.compute() assert report.shape[0] == 5 assert shap_elimination.get_reduced_features_set(1) == ["f5"] _ = shap_elimination.plot(show=False) # Ensure that number of warnings was 0 assert len(record) == 0 # Check if there is any prints out, _ = capsys.readouterr() assert len(out) == 0 @pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled") def test_shap_rfe_early_stopping_XGBoost(complex_data, capsys): """ Test EarlyStoppingShapRFECV with a LGBMClassifier. """ from xgboost import XGBClassifier clf = XGBClassifier(n_estimators=200, max_depth=3, use_label_encoder=False, random_state=42) X, y = complex_data X["f1_categorical"] = X["f1_categorical"].astype(float) with pytest.warns(None) as record: shap_elimination = EarlyStoppingShapRFECV( clf, random_state=1, step=1, cv=10, scoring="roc_auc", n_jobs=4, early_stopping_rounds=5, eval_metric="auc", ) shap_elimination = shap_elimination.fit(X, y, approximate=False, check_additivity=False) assert shap_elimination.fitted shap_elimination._check_if_fitted() report = shap_elimination.compute() assert report.shape[0] == 5 assert shap_elimination.get_reduced_features_set(1) == ["f4"] _ = shap_elimination.plot(show=False) # Ensure that number of warnings was 0 assert len(record) == 0 # Check if there is any prints out, _ = capsys.readouterr() assert len(out) == 0 # For now this test fails, catboost has issues with categorical variables and @pytest.mark.xfail @pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled") def test_shap_rfe_early_stopping_CatBoost(complex_data, capsys, catboost_classifier_class): """ Test EarlyStoppingShapRFECV with a CatBoostClassifier. """ clf = catboost_classifier_class(random_seed=42) X, y = complex_data with pytest.warns(None) as record: shap_elimination = EarlyStoppingShapRFECV( clf, random_state=1, step=1, cv=10, scoring="roc_auc", n_jobs=4, early_stopping_rounds=5, eval_metric="auc", ) shap_elimination = shap_elimination.fit(X, y, approximate=False, check_additivity=False) assert shap_elimination.fitted shap_elimination._check_if_fitted() report = shap_elimination.compute() assert report.shape[0] == 5 assert shap_elimination.get_reduced_features_set(1)[0] in ["f4", "f5"] _ = shap_elimination.plot(show=False) # Ensure that number of warnings was 0 assert len(record) == 0 # Check if there is any prints out, _ = capsys.readouterr() assert len(out) == 0 @pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled") def test_shap_rfe_randomized_search_early_stopping_lightGBM(complex_data): """ Test EarlyStoppingShapRFECV with RandomizedSearchCV and a LGBMClassifier on complex dataset. 
""" from lightgbm import LGBMClassifier clf = LGBMClassifier(n_estimators=200) X, y = complex_data param_grid = { "max_depth": [3, 4, 5], } search = RandomizedSearchCV(clf, param_grid, cv=2, n_iter=2) with pytest.warns(None) as record: shap_elimination = EarlyStoppingShapRFECV( search, step=1, cv=10, scoring="roc_auc", early_stopping_rounds=5, eval_metric="auc", n_jobs=4, verbose=50, random_state=1, ) report = shap_elimination.fit_compute(X, y) assert shap_elimination.fitted shap_elimination._check_if_fitted() assert report.shape[0] == X.shape[1] assert shap_elimination.get_reduced_features_set(1) == ["f5"] _ = shap_elimination.plot(show=False) # Ensure that number of warnings was at least 3 for the verbose (2 generated by probatus + possibly more by SHAP) assert len(record) >= 3 @pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled") def test_get_feature_shap_values_per_fold_early_stopping_lightGBM(complex_data): """ Test with ShapRFECV with features per fold. """ from lightgbm import LGBMClassifier clf = LGBMClassifier(n_estimators=200, max_depth=3) X, y = complex_data y = preprocess_labels(y, y_name="y", index=X.index) shap_elimination = EarlyStoppingShapRFECV(clf, early_stopping_rounds=5) (shap_values, train_score, test_score,) = shap_elimination._get_feature_shap_values_per_fold( X, y, clf, train_index=list(range(5, 50)), val_index=[0, 1, 2, 3, 4], scorer=get_scorer("roc_auc"), ) assert test_score > 0.6 assert train_score > 0.6 assert shap_values.shape == (5, 5) @pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled") def test_get_feature_shap_values_per_fold_early_stopping_CatBoost(complex_data, catboost_classifier_class): """ Test with ShapRFECV with features per fold. """ clf = catboost_classifier_class(random_seed=42) X, y = complex_data X["f1_categorical"] = X["f1_categorical"].astype(str).astype("category") y = preprocess_labels(y, y_name="y", index=X.index) shap_elimination = EarlyStoppingShapRFECV(clf, early_stopping_rounds=5) (shap_values, train_score, test_score,) = shap_elimination._get_feature_shap_values_per_fold( X, y, clf, train_index=list(range(5, 50)), val_index=[0, 1, 2, 3, 4], scorer=get_scorer("roc_auc"), ) assert test_score > 0 assert train_score > 0.6 assert shap_values.shape == (5, 5) @pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled") def test_get_feature_shap_values_per_fold_early_stopping_XGBoost(complex_data): """ Test with ShapRFECV with features per fold. 
""" from xgboost import XGBClassifier clf = XGBClassifier(n_estimators=200, max_depth=3, use_label_encoder=False, random_state=42) X, y = complex_data X["f1_categorical"] = X["f1_categorical"].astype(float) y = preprocess_labels(y, y_name="y", index=X.index) shap_elimination = EarlyStoppingShapRFECV(clf, early_stopping_rounds=5) (shap_values, train_score, test_score,) = shap_elimination._get_feature_shap_values_per_fold( X, y, clf, train_index=list(range(5, 50)), val_index=[0, 1, 2, 3, 4], scorer=get_scorer("roc_auc"), ) assert test_score > 0 assert train_score > 0.6 assert shap_values.shape == (5, 5) @pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled") def test_EarlyStoppingShapRFECV_no_categorical(complex_data): """Test EarlyStoppingShapRFECV when no categorical features are present.""" from lightgbm import LGBMClassifier model = LGBMClassifier(n_estimators=50, max_depth=3, num_leaves=3) shap_elimination = EarlyStoppingShapRFECV( clf=model, step=0.33, cv=5, scoring="accuracy", eval_metric="logloss", early_stopping_rounds=5, ) X, y = complex_data X = X.drop(columns=["f1_categorical"]) report = shap_elimination.fit_compute(X, y, feature_perturbation="tree_path_dependent") assert shap_elimination.fitted shap_elimination._check_if_fitted() assert report.shape[0] == X.shape[1] assert shap_elimination.get_reduced_features_set(1) == ["f5"] _ = shap_elimination.plot(show=False) @pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled") def test_LightGBM_stratified_kfold(): """ Test added to check for https://github.com/ing-bank/probatus/issues/170. """ from lightgbm import LGBMClassifier X = pd.DataFrame( [ [1, 2, 3, 4, 5, 101, 102, 103, 104, 105], [-1, -2, 2, -5, -7, 1, 2, 5, -1, 3], ["a", "b"] * 5, # noisy categorical will dropped first ] ).transpose() X[2] = X[2].astype("category") X[1] = X[1].astype("float") X[0] = X[0].astype("float") y = [0] * 5 + [1] * 5 model = LGBMClassifier() n_iter = 2 n_folds = 3 for _ in range(n_iter): skf = StratifiedKFold(n_folds, shuffle=True, random_state=42) shap_elimination = EarlyStoppingShapRFECV( clf=model, step=1 / (n_iter + 1), cv=skf, scoring="accuracy", eval_metric="logloss", early_stopping_rounds=5, ) report = shap_elimination.fit_compute(X, y, feature_perturbation="tree_path_dependent") assert shap_elimination.fitted shap_elimination._check_if_fitted() assert report.shape[0] == X.shape[1] shap_elimination.plot(show=False)
python
# -*- coding: utf8 -*- from base import Stock class Uzmanpara(Stock): stockURL = "http://uzmanpara.milliyet.com.tr/borsa/hisse-senetleri/{0}/" priceQuery = '.realTime > .price-arrow-down, .realTime > .price-arrow-up' volumeQuery = '.realTime table tr td' timezone = "Europe/Istanbul" @classmethod def extractVolume(cls, d): return d(cls.volumeQuery)[7].text[1:].replace(".", "")
python
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import sys from pkg_resources import load_entry_point from subprocess import check_call def main(): check_call([sys.executable, 'setup.py', 'build_ext', '--inplace']) if '--with-coverage' not in sys.argv: sys.argv.extend(('--with-coverage', '--cover-package=cg')) sys.exit( load_entry_point('nose', 'console_scripts', 'nosetests')() ) if __name__ == '__main__': main()
python
"""Tests for ht.events.manager module.""" # ============================================================================= # IMPORTS # ============================================================================= # Third Party import pytest # Houdini Toolbox import ht.events.manager from ht.events.event import HoudiniEvent from ht.events.group import HoudiniEventGroup from ht.events.item import HoudiniEventItem # ============================================================================= # FIXTURES # ============================================================================= @pytest.fixture def init_manager(mocker): """Fixture to initialize a manager.""" mocker.patch.object( ht.events.manager.HoudiniEventManager, "__init__", lambda x: None ) def _create(): return ht.events.manager.HoudiniEventManager() return _create # ============================================================================= # TESTS # ============================================================================= class Test_HoudiniEventManager: """Test ht.events.manager.HoudiniEventManager class.""" def test___init__(self): """Test object initialization.""" manager = ht.events.manager.HoudiniEventManager() assert manager._data == {} assert manager._events == {} assert manager._event_states == {} # Properties def test_data(self, init_manager, mocker): """Test the 'data' property""" mock_value = mocker.MagicMock(spec=dict) manager = init_manager() manager._data = mock_value assert manager.data == mock_value def test_events(self, init_manager, mocker): """Test the 'events' property""" mock_event = mocker.MagicMock(spec=HoudiniEvent) events = {mocker.MagicMock(spec=str): mock_event} manager = init_manager() manager._events = events assert manager.events == events # Methods # _disable_events def test__disable_events__all(self, init_manager, mocker): """Test disabling all events.""" mock_events = mocker.patch.object( ht.events.manager.HoudiniEventManager, "events", new_callable=mocker.PropertyMock, ) mock_event1 = mocker.MagicMock(spec=HoudiniEvent) mock_enabled1 = mocker.PropertyMock(return_value=False) type(mock_event1).enabled = mock_enabled1 mock_event2 = mocker.MagicMock(spec=HoudiniEvent) mock_enabled2 = mocker.PropertyMock(return_value=True) type(mock_event2).enabled = mock_enabled2 mock_events.return_value = { mock_event1.name: mock_event1, mock_event2.name: mock_event2, } manager = init_manager() manager._event_states = {} manager._disable_events() # Each event should have it's enabled property accessed twice: # once to store the current value and then to set the value to False mock_enabled1.assert_has_calls([mocker.call(), mocker.call(False)]) mock_enabled2.assert_has_calls([mocker.call(), mocker.call(False)]) assert not manager._event_states[mock_event1.name] assert manager._event_states[mock_event2.name] def test__disable_events__specific_names(self, init_manager, mocker): """Test disabling specific events.""" mock_events = mocker.patch.object( ht.events.manager.HoudiniEventManager, "events", new_callable=mocker.PropertyMock, ) mock_event1 = mocker.MagicMock(spec=HoudiniEvent) mock_enabled1 = mocker.PropertyMock(return_value=True) type(mock_event1).enabled = mock_enabled1 mock_event2 = mocker.MagicMock(spec=HoudiniEvent) mock_enabled2 = mocker.PropertyMock(return_value=True) type(mock_event2).enabled = mock_enabled2 mock_events.return_value = { mock_event1.name: mock_event1, mock_event2.name: mock_event2, } manager = init_manager() manager._event_states = {} manager._disable_events(names=[mock_event2.name]) # 
Event 1's enabled property should not have been accessed. mock_enabled1.assert_not_called() # Event 2's should have been accessed to get the current value # and once to disable it. mock_enabled2.assert_has_calls([mocker.call(), mocker.call(False)]) assert manager._event_states[mock_event2.name] assert len(manager._event_states) == 1 def test__restore_events(self, init_manager, mocker): """Test restoring disabled events.""" mock_events = mocker.patch.object( ht.events.manager.HoudiniEventManager, "events", new_callable=mocker.PropertyMock, ) mock_event1 = mocker.MagicMock(spec=HoudiniEvent) mock_enabled1 = mocker.PropertyMock(return_value=False) type(mock_event1).enabled = mock_enabled1 mock_event2 = mocker.MagicMock(spec=HoudiniEvent) mock_enabled2 = mocker.PropertyMock(return_value=False) type(mock_event2).enabled = mock_enabled2 mock_events.return_value = { mock_event1.name: mock_event1, mock_event2.name: mock_event2, } mock_states = mocker.MagicMock(spec=dict) states = {mock_event1.name: False, mock_event2.name: True} mock_states.items.return_value = list(states.items()) manager = init_manager() manager._event_states = mock_states manager._restore_events() # Event 1's enable should have been set to False, 2's True mock_enabled1.assert_has_calls([mocker.call(False)]) mock_enabled2.assert_has_calls([mocker.call(True)]) mock_states.clear.assert_called_once() def test_create_event(self, init_manager, mocker): """Test creating an event.""" mock_events = mocker.patch.object( ht.events.manager.HoudiniEventManager, "events", new_callable=mocker.PropertyMock, ) mock_factory = mocker.patch("ht.events.manager.HoudiniEventFactory") mock_event = mocker.MagicMock(spec=HoudiniEvent) mock_factory.get_event_type.return_value = mock_event events = {} mock_events.return_value = events manager = init_manager() mock_name = mocker.MagicMock(spec=str) result = manager.create_event(mock_name) assert result == mock_event assert mock_event in list(events.values()) mock_factory.get_event_type.assert_called_with(mock_name) def test_event_disabler(self, init_manager, mocker): """Test the event_disabler context manager.""" mock_disable = mocker.patch.object( ht.events.manager.HoudiniEventManager, "_disable_events" ) mock_restore = mocker.patch.object( ht.events.manager.HoudiniEventManager, "_restore_events" ) manager = init_manager() mock_names = mocker.MagicMock(spec=tuple) with manager.event_disabler(names=mock_names): pass mock_disable.assert_called_with(mock_names) mock_restore.assert_called_once() # register_event_group def test_register_event_group__invalid_type(self, init_manager, mocker): """Test registering an event group with an invalid object type.""" # Don't spec so it will fail isinstance(EventGroup) mock_group = mocker.MagicMock() manager = init_manager() with pytest.raises(TypeError): manager.register_event_group(mock_group) def test_register_event_group__single_items(self, init_manager, mocker): """Test registering a group where no event of that name has been created.""" mock_events = mocker.patch.object( ht.events.manager.HoudiniEventManager, "events", new_callable=mocker.PropertyMock, ) mock_create = mocker.patch.object( ht.events.manager.HoudiniEventManager, "create_event" ) mock_item1 = mocker.MagicMock(spec=HoudiniEventItem) mock_item2 = mocker.MagicMock(spec=HoudiniEventItem) mock_event_name1 = mocker.MagicMock(spec=str) mock_event_name2 = mocker.MagicMock(spec=str) event_map = {mock_event_name1: mock_item1, mock_event_name2: mock_item2} mock_group = mocker.MagicMock(spec=HoudiniEventGroup) 
type(mock_group).event_map = mocker.PropertyMock(return_value=event_map) mock_event1 = mocker.MagicMock(spec=HoudiniEvent) mock_event2 = mocker.MagicMock(spec=HoudiniEvent) events = {mock_event_name2: mock_event2} mock_events.return_value = events mock_create.side_effect = lambda name: events.setdefault(name, mock_event1) manager = init_manager() manager.register_event_group(mock_group) mock_create.assert_called_with(mock_event_name1) mock_event1.register_item.assert_called_with(mock_item1) mock_event2.register_item.assert_called_with(mock_item2) def test_register_event_group__item_lists(self, init_manager, mocker): """Test registering a group where no event of that name has been created.""" mock_events = mocker.patch.object( ht.events.manager.HoudiniEventManager, "events", new_callable=mocker.PropertyMock, ) mock_create = mocker.patch.object( ht.events.manager.HoudiniEventManager, "create_event" ) mock_item1 = mocker.MagicMock(spec=HoudiniEventItem) mock_item2 = mocker.MagicMock(spec=HoudiniEventItem) mock_event_name1 = mocker.MagicMock(spec=str) mock_event_name2 = mocker.MagicMock(spec=str) event_map = {mock_event_name1: [mock_item1], mock_event_name2: [mock_item2]} mock_group = mocker.MagicMock(spec=HoudiniEventGroup) type(mock_group).event_map = mocker.PropertyMock(return_value=event_map) event_name1 = mock_event_name1 mock_event1 = mocker.MagicMock(spec=HoudiniEvent) mock_event2 = mocker.MagicMock(spec=HoudiniEvent) events = {mock_event_name2: mock_event2} mock_events.return_value = events mock_create.side_effect = lambda name: events.setdefault(name, mock_event1) manager = init_manager() manager.register_event_group(mock_group) mock_create.assert_called_with(event_name1) mock_event1.register_item.assert_called_with(mock_item1) mock_event2.register_item.assert_called_with(mock_item2) # register_item def test_register_item__invalid_type(self, init_manager, mocker): """Test registering an invalid type.""" # Don't spec so it will fail isinstance(HoudiniEventItem) manager = init_manager() with pytest.raises(TypeError): manager.register_item(None, mocker.MagicMock(spec=str)) def test_register_item__new_event(self, init_manager, mocker): """Test registering an item whose event does not exist yet.""" mock_events = mocker.patch.object( ht.events.manager.HoudiniEventManager, "events", new_callable=mocker.PropertyMock, ) mock_create = mocker.patch.object( ht.events.manager.HoudiniEventManager, "create_event" ) mock_event_name = mocker.MagicMock(spec=str) mock_event = mocker.MagicMock(spec=HoudiniEvent) events = {} mock_events.return_value = events mock_create.side_effect = lambda name: events.setdefault(name, mock_event) mock_item = mocker.MagicMock(spec=HoudiniEventItem) manager = init_manager() manager.register_item(mock_item, mock_event_name) mock_create.assert_called_with(mock_event_name) mock_event.register_item.assert_called_with(mock_item) def test_register_item__existing_event(self, init_manager, mocker): """Test registering an item to an existing event.""" mock_events = mocker.patch.object( ht.events.manager.HoudiniEventManager, "events", new_callable=mocker.PropertyMock, ) mock_create = mocker.patch.object( ht.events.manager.HoudiniEventManager, "create_event" ) mock_event_name = mocker.MagicMock(spec=str) mock_event = mocker.MagicMock(spec=HoudiniEvent) mock_events.return_value = {mock_event_name: mock_event} mock_item = mocker.MagicMock(spec=HoudiniEventItem) manager = init_manager() manager.register_item(mock_item, mock_event_name) mock_create.assert_not_called() 
mock_event.register_item.assert_called_with(mock_item) # run_event def test_run_event__no_event(self, init_manager, mocker): """Test running an event where there are no matching events.""" mock_events = mocker.patch.object( ht.events.manager.HoudiniEventManager, "events", new_callable=mocker.PropertyMock, ) mock_event_name = mocker.MagicMock(spec=str) mock_events.return_value = {} scriptargs = {} manager = init_manager() manager.run_event(mock_event_name, scriptargs) assert scriptargs == {} def test_run_event__no_scriptargs(self, init_manager, mocker): """Test running an event with no particular args.""" mock_events = mocker.patch.object( ht.events.manager.HoudiniEventManager, "events", new_callable=mocker.PropertyMock, ) mock_event_name = mocker.MagicMock(spec=str) mock_event = mocker.MagicMock(spec=HoudiniEvent) mock_events.return_value = {mock_event_name: mock_event} manager = init_manager() manager.run_event(mock_event_name) scriptargs = {"_manager_": manager} mock_event.run.assert_called_with(scriptargs) def test_run_event__scriptargs(self, init_manager, mocker): """Test running an event while passing in args.""" mock_events = mocker.patch.object( ht.events.manager.HoudiniEventManager, "events", new_callable=mocker.PropertyMock, ) mock_event_name = mocker.MagicMock(spec=str) mock_event = mocker.MagicMock(spec=HoudiniEvent) mock_events.return_value = {mock_event_name: mock_event} manager = init_manager() scriptargs = {"key": "value"} manager.run_event(mock_event_name, scriptargs) expected_scriptargs = {"key": "value", "_manager_": manager} mock_event.run.assert_called_with(expected_scriptargs) assert scriptargs == expected_scriptargs def test_register_event_group(mocker): """Test ht.events.manager.register_event_group.""" mock_manager = mocker.patch("ht.events.manager.EVENT_MANAGER") mock_group = mocker.MagicMock(spec=HoudiniEventGroup) ht.events.manager.register_event_group(mock_group) mock_manager.register_event_group.assert_called_with(mock_group) class Test_register_function: """Test ht.events.manager.register_function.""" def test_not_callable(self, mocker): """Test registering a non-callable object.""" mock_event_name = mocker.MagicMock(spec=str) mock_item_name = mocker.MagicMock(spec=str) mock_priority = mocker.MagicMock(spec=int) mock_tags = mocker.MagicMock(spec=list) with pytest.raises(TypeError): ht.events.manager.register_function( None, mock_event_name, mock_item_name, mock_priority, mock_tags ) def test(self, mocker): """Test registering a callable object.""" mock_cls = mocker.patch("ht.events.manager.HoudiniEventItem", autospec=True) mock_register_item = mocker.patch("ht.events.manager.register_item") mock_func = mocker.MagicMock() mock_event_name = mocker.MagicMock(spec=str) mock_item_name = mocker.MagicMock(spec=str) mock_priority = mocker.MagicMock(spec=int) mock_tags = mocker.MagicMock(spec=list) ht.events.manager.register_function( mock_func, mock_event_name, mock_item_name, mock_priority, mock_tags ) mock_cls.assert_called_with( (mock_func,), mock_item_name, mock_priority, stat_tags=mock_tags ) mock_register_item.assert_called_with(mock_cls.return_value, mock_event_name) class Test_register_item: """Test ht.events.manager.register_item.""" def test_not_item(self, mocker): """Test registering an invalid type.""" mock_event_name = mocker.MagicMock(spec=str) with pytest.raises(TypeError): ht.events.manager.register_item(None, mock_event_name) def test(self, mocker): """Test registering a valid item.""" mock_manager = mocker.patch("ht.events.manager.EVENT_MANAGER") 
mock_event_name = mocker.MagicMock(spec=str) mock_item = mocker.MagicMock(spec=HoudiniEventItem) ht.events.manager.register_item(mock_item, mock_event_name) mock_manager.register_item.assert_called_with(mock_item, mock_event_name) def test_run_event(mocker): """Test ht.events.manager.run_event.""" mock_manager = mocker.patch("ht.events.manager.EVENT_MANAGER") mock_event_name = mocker.MagicMock(spec=str) mock_scriptargs = mocker.MagicMock(spec=dict) ht.events.manager.run_event(mock_event_name, mock_scriptargs) mock_manager.run_event.assert_called_with(mock_event_name, mock_scriptargs)
python
from .. cupy_utils import to_numpy, trapz, xp from ..utils import powerlaw import numpy as np from astropy.cosmology import Planck15 class PowerLawRedshift(object): """ Redshift model from Fishbach+ https://arxiv.org/abs/1805.10270 Note that this is deliberately off by a factor of dVc/dz """ def __init__(self): self.zs_ = np.linspace(1e-3, 1, 1000) self.zs = xp.asarray(self.zs_) self.dvc_dz_ = ( Planck15.differential_comoving_volume(self.zs_).value * 4 * np.pi) self.dvc_dz = xp.asarray(self.dvc_dz_) self.cached_dvc_dz = None def __call__(self, dataset, lamb): p_z = powerlaw(1 + dataset['redshift'], alpha=(lamb - 1), high=(1 + self.zs_[-1]), low=1) try: p_z *= self.cached_dvc_dz except (TypeError, ValueError): self._cache_dvc_dz(dataset['redshift']) p_z *= self.cached_dvc_dz p_z /= self.normalisation(lamb) return p_z def normalisation(self, lamb): p_z_ = powerlaw(1 + self.zs, alpha=(lamb - 1), high=(1 + self.zs_[-1]), low=1) norm = trapz(p_z_ * self.dvc_dz, self.zs) return norm def _cache_dvc_dz(self, redshifts): self.cached_dvc_dz = xp.asarray(np.interp( to_numpy(redshifts), self.zs_, self.dvc_dz_)) power_law_redshift = PowerLawRedshift()
python
from flask import Flask from flask_bootstrap import Bootstrap app = Flask(__name__) Bootstrap(app) with app.app_context(): import routes import stats if __name__ == "__main__": app.config['DEBUG'] = True app.run()
python
from receptor_affinity.mesh import Mesh from wait_for import TimedOutError import time import pytest @pytest.yield_fixture( scope="function", params=[ "test/perf/flat-mesh.yaml", "test/perf/tree-mesh.yaml", "test/perf/random-mesh.yaml", ], ids=["flat", "tree", "random"], ) def mesh(request): mesh = Mesh.load_mesh_from_file(request.param, use_diag_node=True) try: mesh.start(wait=True) yield mesh except TimedOutError: raise finally: print(f"{time.time()} - Stopping current mesh") print(mesh.nodes['controller']) mesh.stop() def test_pings_perf(mesh): results = mesh.ping() mesh.validate_ping_results(results)
python
# Copyright 2021 Gakuto Furuya # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from sudachipy import tokenizer from sudachipy import dictionary def main(): tokenizer_obj = dictionary.Dictionary().create() mode = tokenizer.Tokenizer.SplitMode.C while True: sentence = input() tokens = tokenizer_obj.tokenize(sentence, mode) pekofied_sentence = '' noun_flag = False final_form_flag = False for t in tokens: if noun_flag: if t.part_of_speech()[1] == '句点': pekofied_sentence += 'ぺこ' + t.surface() elif t.part_of_speech()[1] == '終助詞': pekofied_sentence += 'ぺこ' + t.surface() elif t.part_of_speech()[0] == '助動詞' and t.part_of_speech()[5] == '終止形-一般': pekofied_sentence += 'ぺこ' + t.surface() else: pekofied_sentence += t.surface() noun_flag = False elif final_form_flag: if t.part_of_speech()[0] == '助動詞': pekofied_sentence += t.surface() elif t.part_of_speech()[1] == '終助詞': if t.dictionary_form() == 'じゃん': pekofied_sentence += 'ぺこ' + t.surface() else: pekofied_sentence += t.surface() elif t.part_of_speech()[1] == '接続助詞': if t.dictionary_form() == 'と' or t.dictionary_form() == 'けれど': pekofied_sentence += t.surface() else: pekofied_sentence += 'ぺこだ' + t.surface() else: pekofied_sentence += 'ぺこ' + t.surface() final_form_flag = False elif t.part_of_speech()[0] == '名詞': pekofied_sentence += t.surface() noun_flag = True elif t.part_of_speech()[5] == '終止形-一般': pekofied_sentence += t.surface() final_form_flag = True else: pekofied_sentence += t.surface() if noun_flag: pekofied_sentence += 'ぺこ' if final_form_flag: pekofied_sentence += 'ぺこ' print(pekofied_sentence) if __name__ == '__main__': main()
python