#!/usr/bin/env python
import os
import sys

path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, path)

import django


def manage_16ormore():
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)


def manage_15orless():
    from django.core.management import execute_manager
    try:
        import settings  # Assumed to be in the same directory.
    except ImportError:
        sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
        sys.exit(1)
    execute_manager(settings)


if __name__ == "__main__":
    if django.VERSION > (1, 6):
        manage_16ormore()
    else:
        manage_15orless()
| python |
# Adding Python objects to a database
import sqlite3
from employee import Employee
# We import the Employee class from the module we wrote earlier; it must be in the same directory.

conn = sqlite3.connect('sql.db')
c = conn.cursor()

# The table only needs to be created once, so the CREATE statement is left commented out:
# c.execute("""CREATE TABLE employees (
#             first text,
#             last text,
#             pay integer
#             )""")

emp_1 = Employee('John', 'Doe', 80000)
emp_2 = Employee('Jane', 'Doe', 80000)

# Here we insert the above instances into the database.
c.execute("INSERT INTO employees VALUES (?,?,?)", (emp_1.first, emp_1.last, emp_1.pay))
c.execute("INSERT INTO employees VALUES (?,?,?)", (emp_2.first, emp_2.last, emp_2.pay))

# Application of the ? placeholder method (no 'Grasshopper' rows were inserted, so this prints an empty list).
c.execute("SELECT * FROM employees WHERE last=?", ('Grasshopper',))
print(c.fetchall())

# Application of the named (key) placeholder method.
c.execute("SELECT * FROM employees WHERE last=:last", {'last': 'Doe'})
print(c.fetchall())

conn.commit()
conn.close()

# Executing this code prints the data that was inserted into the database.
# Check the output in the image file in this folder.
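
# The Employee class imported above lives in employee.py, which is not shown here. A minimal
# sketch of the interface this script assumes (attribute names inferred from the INSERT
# statement; the real employee.py may define additional methods):
class EmployeeSketch:
    """Assumed shape of employee.Employee: a plain record with first, last and pay."""

    def __init__(self, first, last, pay):
        self.first = first  # first name (text column)
        self.last = last    # last name (text column)
        self.pay = pay      # annual pay (integer column)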
| python |
from XTax import Tax
import io
import unittest
import unittest.mock


class Test_XTax(unittest.TestCase):
    def test_TaxInitYear(self):
        MyTax = Tax(2019, autoload=False)
        self.assertEqual(MyTax.Year, 2019)

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_TaxInitLog(self, mock_stdout):
        MyTax = Tax(2019, loglevel=1, autoload=False)
        OutputList = mock_stdout.getvalue().split('\n')
        self.assertEqual(len(OutputList), 4)
        self.assertEqual(OutputList[0], "Beginning of Init")
        self.assertEqual(OutputList[2], "End of Init")


if __name__ == '__main__':
    unittest.main()
| python |
import sys

try:
    import threading
except ImportError:
    import dummy_threading as threading

py32 = sys.version_info >= (3, 2)
py3k = sys.version_info >= (3, 0)
py2k = sys.version_info <= (3, 0)

if py3k:
    string_types = str,
    import itertools
    itertools_filterfalse = itertools.filterfalse

    if py32:
        callable = callable
    else:
        def callable(fn):
            return hasattr(fn, '__call__')
else:
    string_types = basestring,
    import itertools
    itertools_filterfalse = itertools.ifilterfalse
    callable = callable
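
# A brief usage sketch (illustrative only): the shims above let version-agnostic code test
# for strings and filter iterables without caring whether it runs on Python 2 or Python 3.
if __name__ == '__main__':
    values = ['a', 1, 'b', 2]
    assert [v for v in values if isinstance(v, string_types)] == ['a', 'b']
    assert list(itertools_filterfalse(lambda v: isinstance(v, string_types), values)) == [1, 2]
    assert callable(len) and not callable(42)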
| python |
import sys

import Heuristic
import RandomProblem
import SolveProblem


def main():
    # auto random file if no input
    if len(sys.argv) != 4:
        RandomProblem.createRandomProblem('rand_in.txt', 8, 16)
        pf = SolveProblem.ARA('rand_in.txt', 'rand_log.txt', 3,
                              Heuristic.EuclidDistance, 5)
        pf.writeSolution('rand_out.txt')
    else:
        pf = SolveProblem.ARA(sys.argv[1], 'ARA_log.txt', 3,
                              Heuristic.EuclidDistance, int(sys.argv[3]))
        pf.writeSolution(sys.argv[2])


if __name__ == '__main__':
    main()
| python |
"""Playbook Create"""
# standard library
import base64
import json
import logging
from typing import Any, Dict, Iterable, List, Optional, Union
# third-party
from pydantic import BaseModel
# first-party
from tcex.key_value_store import KeyValueApi, KeyValueRedis
from tcex.utils.utils import Utils
# get tcex logger
logger = logging.getLogger('tcex')
class PlaybookCreate:
"""Playbook Write ABC"""
def __init__(
self,
context: str,
key_value_store: Union[KeyValueApi, KeyValueRedis],
output_variables: list,
):
"""Initialize the class properties."""
self.context = context
self.key_value_store = key_value_store
self.output_variables = output_variables
# properties
self.log = logger
self.utils = Utils()
@staticmethod
def _check_iterable(value: str, validate: bool) -> None:
"""Raise an exception if value is not an Iterable.
Validation:
- not a dict (dicts are iterable)
- not a string (strings are iterable)
- is Iterable
"""
if validate is True and (isinstance(value, (dict, str)) or not isinstance(value, Iterable)):
raise RuntimeError('Invalid data provided for KeyValueArray.')
def _check_null(self, key: str, value: Any) -> bool:
"""Return True if key or value is null."""
invalid = False
if key is None:
self.log.warning('The provided key was None.')
invalid = True
if value is None:
self.log.warning(f'The provided value for key {key} was None.')
invalid = True
return invalid
def _check_requested(self, variable: str, when_requested: bool) -> bool:
"""Return True if output variable was requested by downstream app."""
if when_requested is True and not self.is_requested(variable):
self.log.debug(f'Variable {variable} was NOT requested by downstream app.')
return False
return True
def _check_variable_type(self, variable: str, type_: str) -> None:
"""Validate the correct type was passed to the method."""
if self.utils.get_playbook_variable_type(variable).lower() != type_.lower():
raise RuntimeError(
f'Invalid variable provided ({variable}), variable must be of type {type_}.'
)
@staticmethod
def _coerce_string_value(value: Union[bool, float, int, str]) -> str:
"""Return a string value from an bool or int."""
# coerce bool before int as python says a bool is an int
if isinstance(value, bool):
# coerce bool to str type
value = str(value).lower()
# coerce int to str type
if isinstance(value, (float, int)):
value = str(value)
return value
def _create_data(self, key: str, value: Any) -> None:
"""Write data to key value store."""
self.log.debug(f'writing variable {key.strip()}')
try:
return self.key_value_store.create(self.context, key.strip(), value)
except RuntimeError as e: # pragma: no cover
self.log.error(e)
return None
def _get_variable(self, key: str, variable_type: Optional[str] = None) -> str:
"""Return properly formatted variable.
A key can be provided as the variable key (e.g., app.output) or the
entire (e.g., #App:1234:app.output!String). The full variable is required
to create the record in the KV Store.
If a variable_type is provided an exact match will be found, however if no
variable type is known the first key match will be returned. Uniqueness of
keys is not guaranteed, but in more recent Apps it is the standard.
If no variable is found it means that the variable was not requested by the
any downstream Apps or could possible be formatted incorrectly.
"""
if not self.utils.is_playbook_variable(key):
# try to lookup the variable in the requested output variables.
for output_variable in self.output_variables:
variable_model = self.utils.get_playbook_variable_model(output_variable)
if variable_model.key == key and (
variable_type is None or variable_model.type == variable_type
):
# either an exact match, or first match
return output_variable
# not requested by downstream App or misconfigured
return None
# key was already a properly formatted variable
return key
@staticmethod
def _serialize_data(value: str) -> str:
"""Get the value from Redis if applicable."""
try:
return json.dumps(value)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Invalid data provided, failed to serialize value ({e}).')
@staticmethod
def _process_object_types(
value: Union[BaseModel, dict],
validate: Optional[bool] = True,
allow_none: Optional[bool] = False,
) -> Dict[str, Any]:
"""Process object types (e.g., KeyValue, TCEntity)."""
types = (BaseModel, dict)
if allow_none is True:
types = (BaseModel, dict, type(None))
if validate and not isinstance(value, types):
raise RuntimeError(f'Invalid type provided for object type ({type(value)}).')
if isinstance(value, BaseModel):
value = value.dict(exclude_unset=True)
return value
@staticmethod
def is_key_value(data: dict) -> bool:
"""Return True if provided data has proper structure for Key Value."""
if not isinstance(data, dict):
return False
return all(x in data for x in ['key', 'value'])
def is_requested(self, variable: str) -> bool:
"""Return True if provided variable was requested by downstream App."""
return variable in self.output_variables
@staticmethod
def is_tc_entity(data: dict) -> bool:
"""Return True if provided data has proper structure for TC Entity."""
if not isinstance(data, dict):
return False
return all(x in data for x in ['id', 'value', 'type'])
def any(
self,
key: str,
value: Union[
'BaseModel', bytes, dict, str, List['BaseModel'], List[bytes], List[dict], List[str]
],
validate: Optional[bool] = True,
variable_type: Optional[str] = None,
when_requested: Optional[bool] = True,
) -> Optional[Union[bytes, dict, list, str]]:
"""Write the value to the keystore for all types.
This is a quick helper method, for more advanced features
the individual write methods should be used (e.g., binary).
Args:
key: The variable to write to the DB (e.g., app.colors).
value: The data to write to the DB.
variable_type: The variable type being written. Only required if not unique.
Returns:
(str): Result string of DB write.
"""
if self._check_null(key, value) is True:
return None
# convert key to variable if required
variable = self._get_variable(key, variable_type)
if self._check_requested(variable, when_requested) is False:
return None
# get the type from the variable
variable_type = self.utils.get_playbook_variable_type(variable).lower()
# map type to create method
variable_type_map = {
'binary': self.binary,
'binaryarray': self.binary_array,
'keyvalue': self.key_value,
'keyvaluearray': self.key_value_array,
'string': self.string,
'stringarray': self.string_array,
'tcentity': self.tc_entity,
'tcentityarray': self.tc_entity_array,
# 'tcenhancedentity': self.tc_enhanced_entity_array,
}
return variable_type_map.get(variable_type, self.raw)(
variable, value, validate, when_requested
)
def binary(
self,
key: str,
value: bytes,
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
) -> Optional[int]:
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# convert key to variable if required
variable = self._get_variable(key, 'Binary')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'Binary')
# basic validation of value
if validate and not isinstance(value, bytes):
raise RuntimeError('Invalid data provided for Binary.')
# prepare value - playbook Binary fields are base64 encoded
value = base64.b64encode(value).decode('utf-8')
value = self._serialize_data(value)
return self._create_data(variable, value)
def binary_array(
self,
key: str,
value: List[bytes],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
):
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# validate array type provided
self._check_iterable(value, validate)
# convert key to variable if required
variable = self._get_variable(key, 'BinaryArray')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'BinaryArray')
# basic validation and prep of value
value_encoded = []
for v in value:
if v is not None:
if validate and not isinstance(v, bytes):
raise RuntimeError('Invalid data provided for Binary.')
v = base64.b64encode(v).decode('utf-8')
value_encoded.append(v)
value = value_encoded
value = self._serialize_data(value)
return self._create_data(variable, value)
def key_value(
self,
key: str,
value: Union[BaseModel, dict],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
) -> Optional[int]:
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# convert key to variable if required
variable = self._get_variable(key, 'KeyValue')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'KeyValue')
# basic validation and prep of value
value = self._process_object_types(value, validate)
if validate and not self.is_key_value(value):
raise RuntimeError('Invalid data provided for KeyValueArray.')
value = self._serialize_data(value)
return self._create_data(variable, value)
def key_value_array(
self,
key: str,
value: List[Union[BaseModel, dict]],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
):
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# validate array type provided
self._check_iterable(value, validate)
# convert key to variable if required
variable = self._get_variable(key, 'KeyValueArray')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'KeyValueArray')
# basic validation and prep of value
_value = []
for v in value:
v = self._process_object_types(v, validate, allow_none=True)
if validate and not self.is_key_value(v):
raise RuntimeError('Invalid data provided for KeyValueArray.')
_value.append(v)
value = _value
value = self._serialize_data(value)
return self._create_data(variable, value)
def string(
self,
key: str,
value: Union[bool, float, int, str],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
) -> Optional[int]:
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# convert key to variable if required
variable = self._get_variable(key, 'String')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'String')
# coerce string values
value = self._coerce_string_value(value)
# validation only needs to check str because value was coerced
if validate and not isinstance(value, str):
raise RuntimeError('Invalid data provided for String.')
value = self._serialize_data(value)
return self._create_data(variable, value)
def string_array(
self,
key: str,
value: List[Union[bool, float, int, str]],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
):
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# validate array type provided
self._check_iterable(value, validate)
# convert key to variable if required
variable = self._get_variable(key, 'StringArray')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'StringArray')
# basic validation and prep of value
value_coerced = []
for v in value:
# coerce string values
v = self._coerce_string_value(v)
# validation only needs to check str because value was coerced
if validate and not isinstance(v, (type(None), str)):
raise RuntimeError('Invalid data provided for StringArray.')
value_coerced.append(v)
value = value_coerced
value = self._serialize_data(value)
return self._create_data(variable, value)
# pylint: disable=unused-argument
def raw(
self,
key: str,
value: Union[bytes, str, int],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
) -> str:
"""Create method of CRUD operation for raw data.
Raw data can only be bytes, str, or int. Other data
structures (dict, list, etc) must be serialized.
"""
if self._check_null(key, value):
return None
return self._create_data(key, value)
def tc_entity(
self,
key: str,
value: Union[BaseModel, dict],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
) -> Optional[int]:
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# convert key to variable if required
variable = self._get_variable(key, 'TCEntity')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'TCEntity')
# basic validation
value = self._process_object_types(value, validate)
if validate and not self.is_tc_entity(value):
raise RuntimeError('Invalid data provided for TcEntityArray.')
value = self._serialize_data(value)
return self._create_data(variable, value)
def tc_entity_array(
self,
key: str,
value: List[Union[BaseModel, dict]],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
):
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# validate array type provided
self._check_iterable(value, validate)
# convert key to variable if required
variable = self._get_variable(key, 'TCEntityArray')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'TCEntityArray')
# basic validation and prep of value
_value = []
for v in value:
v = self._process_object_types(v, validate, allow_none=True)
if validate and not self.is_tc_entity(v):
raise RuntimeError('Invalid data provided for TcEntityArray.')
_value.append(v)
value = _value
value = self._serialize_data(value)
return self._create_data(variable, value)
def variable(
self,
key: str,
value: Union[
'BaseModel', bytes, dict, str, List['BaseModel'], List[bytes], List[dict], List[str]
],
variable_type: Optional[str] = None,
) -> str:
"""Alias for any method of CRUD operation for working with KeyValue DB.
This method will automatically check to see if provided variable was requested by
a downstream app and if so create the data in the KeyValue DB.
Args:
key: The variable to write to the DB (e.g., app.colors).
value: The data to write to the DB.
variable_type: The variable type being written. Only required if not unique.
Returns:
(str): Result string of DB write.
"""
if self._check_null(key, value) is True:
return None
# short-circuit the process if there are no downstream variables requested.
if not self.output_variables: # pragma: no cover
self.log.debug(f'Variable {key} was NOT requested by downstream app.')
return None
# key can be provided as the variable key (e.g., app.output) or
# the entire (e.g., #App:1234:app.output!String). we need the
# full variable to proceed.
variable = self._get_variable(key, variable_type)
if variable is None or variable not in self.output_variables:
self.log.debug(f'Variable {key} was NOT requested by downstream app.')
return None
# write the variable
return self.any(variable, value)
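
# A brief usage sketch (illustrative, not taken from the tcex docs): "kv_store" below is
# assumed to be an already configured KeyValueApi or KeyValueRedis instance, and the
# variable names follow the #App:<id>:<key>!<Type> format shown in the docstrings above.
# Given the output variables requested by the downstream App, writing an output is just a
# matter of passing the short key and the value; the class resolves the full variable name,
# validates the type, and serializes the data before storing it.
def write_example_outputs(kv_store):
    playbook_create = PlaybookCreate(
        context='example-context-id',  # hypothetical KV store context/session id
        key_value_store=kv_store,
        output_variables=['#App:1234:app.colors!StringArray', '#App:1234:app.count!String'],
    )
    # the short key is resolved against output_variables to the full variable name
    playbook_create.string_array('app.colors', ['red', 'green', 'blue'])
    playbook_create.string('app.count', 3)  # ints are coerced to str before writing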
| python |
from moviepy.editor import *

clip = (VideoFileClip("../output_videos/project_video.mp4").subclip(10, 40).resize(0.3))
clip.write_gif("../output_videos/project_video.gif")
| python |
# -*- coding: utf-8 -*-
"""
admin security exceptions module.
"""
from pyrin.core.exceptions import CoreException, CoreBusinessException
from pyrin.security.exceptions import AuthorizationFailedError


class AdminSecurityException(CoreException):
    """
    admin security exception.
    """
    pass


class AdminSecurityBusinessException(CoreBusinessException,
                                     AdminSecurityException):
    """
    admin security business exception.
    """
    pass


class AdminAccessNotAllowedError(AuthorizationFailedError,
                                 AdminSecurityBusinessException):
    """
    admin access not allowed error.
    """
    pass
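
# A small illustrative check (not part of pyrin itself): because AdminAccessNotAllowedError
# inherits from both AuthorizationFailedError and this package's business exception,
# handlers that catch either hierarchy will also catch this error.
if __name__ == '__main__':
    assert issubclass(AdminAccessNotAllowedError, AuthorizationFailedError)
    assert issubclass(AdminAccessNotAllowedError, AdminSecurityBusinessException)
    assert issubclass(AdminAccessNotAllowedError, AdminSecurityException)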
| python |
# -*- coding: utf-8 -*-
#
# Copyright (c), 2018-2019, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
XPathToken and helper functions for XPath nodes. XPath error messages and node helper functions
are embedded in XPathToken class, in order to raise errors related to token instances.
In XPath there are 7 kinds of nodes:
element, attribute, text, namespace, processing-instruction, comment, document
Element-like objects are used for representing elements and comments, ElementTree-like objects
for documents. Generic tuples are used for representing attributes and named-tuples for namespaces.
"""
from __future__ import unicode_literals
import locale
import contextlib
from decimal import Decimal
from .compat import string_base_type, unicode_type
from .exceptions import xpath_error
from .namespaces import XQT_ERRORS_NAMESPACE
from .xpath_nodes import AttributeNode, TypedAttribute, TypedElement, \
is_etree_element, is_attribute_node, elem_iter_strings, is_text_node, \
is_namespace_node, is_comment_node, is_processing_instruction_node, \
is_element_node, is_document_node, is_xpath_node, is_schema_node
from .datatypes import UntypedAtomic, Timezone, DayTimeDuration, XSD_BUILTIN_TYPES
from .schema_proxy import AbstractSchemaProxy
from .tdop_parser import Token
from .xpath_context import XPathSchemaContext
def ordinal(n):
if n in {11, 12, 13}:
return '%dth' % n
least_significant_digit = n % 10
if least_significant_digit == 1:
return '%dst' % n
elif least_significant_digit == 2:
return '%dnd' % n
elif least_significant_digit == 3:
return '%drd' % n
else:
return '%dth' % n
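
# For example: ordinal(1) -> '1st', ordinal(2) -> '2nd', ordinal(3) -> '3rd',
# ordinal(4) -> '4th', ordinal(11) -> '11th', ordinal(21) -> '21st'
# (11, 12 and 13 are the special-cased 'th' forms).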
class XPathToken(Token):
"""Base class for XPath tokens."""
comment = None # for XPath 2.0+ comments
xsd_type = None # for XPath 2.0+ schema types labeling
def evaluate(self, context=None):
"""
Evaluate default method for XPath tokens.
:param context: The XPath dynamic context.
"""
return [x for x in self.select(context)]
def select(self, context=None):
"""
Select operator that generates XPath results.
:param context: The XPath dynamic context.
"""
item = self.evaluate(context)
if item is not None:
if isinstance(item, list):
for _item in item:
yield _item
else:
if context is not None:
context.item = item
yield item
def __str__(self):
symbol, label = self.symbol, self.label
if symbol == '$':
return '$%s variable reference' % (self[0].value if self else '')
elif symbol == ',':
return 'comma operator' if self.parser.version > '1.0' else 'comma symbol'
elif label == 'function':
return '%r function' % symbol
elif label == 'axis':
return '%r axis' % symbol
return super(XPathToken, self).__str__()
@property
def source(self):
symbol, label = self.symbol, self.label
if label == 'axis':
return '%s::%s' % (self.symbol, self[0].source)
elif label in ('function', 'constructor'):
return '%s(%s)' % (self.symbol, ', '.join(item.source for item in self))
elif symbol == ':':
return '%s:%s' % (self[0].source, self[1].source)
elif symbol == '(':
return '()' if not self else '(%s)' % self[0].source
elif symbol == '[':
return '%s[%s]' % (self[0].source, self[1].source)
elif symbol == ',':
return '%s, %s' % (self[0].source, self[1].source)
elif symbol == '$':
return '$%s' % self[0].source
elif symbol == '{':
return '{%s}%s' % (self[0].value, self[1].value)
elif symbol == 'instance':
return '%s instance of %s' % (self[0].source, ''.join(t.source for t in self[1:]))
elif symbol == 'treat':
return '%s treat as %s' % (self[0].source, ''.join(t.source for t in self[1:]))
return super(XPathToken, self).source
@property
def error_prefix(self):
for prefix, ns in self.parser.namespaces.items():
if ns == XQT_ERRORS_NAMESPACE:
return prefix
else:
return 'err'
###
# Helper methods
def get_argument(self, context, index=0, required=False, default_to_context=False,
default=None, cls=None):
"""
Get the argument value of a function or constructor token. A zero-length sequence is
converted to a `None` value. If the function has no argument, returns the context's
item if the dynamic context is not `None`.
:param context: the dynamic context.
:param index: the index of the argument to get; defaults to the first argument.
:param required: if set to `True` missing or empty sequence arguments are not allowed.
:param default_to_context: if set to `True` then the item of the dynamic context is \
returned when the argument is missing.
:param default: the default value returned in case the argument is an empty sequence. \
If not provided returns `None`.
:param cls: if a type is provided performs a type checking on item.
"""
try:
selector = self[index].select
except IndexError:
if default_to_context:
if context is None:
self.missing_context()
item = context.item if context.item is not None else context.root
elif required:
raise self.error('XPST0017', "Missing %s argument" % ordinal(index + 1))
else:
return
else:
item = None
for k, result in enumerate(selector(context)):
if k == 0:
item = result
elif not self.parser.compatibility_mode:
self.wrong_context_type("a sequence of more than one item is not allowed as argument")
else:
break
else:
if item is None:
if not required:
return default
ord_arg = ordinal(index + 1)
self.missing_sequence("A non-empty sequence is required for %s argument" % ord_arg)
# Type promotion checking (see "function conversion rules" in XPath 2.0 language definition)
if cls is not None and not isinstance(item, cls):
if self.parser.compatibility_mode:
if issubclass(cls, string_base_type):
return self.string_value(item)
elif issubclass(cls, float) or issubclass(float, cls):
return self.number_value(item)
if self.parser.version > '1.0':
value = self.data_value(item)
if isinstance(value, cls):
return value
elif isinstance(value, UntypedAtomic):
try:
if issubclass(cls, string_base_type):
return str(value)
else:
return cls(value)
except (TypeError, ValueError):
pass
code = 'XPTY0004' if self.label == 'function' else 'FORG0006'
message = "the %s argument %r is not an instance of %r"
raise self.error(code, message % (ordinal(index + 1), item, cls))
return item
def atomization(self, context=None):
"""
Helper method for value atomization of a sequence.
Ref: https://www.w3.org/TR/xpath20/#id-atomization
:param context: the XPath context.
"""
for item in self.select(context):
value = self.data_value(item)
if value is None:
raise self.error('FOTY0012', "argument node {!r} does not have a typed value".format(item))
else:
yield value
def get_atomized_operand(self, context=None):
"""
Get the atomized value for an XPath operator.
:param context: the XPath context.
:return: the atomized value of a single length sequence or `None` if the sequence is empty.
"""
selector = iter(self.atomization(context))
try:
value = next(selector)
except StopIteration:
return
else:
try:
next(selector)
except StopIteration:
if isinstance(value, UntypedAtomic):
value = str(value)
if isinstance(context, XPathSchemaContext):
return value
if self.xsd_type is not None and isinstance(value, string_base_type):
try:
value = self.xsd_type.decode(value)
except (TypeError, ValueError):
msg = "Type {!r} is not appropriate for the context"
self.wrong_context_type(msg.format(type(value)))
return value
else:
self.wrong_context_type("atomized operand is a sequence of length greater than one")
def get_comparison_data(self, context):
"""
Get comparison data couples for the general comparison of sequences. Different sequences
may be generated by an XPath 2.0 parser, depending on the compatibility mode setting.
Ref: https://www.w3.org/TR/xpath20/#id-general-comparisons
:param context: the XPath dynamic context.
:returns: a list of data couples.
"""
if context is None:
operand1 = [x for x in self[0].select()]
operand2 = [x for x in self[1].select()]
else:
operand1 = [x for x in self[0].select(context.copy())]
operand2 = [x for x in self[1].select(context.copy())]
if self.parser.compatibility_mode:
# Boolean comparison if one of the results is a single boolean value (1.)
try:
if isinstance(operand1[0], bool):
if len(operand1) == 1:
return [(operand1[0], self.boolean_value(operand2))]
if isinstance(operand2[0], bool):
if len(operand2) == 1:
return [(self.boolean_value(operand1), operand2[0])]
except IndexError:
return []
# Converts to float for lesser-greater operators (3.)
if self.symbol in ('<', '<=', '>', '>='):
return [
(float(self.data_value(value1)), float(self.data_value(value2)))
for value1 in operand1 for value2 in operand2
]
return [(self.data_value(value1), self.data_value(value2))
for value1 in operand1 for value2 in operand2]
def select_results(self, context):
"""
Generates formatted XPath results.
:param context: the XPath dynamic context.
"""
for result in self.select(context):
if isinstance(result, TypedElement):
yield result[0]
elif isinstance(result, AttributeNode):
yield result[1]
elif isinstance(result, TypedAttribute):
yield result[0][1] if hasattr(result[0][1], 'type') else result[1]
else:
yield result
def get_results(self, context):
"""
Returns formatted XPath results.
:param context: the XPath dynamic context.
:return: a list or a simple datatype when the result is a single simple type \
generated by a literal or function token.
"""
results = [x for x in self.select_results(context)]
if len(results) == 1:
res = results[0]
if isinstance(res, (bool, int, float, Decimal)):
return res
elif isinstance(res, tuple) or is_etree_element(res) or is_document_node(res):
return results
elif is_schema_node(res):
return results
elif self.symbol in ('text', 'node'):
return results
elif self.label in ('function', 'literal'):
return res
else:
return results
else:
return results
def get_operands(self, context, cls=None):
"""
Returns the operands for a binary operator. Float arguments are converted
to decimal if the other argument is a `Decimal` instance.
:param context: the XPath dynamic context.
:param cls: if a type is provided performs a type checking on item.
:return: a couple of values representing the operands. If any operand \
is not available returns a `(None, None)` couple.
"""
arg1 = self.get_argument(context, cls=cls)
if arg1 is None:
return None, None
arg2 = self.get_argument(context, index=1, cls=cls)
if arg2 is None:
return None, None
if isinstance(arg1, Decimal) and isinstance(arg2, float):
return arg1, Decimal(arg2)
elif isinstance(arg2, Decimal) and isinstance(arg1, float):
return Decimal(arg1), arg2
return arg1, arg2
def adjust_datetime(self, context, cls):
"""
XSD datetime adjust function helper.
:param context: the XPath dynamic context.
:param cls: the XSD datetime subclass to use.
:return: an empty list if there is only one argument that is the empty sequence \
or the adjusted XSD datetime instance.
"""
if len(self) == 1:
item = self.get_argument(context, cls=cls)
if item is None:
return []
timezone = getattr(context, 'timezone', None)
else:
item = self.get_argument(context=None, cls=cls) # don't use implicit timezone
timezone = self.get_argument(context, 1, cls=DayTimeDuration)
if timezone is not None:
timezone = Timezone.fromduration(timezone)
if item.tzinfo is not None and timezone is not None:
item += timezone.offset - item.tzinfo.offset
item.tzinfo = timezone
elif item.tzinfo is None:
if timezone is not None:
item.tzinfo = timezone
elif timezone is None:
item.tzinfo = None
return item
def match_xsd_type(self, schema_item, name):
"""
Match a token with a schema type, checking the matching between the provided schema
item and name. If there is a match and the token is already related with another
schema type an exception is raised.
:param schema_item: an XPath item related with a schema instance.
:param name: a QName in extended format for matching the item.
:returns: the matched XSD type or `None` if there isn't a match.
"""
if isinstance(schema_item, AttributeNode):
if not schema_item[1].is_matching(name):
return
try:
xsd_type = schema_item[1].type
except AttributeError:
try:
xsd_type = self.parser.schema.get_attribute(name).type
except AttributeError:
return
elif is_etree_element(schema_item):
if hasattr(schema_item, 'is_matching'):
if not schema_item.is_matching(name, self.parser.default_namespace):
return
elif schema_item.tag != name:
return
try:
xsd_type = schema_item.type
except AttributeError:
try:
xsd_type = self.parser.schema.get_element(name).type
except AttributeError:
return
else:
return
if self.xsd_type is None:
self.xsd_type = xsd_type
elif self.xsd_type is not xsd_type:
self.wrong_context_type("Multiple XSD type matching during static analysis")
return xsd_type
def get_typed_node(self, context, item):
"""
Returns a typed node if the token is bound to an XSD type.
:param context: the XPath dynamic context.
:param item: an untyped XPath attribute or element.
"""
if isinstance(self.xsd_type, (type(None), AbstractSchemaProxy)):
return item
if isinstance(context, XPathSchemaContext):
primitive_type = self.parser.schema.get_primitive_type(self.xsd_type)
try:
value = XSD_BUILTIN_TYPES[primitive_type.local_name or 'anyType'].value
except KeyError:
value = XSD_BUILTIN_TYPES['anyType'].value
if isinstance(item, AttributeNode):
return TypedAttribute(item, value)
else:
return TypedElement(item, value)
else:
try:
if isinstance(item, AttributeNode):
return TypedAttribute(item, self.xsd_type.decode(item[1]))
elif self.xsd_type.is_simple() or self.xsd_type.has_simple_content():
return TypedElement(item, self.xsd_type.decode(item.text))
else:
return item
except (TypeError, ValueError):
msg = "Type {!r} does not match sequence type of {!r}"
self.wrong_sequence_type(msg.format(self.xsd_type, item))
@contextlib.contextmanager
def use_locale(self, collation):
"""A context manager for setting a specific collation for a code block."""
locale.setlocale(locale.LC_ALL, '')
default_locale = locale.getlocale()
try:
locale.setlocale(locale.LC_ALL, collation)
except locale.Error:
raise self.error('FOCH0002', 'Unsupported collation %r' % collation)
else:
yield
finally:
locale.setlocale(locale.LC_ALL, default_locale)
###
# XPath data accessors base functions
def data_value(self, obj):
"""
The typed value, as computed by fn:data() on each item. Returns an instance of
UntypedAtomic.
"""
if is_attribute_node(obj) or isinstance(obj, TypedElement):
obj = obj[1]
if obj is None:
return
elif not is_xpath_node(obj):
return obj
elif hasattr(obj, 'type'):
return self.schema_node_value(obj) # Schema context
return UntypedAtomic(self.string_value(obj))
def boolean_value(self, obj):
"""
The effective boolean value, as computed by fn:boolean().
"""
if isinstance(obj, list):
if not obj:
return False
elif isinstance(obj[0], tuple) or is_element_node(obj[0]):
return True
elif len(obj) == 1:
return bool(obj[0])
else:
raise self.error(
code='FORG0006',
message="Effective boolean value is not defined for a sequence of two or "
"more items not starting with an XPath node.",
)
elif isinstance(obj, tuple) or is_element_node(obj):
raise self.error('FORG0006', "Effective boolean value is not defined for {}.".format(obj))
return bool(obj)
def string_value(self, obj):
"""
The string value, as computed by fn:string().
"""
if obj is None:
return ''
elif is_element_node(obj):
return ''.join(elem_iter_strings(obj))
elif is_attribute_node(obj):
return unicode_type(obj[1])
elif is_text_node(obj):
return obj
elif is_document_node(obj):
return ''.join(e.text for e in obj.getroot().iter() if e.text is not None)
elif is_namespace_node(obj):
return obj[1]
elif is_comment_node(obj):
return obj.text
elif is_processing_instruction_node(obj):
return obj.text
elif is_schema_node(obj):
return str(self.schema_node_value(obj))
else:
return str(obj)
def number_value(self, obj):
"""
The numeric value, as computed by fn:number() on each item. Returns a float value.
"""
try:
return float(self.string_value(obj) if is_xpath_node(obj) else obj)
except (TypeError, ValueError):
return float('nan')
def schema_node_value(self, obj):
"""
Returns a sample typed value for the XSD schema node, valid in the value space
of the node. Used for schema-based dynamic evaluation of XPath expressions.
"""
try:
if obj.type.is_simple() or obj.type.has_simple_content():
# In case of a schema element or attribute use the sample value
# of the primitive type
primitive_type = self.parser.schema.get_primitive_type(obj.type)
return XSD_BUILTIN_TYPES[primitive_type.local_name].value
elif obj.type.local_name == 'anyType':
return XSD_BUILTIN_TYPES['anyType'].value
else:
return UntypedAtomic('')
except AttributeError:
raise self.wrong_type("the argument %r is not a node of an XSD schema" % obj)
###
# Error handling helpers
def error(self, code, message=None):
"""
Returns an XPath error instance related with a code. An XPath/XQuery/XSLT error code is an
alphanumeric token starting with four uppercase letters and ending with four digits.
:param code: the error code.
:param message: an optional custom additional message.
"""
return xpath_error(code, message, self, self.error_prefix)
# Shortcuts for XPath errors
def wrong_syntax(self, message=None):
if self.symbol == '::' and self.parser.token.symbol == '(name)':
self.missing_axis(message or "Axis '%s::' not found" % self.parser.token.value)
super(XPathToken, self).wrong_syntax(message)
def wrong_value(self, message=None):
raise self.error('FOCA0002', message)
def wrong_type(self, message=None):
raise self.error('FORG0006', message)
def missing_schema(self, message=None):
raise self.error('XPST0001', message)
def missing_context(self, message=None):
raise self.error('XPDY0002', message)
def wrong_context_type(self, message=None):
raise self.error('XPTY0004', message)
def missing_sequence(self, message=None):
raise self.error('XPST0005', message)
def missing_name(self, message=None):
raise self.error('XPST0008', message)
def missing_axis(self, message=None):
raise self.error('XPST0010', message)
def wrong_nargs(self, message=None):
raise self.error('XPST0017', message)
def wrong_step_result(self, message=None):
raise self.error('XPTY0018', message)
def wrong_intermediate_step_result(self, message=None):
raise self.error('XPTY0019', message)
def wrong_axis_argument(self, message=None):
raise self.error('XPTY0020', message)
def wrong_sequence_type(self, message=None):
raise self.error('XPDY0050', message)
def unknown_atomic_type(self, message=None):
raise self.error('XPST0051', message)
def wrong_target_type(self, message=None):
raise self.error('XPST0080', message)
def unknown_namespace(self, message=None):
raise self.error('XPST0081', message)
| python |
#! /usr/bin/env python3
# Conditions:
# * A child is playing with a ball on the nth floor of a tall building
# * The height of this floor, h, is known
# * He drops the ball out of the window. The ball bounces (for example),
# to two-thirds of its height (a bounce of 0.66).
# * His mother looks out of a window 1.5 meters from the ground.
# * How many times will the mother see the ball pass in front of her
# window (including when it's falling and bouncing)?
#
# -- Three conditions must be met for a valid experiment:
# 1) Float parameter "h" in meters must be greater than 0
# 2) Float parameter "bounce" must be greater than 0 and less than 1
# 3) Float parameter "window" must be less than h.
# == If all three conditions above are fulfilled, return a positive
# integer, otherwise return -1.
#
# Note: The ball can only be seen if the height of the rebounding ball
# is strictly greater than the window parameter.
#
# Example:
# 1) h = 3, bounce = 0.66, window = 1.5, result is 3
# 2) h = 3, bounce = 1, window = 1.5, result is -1 (*)
# (*) Condition 2 not fulfilled.
#
def bouncing_ball(h, bounce, window):
    # Validate the three experiment conditions; return -1 if any is violated.
    if h <= 0 or not (0 < bounce < 1) or window >= h:
        return -1
    count = 0
    while h > window:
        # The initial drop is seen once; every later pass above the window is a
        # bounce up followed by a fall back down, so it is seen twice.
        count += 2 if count % 2 == 1 else 1
        h *= bounce
    return count


def test_bouncing_ball():
    assert -1 == bouncing_ball(-1, 0, 0)
    assert -1 == bouncing_ball(0, 0, 0)
    assert -1 == bouncing_ball(1, -1, 0)
    assert -1 == bouncing_ball(1, 0, 0)
    assert -1 == bouncing_ball(1, 1, 0)
    assert -1 == bouncing_ball(1, 2, 0)
    assert -1 == bouncing_ball(1, .5, 1)
    assert -1 == bouncing_ball(1, .5, 2)
    assert 3 == bouncing_ball(3, 0.66, 1.5)
    assert -1 == bouncing_ball(3, 1, 1.5)


if __name__ == '__main__':
    test_bouncing_ball()
| python |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: lbrynet/schema/proto/source.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='lbrynet/schema/proto/source.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n!lbrynet/schema/proto/source.proto\"\xde\x01\n\x06Source\x12 \n\x07version\x18\x01 \x02(\x0e\x32\x0f.Source.Version\x12\'\n\nsourceType\x18\x02 \x02(\x0e\x32\x13.Source.SourceTypes\x12\x0e\n\x06source\x18\x03 \x02(\x0c\x12\x13\n\x0b\x63ontentType\x18\x04 \x02(\t\"*\n\x07Version\x12\x13\n\x0fUNKNOWN_VERSION\x10\x00\x12\n\n\x06_0_0_1\x10\x01\"8\n\x0bSourceTypes\x12\x17\n\x13UNKNOWN_SOURCE_TYPE\x10\x00\x12\x10\n\x0clbry_sd_hash\x10\x01')
)
_SOURCE_VERSION = _descriptor.EnumDescriptor(
name='Version',
full_name='Source.Version',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_VERSION', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='_0_0_1', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=160,
serialized_end=202,
)
_sym_db.RegisterEnumDescriptor(_SOURCE_VERSION)
_SOURCE_SOURCETYPES = _descriptor.EnumDescriptor(
name='SourceTypes',
full_name='Source.SourceTypes',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_SOURCE_TYPE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='lbry_sd_hash', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=204,
serialized_end=260,
)
_sym_db.RegisterEnumDescriptor(_SOURCE_SOURCETYPES)
_SOURCE = _descriptor.Descriptor(
name='Source',
full_name='Source',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='Source.version', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sourceType', full_name='Source.sourceType', index=1,
number=2, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source', full_name='Source.source', index=2,
number=3, type=12, cpp_type=9, label=2,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contentType', full_name='Source.contentType', index=3,
number=4, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_SOURCE_VERSION,
_SOURCE_SOURCETYPES,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=38,
serialized_end=260,
)
_SOURCE.fields_by_name['version'].enum_type = _SOURCE_VERSION
_SOURCE.fields_by_name['sourceType'].enum_type = _SOURCE_SOURCETYPES
_SOURCE_VERSION.containing_type = _SOURCE
_SOURCE_SOURCETYPES.containing_type = _SOURCE
DESCRIPTOR.message_types_by_name['Source'] = _SOURCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Source = _reflection.GeneratedProtocolMessageType('Source', (_message.Message,), dict(
DESCRIPTOR = _SOURCE,
__module__ = 'lbrynet.schema.proto.source_pb2'
# @@protoc_insertion_point(class_scope:Source)
))
_sym_db.RegisterMessage(Source)
# @@protoc_insertion_point(module_scope)
| python |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 9 23:58:12 2021

@author: AKayal
"""
from collections import namedtuple
from typing import List, NamedTuple
import datetime
from datetime import date


class personal_details(NamedTuple):
    """
    Using the typing module, we can be even more explicit about our data structures.
    https://realpython.com/python-namedtuple/
    """
    ssn: str
    first_name: str
    last_name: str
    gender: str
    language: str
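
# A minimal usage sketch (the field values below are made up for illustration):
# instances are built by keyword and fields are read back as plain attributes.
example_person = personal_details(
    ssn='123-45-6789',
    first_name='Ada',
    last_name='Lovelace',
    gender='female',
    language='English',
)
assert example_person.first_name == 'Ada'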
| python |
from whirlwind.store import create_task
from delfick_project.norms import sb, dictobj, Meta
from tornado.web import RequestHandler, HTTPError
from tornado import websocket
import binascii
import logging
import asyncio
import json
import uuid
log = logging.getLogger("whirlwind.request_handlers.base")
class Finished(Exception):
def __init__(self, status=500, **kwargs):
self.kwargs = kwargs
self.kwargs["status"] = status
def as_dict(self):
return self.kwargs
def reprer(o):
if type(o) is bytes:
return binascii.hexlify(o).decode()
return repr(o)
class MessageFromExc:
def __init__(self, *, log_exceptions=True, see_exception=None):
self.see_exception = see_exception
self.log_exceptions = log_exceptions
def __call__(self, exc_type, exc, tb):
if isinstance(exc, Finished):
return exc.kwargs
else:
return self.process(exc_type, exc, tb)
def process(self, exc_type, exc, tb):
if self.see_exception:
self.see_exception(exc_type, exc, tb)
if exc_type is asyncio.CancelledError:
return {
"status": 500,
"error": "Request was cancelled",
"error_code": "RequestCancelled",
}
else:
if self.see_exception is None and self.log_exceptions:
log.error(exc, exc_info=(exc_type, exc, tb))
return {
"status": 500,
"error": "Internal Server Error",
"error_code": "InternalServerError",
}
class AsyncCatcher(object):
def __init__(self, request, info, final=None):
self.info = info
self.final = final
self.request = request
async def __aenter__(self):
pass
async def __aexit__(self, exc_type, exc, tb):
if exc is None:
self.complete(self.info.get("result"), status=200)
return
msg = self.request.message_from_exc(exc_type, exc, tb)
self.complete(msg, status=500, exc_info=(exc_type, exc, tb))
# And don't reraise the exception
return True
def send_msg(self, msg, status=200, exc_info=None):
if self.request._finished and not hasattr(self.request, "ws_connection"):
if type(msg) is dict:
msg = json.dumps(msg, default=self.request.reprer, sort_keys=True, indent=" ")
self.request.hook("request_already_finished", msg)
return
if hasattr(msg, "exc_info") and exc_info is None:
exc_info = msg.exc_info
if self.final is None:
self.request.send_msg(msg, status, exc_info=exc_info)
else:
self.final(msg, exc_info=exc_info)
def complete(self, msg, status=sb.NotSpecified, exc_info=None):
if type(msg) is dict:
result = json.loads(json.dumps(msg, default=self.request.reprer, indent=" "))
else:
result = msg
self.send_msg(result, status=status, exc_info=exc_info)
class RequestsMixin:
"""
A mixin class you may use for your handler which provides some handy methods
for dealing with data
"""
_merged_options_formattable = True
def hook(self, func, *args, **kwargs):
if hasattr(self, func):
return getattr(self, func)(*args, **kwargs)
# def process_reply(self, msg, exc_info=None):
# """A hook that provides the msg sent as reply or progress"""
# pass
# def request_already_finished(self, msg):
# """Hook for when we would send a message to an already closed websocket"""
@property
def reprer(self):
if not hasattr(self, "_reprer"):
self._reprer = reprer
return self._reprer
@reprer.setter
def reprer(self, value):
self._reprer = value
@property
def message_from_exc(self):
if not hasattr(self, "_message_from_exc"):
self._message_from_exc = MessageFromExc(
see_exception=self.see_returned_exception,
log_exceptions=getattr(self, "log_exceptions", True),
)
return self._message_from_exc
def see_returned_exception(self, exc_typ, exc, tb):
if getattr(self, "log_exceptions", True):
log.error(exc, exc_info=(exc_typ, exc, tb))
@message_from_exc.setter
def message_from_exc(self, value):
self._message_from_exc = value
def async_catcher(self, info, final=None):
return AsyncCatcher(self, info, final=final)
def body_as_json(self, body=None):
"""
Return the body of the request as a json object
If there is a special ``__body__`` file in the request, we will consider this
to be the body instead of the request body
"""
if body is None:
if "__body__" in self.request.files:
body = self.request.files["__body__"][0]["body"].decode()
else:
body = self.request.body.decode()
try:
if type(body) is str:
body = json.loads(body)
except (TypeError, ValueError) as error:
self.log_json_error(body, error)
raise Finished(status=400, reason="Failed to load body as json", error=error)
return body
def log_json_error(self, body, error):
"""
Do any logging for errors parsing the request body as json
"""
log.error("Failed to load body as json\t%s", body)
def send_msg(self, msg, status=sb.NotSpecified, exc_info=None):
"""
This determines what content-type and exact body to write to the response
If ``msg`` has ``as_dict``, we call it.
If ``msg`` is a dictionary and has status, we use that as the status of
the request, otherwise we say it's a 200.
If there is ``html`` in ``msg``, we use that as the body of the request.
If ``msg`` is None, we close without a body.
* If ``msg`` is a ``dict`` or ``list``, we write it as a json object.
* If ``msg`` starts with ``<html>`` or ``<!DOCTYPE html>`` we treat it
as html content
* Otherwise we write ``msg`` as ``text/plain``
"""
if hasattr(msg, "exc_info") and exc_info is None:
exc_info = msg.exc_info
if hasattr(msg, "as_dict"):
msg = msg.as_dict()
self.hook("process_reply", msg, exc_info=exc_info)
if type(msg) is dict and "status" in msg:
status = msg["status"]
elif exc_info and exc_info[1]:
if hasattr(exc_info[1], "status"):
status = exc_info[1].status
else:
status = 500
if status is sb.NotSpecified:
status = 200
self.set_status(status)
if type(msg) is dict and "html" in msg:
msg = msg["html"]
if msg is None:
self.finish()
return
if type(msg) in (dict, list):
self.set_header("Content-Type", "application/json; charset=UTF-8")
self.write(json.dumps(msg, default=self.reprer, sort_keys=True, indent=" "))
elif msg.lstrip().startswith("<html>") or msg.lstrip().startswith("<!DOCTYPE html>"):
self.write(msg)
else:
self.set_header("Content-Type", "text/plain; charset=UTF-8")
self.write(msg)
self.finish()
class Simple(RequestsMixin, RequestHandler):
"""
Helper for using ``self.async_catcher`` from ``RequestsMixin`` for most HTTP verbs.
.. code-block:: python
class MyRequestHandler(Simple):
async def do_get(self):
return "<html><body><p>lol</p></body></html>"
Essentially you define ``async def do_<verb>(self)`` methods for each verb
you want to support.
This supports
* get
* put
* post
* patch
* delete
"""
log_exceptions = True
async def get(self, *args, **kwargs):
if not hasattr(self, "do_get"):
raise HTTPError(405)
info = {"result": None}
async with self.async_catcher(info):
info["result"] = await self.do_get(*args, **kwargs)
async def put(self, *args, **kwargs):
if not hasattr(self, "do_put"):
raise HTTPError(405)
info = {"result": None}
async with self.async_catcher(info):
info["result"] = await self.do_put(*args, **kwargs)
async def post(self, *args, **kwargs):
if not hasattr(self, "do_post"):
raise HTTPError(405)
info = {"result": None}
async with self.async_catcher(info):
info["result"] = await self.do_post(*args, **kwargs)
async def patch(self, *args, **kwargs):
if not hasattr(self, "do_patch"):
raise HTTPError(405)
info = {"result": None}
async with self.async_catcher(info):
info["result"] = await self.do_patch(*args, **kwargs)
async def delete(self, *args, **kwargs):
if not hasattr(self, "do_delete"):
raise HTTPError(405)
info = {"result": None}
async with self.async_catcher(info):
info["result"] = await self.do_delete(*args, **kwargs)
json_spec = sb.match_spec(
(bool, sb.any_spec()),
(int, sb.any_spec()),
(float, sb.any_spec()),
(str, sb.any_spec()),
(list, lambda: sb.listof(json_spec)),
(type(None), sb.any_spec()),
fallback=lambda: sb.dictof(sb.string_spec(), json_spec),
)
class SimpleWebSocketBase(RequestsMixin, websocket.WebSocketHandler):
"""
Used for websocket handlers
Implement ``process_message``
.. automethod:: whirlwind.request_handlers.base.SimpleWebSocketBase.process_message
This class takes in messages of the form ``{"path": <string>, "message_id": <string>, "body": <dictionary>}``
It will respond with messages of the form ``{"reply": <reply>, "message_id": <message_id>}``
It treats a path of ``__tick__`` as special and responds with ``{"reply": {"ok": "thankyou"}, "message_id": "__tick__"}``
It relies on the client side closing the connection when it's finished.
"""
log_exceptions = True
def initialize(self, final_future, server_time, wsconnections):
self.server_time = server_time
self.final_future = final_future
self.wsconnections = wsconnections
class WSMessage(dictobj.Spec):
path = dictobj.Field(sb.string_spec, wrapper=sb.required)
message_id = dictobj.Field(
sb.or_spec(sb.string_spec(), sb.tupleof(sb.string_spec())), wrapper=sb.required
)
body = dictobj.Field(json_spec, wrapper=sb.required)
message_spec = WSMessage.FieldSpec()
class Closing(object):
pass
def open(self):
self.key = str(uuid.uuid1())
self.connection_future = asyncio.Future()
if self.final_future.done():
self.connection_future.cancel()
return
canceller = lambda res: self.connection_future.cancel()
self.final_future.add_done_callback(canceller)
self.connection_future.add_done_callback(
lambda res: self.final_future.remove_done_callback(canceller)
)
if self.server_time is not None:
self.reply(self.server_time, message_id="__server_time__")
self.hook("websocket_opened")
def reply(self, msg, message_id=None, exc_info=None):
if msg is None:
msg = {"done": True}
# I bypass tornado converting the dictionary so that non jsonable things can be repr'd
if hasattr(msg, "as_dict"):
msg = msg.as_dict()
reply = {"reply": msg, "message_id": message_id}
reply = json.dumps(reply, default=self.reprer).replace("</", "<\\/")
if message_id not in ("__tick__", "__server_time__"):
self.hook("process_reply", msg, exc_info=exc_info)
if self.ws_connection:
self.write_message(reply)
def on_message(self, message):
self.hook("websocket_message", message)
try:
parsed = json.loads(message)
except (TypeError, ValueError) as error:
self.reply({"error": "Message wasn't valid json\t{0}".format(str(error))})
return
if type(parsed) is dict and "path" in parsed and parsed["path"] == "__tick__":
parsed["message_id"] = "__tick__"
parsed["body"] = "__tick__"
try:
msg = self.message_spec.normalise(Meta.empty(), parsed)
except Exception as error:
self.hook("websocket_invalid_message", error, parsed)
if hasattr(error, "as_dict"):
error = error.as_dict()
else:
error = str(error)
self.reply({"error_code": "InvalidMessage", "error": error})
else:
path = msg.path
body = msg.body
message_id = msg.message_id
message_key = str(uuid.uuid4())
if path == "__tick__":
self.reply({"ok": "thankyou"}, message_id=message_id)
return
def on_processed(final, exc_info=None):
if final is self.Closing:
self.reply({"closing": "goodbye"}, message_id=message_id)
self.close()
else:
self.reply(final, message_id=message_id, exc_info=exc_info)
try:
self.message_done(msg, final, message_key, exc_info=exc_info)
except Exception as error:
self.handle_message_done_error(error, msg, final, message_key, exc_info)
async def doit():
info = {}
def progress_cb(progress, **kwargs):
for m in self.transform_progress(msg, progress, **kwargs):
self.reply(m, message_id=message_id)
async with self.async_catcher(info, on_processed):
result = await self.process_message(
path, body, message_id, message_key, progress_cb
)
if isinstance(result, asyncio.Future) or hasattr(result, "__await__"):
result = await result
info["result"] = result
def done(res):
if message_key in self.wsconnections:
del self.wsconnections[message_key]
if not res.cancelled():
self.handle_request_done_exception(res.exception())
t = create_task(doit(), name=f"<process_command: {body}>")
t.add_done_callback(done)
self.wsconnections[message_key] = t
def message_done(self, request, final, message_key, exc_info=None):
"""
Hook for when we have finished processing a request
By default nothing is done.
request
The original request
final
The last response to be sent back.
message_key
The uuid the server generated for this request
exc_info
The (exc_type, exc, traceback) for any exception that stopped the processing of the request
"""
def handle_message_done_error(self, error, msg, final, message_key, exc_info):
"""
Hook for when message_done raised an exception
By default we ``log.exception(error)``
error
The exception that was raised
request
The original request
final
The last response to be sent back.
message_key
The uuid the server generated for this request
exc_info
The (exc_type, exc, traceback) for any exception that stopped the processing of the request
before message_done had been called
"""
log.exception(error)
def handle_request_done_exception(self, error):
"""Hook that takes in an exception from the entire request"""
if error and self.log_exceptions:
log.exception(error, exc_info=(type(error), error, error.__traceback__))
def transform_progress(self, body, progress, **kwargs):
"""
        Hook for transforming progress messages. This must be a generator that yields 0 or more messages.
So when the ``progress_cb`` is called like ``progress_cb("some message", arg=1)`` we will do:
.. code-block:: python
for m in self.transform_progress(<request>, "some message", arg=1):
# write ``{"reply": m, "message_id": <message_id>}``
where ``<request>`` is the entire message that started this stream.
By default kwargs are ignored and we just yield ``{"progress": progress}`` once
"""
yield {"progress": progress}
async def process_message(self, path, body, message_id, message_key, progress_cb):
"""
Return the response to be sent back when we get a message from the conn.
path
The uri specified in the message
body
The body specified in the message
message_id
The unique message_id for this stream of requests as supplied in the request
message_key
A unique id for this stream created by the server
progress_cb
            A callback that will send a message of the form ``{"progress": <progress>, "message_id": <message_id>}``
where ``<progress>`` is the argument passed into the callback
"""
raise NotImplementedError
def on_close(self):
"""Hook for when a websocket connection closes"""
self.connection_future.cancel()
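# A minimal sketch (kept as a comment) of how the ``process_message`` hook above might be
# implemented by a subclass; ``SimpleWSHandler`` is a hypothetical stand-in for the handler
# class defined above, which is why this sketch is not executable as-is.
#
# class EchoWSHandler(SimpleWSHandler):
#     async def process_message(self, path, body, message_id, message_key, progress_cb):
#         progress_cb("received request", path=path)
#         return {"echo": body, "path": path}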
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 22 14:33:38 2017
@author: paul
"""
from weatherTLKT import Weather
typ='ens'
for ss in range(1,9):
if typ=='solo':
mydate='20171127'
website='http://nomads.ncep.noaa.gov:9090/dods'
model='gfs'
resolution='0p25'
url=website+'/'+model+'_'+resolution+'/'+model+mydate+'/'+model+'_'+resolution+'_00z'
pathToSaveObj='../data/'+ model+mydate+'_'+resolution
else :
mydate='20171127'
website='http://nomads.ncep.noaa.gov:9090/dods'
model='gens'
resolution='0p25'
num_scenario='0'+str(ss)
url=website+'/'+model+'/'+model+mydate+'/'+'gep'+num_scenario+'_00z'
pathToSaveObj='../data/'+ model+mydate+'_'+num_scenario
latBound=[43,50]
lonBound=[-10+360, 360]
Weather.download(url,pathToSaveObj,latBound=latBound,lonBound=lonBound,timeSteps=[0,85],ens=True)
| python |
from django.views.generic import TemplateView, ListView, DetailView
from . import models
class DashboardView(TemplateView):
template_name = "organizations/dashboard.html"
class OrganizationDetailView(DetailView):
template_name = "organizations/organization_details.html"
model = models.Organization
class OrganizationListView(ListView):
template_name = "organizations/organization_list.html"
model = models.Organization
class OrganizationalUnitDetailView(DetailView):
template_name = "organizations/organizational_unit_details.html"
model = models.OrganizationalUnit
class OrganizationalUnitListView(ListView):
template_name = "organizations/organizational_unit_list.html"
model = models.OrganizationalUnit
| python |
import csv
import xlsxwriter
import datetime
# Sequence Analysis Data Object
# Holds all items needed for analysis
class SeqData:
its_dict = None
seq_config = None
num_threads = None
output_format = None
def __init__(self, its_dict, seq_config, num_threads, output_format):
self.num_threads = num_threads
self.its_dict = its_dict
self.seq_config = seq_config
self.output_format = output_format
# Sequence Analysis Run Object
# Put into queue; used in Perform()
class SeqRun:
p_id = None
path = None
def __init__(self, p_id, path):
self.p_id = p_id
self.path = path
# Output Object
# sent to output functions
class OutData:
batch_store = None
seq_config = None
results = None
def __init__(self, batch_store, seq_config, results):
self.batch_store = batch_store
self.seq_config = seq_config
self.results = results
# Output to CSV format
def output_csv(out_data):
"This method outputs the analysis results to a .csv file"
# output code
print("Output in .csv")
# create + write csv file
out_file = out_data.seq_config['outputDirPath'] +'//'+ "LC2-"+out_data.batch_store+"-"+out_data.seq_config['seqType']+"-"+str(out_data.seq_config['PauseDur']).replace('.','p')+"-"+str(out_data.seq_config['roundingEnabled'])+"-"+datetime.datetime.now().strftime('%m%d%y-%H%M')+".csv"
    with open(out_file, 'w', newline='') as f:  # open csv file to be written (text mode, as required by Python 3's csv module)
csv_writer = csv.writer(f, delimiter = ',')
for line in out_data.results:#loop to write rows to csv file
line = line.split(',')
csv_writer.writerow(line)
# Output to TXT format
def ouput_txt(out_data):
"This method outputs the analysis results to a .txt file"
# output code
print("Output in .txt")
# create + write txt file
out_file = out_data.seq_config['outputDirPath'] +'//'+ "LC2-"+out_data.batch_store+"-"+out_data.seq_config['seqType']+"-"+str(out_data.seq_config['PauseDur']).replace('.','p')+"-"+str(out_data.seq_config['roundingEnabled'])+"-"+datetime.datetime.now().strftime('%m%d%y-%H%M')+".txt"
with open(out_file,'w') as f:
for line in out_data.results:
f.writelines(line+"\n")
# Output to Excel format
def output_xlsx(out_data):
"This method outputs the analysis results to a .xlsx file"
print("Output in .xlsx")
# create workbook & add sheet
out_file = out_data.seq_config['outputDirPath'] +'//'+ "LC2-"+out_data.batch_store+"-"+out_data.seq_config['seqType']+"-"+str(out_data.seq_config['PauseDur']).replace('.','p')+"-"+str(out_data.seq_config['roundingEnabled'])+"-"+datetime.datetime.now().strftime('%m%d%y-%H%M')+".xlsx"
workbook = xlsxwriter.Workbook(out_file)
worksheet = workbook.add_worksheet()
# start from first cell
row = 0
# insert into worksheet
for line in out_data.results:
col = 0
for cell in str(line).split(","):
worksheet.write(row, col, cell)
col += 1
row += 1
# close file
    workbook.close()
 | python |
import pandas as pd
from strategy.astrategy import AStrategy
from processor.processor import Processor as p
from datetime import timedelta
import pytz
from tqdm import tqdm
from time import sleep
pd.options.mode.chained_assignment = None
class ProgressReport(AStrategy):
def __init__(self,start_date,end_date,modeling_params={},trading_params={"value":True,"requirement":5}):
super().__init__(f"progress_report",
start_date,
end_date,
{"market":{}},modeling_params=modeling_params,trading_params=trading_params)
self.exit_days = 45
self.last_call_day = 90
@classmethod
def required_params(self):
required = {"timeframe":"quarterly"
,"requirement":5}
return required
def create_sim(self):
if self.simmed:
self.db.connect()
sim = self.db.retrieve("sim")
self.db.disconnect()
else:
start_year = self.start_date.year
end_year = self.end_date.year
market = self.subscriptions["market"]["db"]
market.connect()
self.db.connect()
tickers = market.retrieve_tickers("prices")
sim = []
for ticker in tqdm(tickers["ticker"].unique(),desc=f"{self.name}_sim"):
prices = market.retrieve_ticker_prices("prices",ticker)
prices = p.column_date_processing(prices)
prices["year"] = [x.year for x in prices["date"]]
prices["quarter"] = [x.quarter for x in prices["date"]]
for year in range(start_year,end_year):
for quarter in range(1,5):
try:
ticker_data = prices[(prices["year"]==year) & (prices["quarter"]==quarter)].sort_values("date")
sp = ticker_data.iloc[0]["adjclose"].item()
ticker_data["quarter_start"] = sp
ticker_data["delta"] = (ticker_data["adjclose"] - sp) / sp
ticker_data = ticker_data[["date","adjclose","delta","ticker"]]
for param in self.modeling_params:
ticker_data[param]=self.modeling_params[param]
sim.append(ticker_data)
self.db.store("sim",ticker_data)
except Exception as e:
continue
sim = pd.concat(sim)
self.db.disconnect()
market.disconnect()
self.simmed = True
return sim
def create_rec(self,date):
self.db.connect()
rec = self.db.query("rec",self.modeling_params)
self.db.disconnect()
if rec.index.size > 1:
rec = p.column_date_processing(rec)
small_rec = rec[rec["date"]>=date]
if small_rec.index.size > 1:
return small_rec
else:
year = date.year
month = date.month
quarter = int((month-1)/3) + 1
market = self.subscriptions["market"]["db"]
market.connect()
self.db.connect()
tickers = market.retrieve_tickers("prices")
sim = []
for ticker in tqdm(tickers["ticker"].unique(),desc=f"{self.name}_sim"):
prices = market.retrieve_ticker_prices("prices",ticker)
prices = p.column_date_processing(prices)
prices["year"] = [x.year for x in prices["date"]]
prices["quarter"] = [x.quarter for x in prices["date"]]
try:
ticker_data = prices[(prices["year"]==year) & (prices["quarter"]==quarter)].sort_values("date")
sp = ticker_data.iloc[0]["adjclose"].item()
ticker_data["quarter_start"] = sp
ticker_data["delta"] = (ticker_data["adjclose"] - sp) / sp
ticker_data = ticker_data[["date","adjclose","delta","ticker"]]
for param in self.modeling_params:
ticker_data[param]=self.modeling_params[param]
sim.append(ticker_data.tail(1))
self.db.store("rec",ticker_data.tail(1))
except Exception as e:
continue
recs = pd.concat(sim)
self.db.disconnect()
market.disconnect()
        return recs
 | python |
from functools import reduce
from operator import mul
import numpy as onp
from numpy.testing import assert_allclose
import pytest
import scipy.stats as osp_stats
import jax
from jax import grad, lax, random
import jax.numpy as np
from jax.scipy.special import logit
import numpyro.contrib.distributions as dist
from numpyro.contrib.distributions import jax_multivariate, validation_enabled
from numpyro.distributions import constraints
def idfn(param):
if isinstance(param, (osp_stats._distn_infrastructure.rv_generic,
osp_stats._multivariate.multi_rv_generic)):
return param.name
elif isinstance(param, constraints.Constraint):
return param.__class__.__name__
return repr(param)
@pytest.mark.parametrize('jax_dist', [
dist.beta,
dist.cauchy,
dist.expon,
dist.gamma,
dist.halfcauchy,
dist.halfnorm,
dist.lognorm,
dist.pareto,
dist.trunccauchy,
dist.truncnorm,
dist.norm,
dist.t,
dist.uniform,
], ids=idfn)
@pytest.mark.parametrize('loc, scale', [
(1, 1),
(1., np.array([1., 2.])),
])
@pytest.mark.parametrize('prepend_shape', [
None,
(),
(2,),
(2, 3),
])
def test_continuous_shape(jax_dist, loc, scale, prepend_shape):
rng = random.PRNGKey(0)
args = [i + 1 for i in range(jax_dist.numargs)]
expected_shape = lax.broadcast_shapes(*[np.shape(loc), np.shape(scale)])
samples = jax_dist.rvs(*args, loc=loc, scale=scale, random_state=rng)
assert isinstance(samples, jax.interpreters.xla.DeviceArray)
assert np.shape(samples) == expected_shape
assert np.shape(jax_dist(*args, loc=loc, scale=scale).rvs(random_state=rng)) == expected_shape
if prepend_shape is not None:
expected_shape = prepend_shape + lax.broadcast_shapes(*[np.shape(loc), np.shape(scale)])
assert np.shape(jax_dist.rvs(*args, loc=loc, scale=scale,
size=expected_shape, random_state=rng)) == expected_shape
assert np.shape(jax_dist(*args, loc=loc, scale=scale)
.rvs(random_state=rng, size=expected_shape)) == expected_shape
@pytest.mark.parametrize('jax_dist, dist_args, sample', [
(dist.beta, (-1, 1), -1),
(dist.beta, (2, np.array([1., -3])), np.array([1., -2])),
(dist.cauchy, (), np.inf),
(dist.cauchy, (), np.array([1., np.nan])),
(dist.expon, (), -1),
(dist.expon, (), np.array([1., -2])),
(dist.gamma, (-1,), -1),
(dist.gamma, (np.array([-2., 3]),), np.array([1., -2])),
(dist.halfcauchy, (), -1),
(dist.halfcauchy, (), np.array([1., -2])),
(dist.halfnorm, (), -1),
(dist.halfnorm, (), np.array([1., -2])),
(dist.lognorm, (-1,), -1),
(dist.lognorm, (np.array([-2., 3]),), np.array([1., -2])),
(dist.norm, (), np.inf),
(dist.norm, (), np.array([1., np.nan])),
(dist.pareto, (-1,), -1),
(dist.pareto, (np.array([-2., 3]),), np.array([1., -2])),
(dist.t, (-1,), np.inf),
(dist.t, (np.array([-2., 3]),), np.array([1., np.nan])),
(dist.trunccauchy, (), -1),
(dist.trunccauchy, (), np.array([1., -2])),
(dist.truncnorm, (), -1),
(dist.truncnorm, (), np.array([1., -2])),
(dist.uniform, (), -1),
(dist.uniform, (), np.array([0.5, -2])),
], ids=idfn)
def test_continuous_validate_args(jax_dist, dist_args, sample):
valid_args = [i + 1 for i in range(jax_dist.numargs)]
with validation_enabled():
if dist_args:
with pytest.raises(ValueError, match='Invalid parameters'):
jax_dist(*dist_args)
with pytest.raises(ValueError, match='Invalid scale parameter'):
jax_dist(*valid_args, scale=-1)
frozen_dist = jax_dist(*valid_args)
with pytest.raises(ValueError, match='Invalid values'):
frozen_dist.logpdf(sample)
@pytest.mark.parametrize('jax_dist, dist_args', [
(dist.categorical, (np.array([0.1, 0.9]),)),
(dist.categorical, (np.array([[0.1, 0.9], [0.2, 0.8]]),)),
(dist.dirichlet, (np.ones(3),)),
(dist.dirichlet, (np.ones((2, 3)),)),
(dist.multinomial, (10, np.array([0.1, 0.9]),)),
(dist.multinomial, (10, np.array([[0.1, 0.9], [0.2, 0.8]]),)),
], ids=idfn)
@pytest.mark.parametrize('prepend_shape', [
None,
(),
(2,),
(2, 3),
])
def test_multivariate_shape(jax_dist, dist_args, prepend_shape):
rng = random.PRNGKey(0)
expected_shape = jax_dist._batch_shape(*dist_args) + jax_dist._event_shape(*dist_args)
samples = jax_dist.rvs(*dist_args, random_state=rng)
assert isinstance(samples, jax.interpreters.xla.DeviceArray)
assert np.shape(samples) == expected_shape
assert np.shape(jax_dist(*dist_args).rvs(random_state=rng)) == expected_shape
if prepend_shape is not None:
size = prepend_shape + jax_dist._batch_shape(*dist_args)
expected_shape = size + jax_dist._event_shape(*dist_args)
samples = jax_dist.rvs(*dist_args, size=size, random_state=rng)
assert np.shape(samples) == expected_shape
samples = jax_dist(*dist_args).rvs(random_state=rng, size=size)
assert np.shape(samples) == expected_shape
@pytest.mark.parametrize('jax_dist, valid_args, invalid_args, invalid_sample', [
(dist.categorical, (np.array([0.1, 0.9]),), (np.array([0.1, 0.8]),), np.array([1, 4])),
(dist.dirichlet, (np.ones(3),), (np.array([-1., 2., 3.]),), np.array([0.1, 0.7, 0.1])),
(dist.multinomial, (10, np.array([0.1, 0.9]),), (10, np.array([0.2, 0.9]),), np.array([-1, 9])),
], ids=idfn)
def test_multivariate_validate_args(jax_dist, valid_args, invalid_args, invalid_sample):
with validation_enabled():
with pytest.raises(ValueError, match='Invalid parameters'):
jax_dist(*invalid_args)
frozen_dist = jax_dist(*valid_args)
with pytest.raises(ValueError, match='Invalid values'):
frozen_dist.logpmf(invalid_sample)
@pytest.mark.parametrize('jax_dist, dist_args', [
(dist.bernoulli, (0.1,)),
(dist.bernoulli, (np.array([0.3, 0.5]),)),
(dist.binom, (10, 0.4)),
(dist.binom, (np.array([10]), np.array([0.4, 0.3]))),
(dist.poisson, (1.,)),
(dist.poisson, (np.array([1., 4., 10.]),)),
], ids=idfn)
@pytest.mark.parametrize('prepend_shape', [
None,
(),
(2,),
(2, 3),
])
def test_discrete_shape(jax_dist, dist_args, prepend_shape):
rng = random.PRNGKey(0)
sp_dist = getattr(osp_stats, jax_dist.name)
expected_shape = np.shape(sp_dist.rvs(*dist_args))
samples = jax_dist.rvs(*dist_args, random_state=rng)
assert isinstance(samples, jax.interpreters.xla.DeviceArray)
assert np.shape(samples) == expected_shape
if prepend_shape is not None:
shape = prepend_shape + lax.broadcast_shapes(*[np.shape(arg) for arg in dist_args])
expected_shape = np.shape(sp_dist.rvs(*dist_args, size=shape))
assert np.shape(jax_dist.rvs(*dist_args, size=shape, random_state=rng)) == expected_shape
@pytest.mark.parametrize('jax_dist, valid_args, invalid_args, invalid_sample', [
(dist.bernoulli, (0.8,), (np.nan,), 2),
(dist.binom, (10, 0.8), (-10, 0.8), -10),
(dist.binom, (10, 0.8), (10, 1.1), -1),
(dist.poisson, (4.,), (-1.,), -1),
], ids=idfn)
def test_discrete_validate_args(jax_dist, valid_args, invalid_args, invalid_sample):
with validation_enabled():
with pytest.raises(ValueError, match='Invalid parameters'):
jax_dist(*invalid_args)
frozen_dist = jax_dist(*valid_args)
with pytest.raises(ValueError, match='Invalid values'):
frozen_dist.logpmf(invalid_sample)
@pytest.mark.parametrize('jax_dist', [
dist.beta,
dist.cauchy,
dist.expon,
dist.gamma,
dist.halfcauchy,
dist.halfnorm,
dist.lognorm,
dist.norm,
dist.pareto,
dist.t,
dist.trunccauchy,
dist.truncnorm,
dist.uniform,
], ids=idfn)
@pytest.mark.parametrize('loc, scale', [
(1., 1.),
(1., np.array([1., 2.])),
])
def test_sample_gradient(jax_dist, loc, scale):
rng = random.PRNGKey(0)
args = [i + 1 for i in range(jax_dist.numargs)]
expected_shape = lax.broadcast_shapes(*[np.shape(loc), np.shape(scale)])
def fn(args, loc, scale):
return jax_dist.rvs(*args, loc=loc, scale=scale, random_state=rng).sum()
# FIXME: find a proper test for gradients of arg parameters
assert len(grad(fn)(args, loc, scale)) == jax_dist.numargs
assert_allclose(grad(fn, 1)(args, loc, scale),
loc * reduce(mul, expected_shape[:len(expected_shape) - np.ndim(loc)], 1.))
assert_allclose(grad(fn, 2)(args, loc, scale),
jax_dist.rvs(*args, size=expected_shape, random_state=rng))
@pytest.mark.parametrize('jax_dist, dist_args', [
(dist.dirichlet, (np.ones(3),)),
(dist.dirichlet, (np.ones((2, 3)),)),
], ids=idfn)
def test_mvsample_gradient(jax_dist, dist_args):
rng = random.PRNGKey(0)
def fn(args):
return jax_dist.rvs(*args, random_state=rng).sum()
# FIXME: find a proper test for gradients of arg parameters
assert len(grad(fn)(dist_args)) == jax_dist.numargs
@pytest.mark.parametrize('jax_dist', [
dist.beta,
dist.cauchy,
dist.expon,
dist.gamma,
dist.halfcauchy,
dist.halfnorm,
dist.lognorm,
dist.norm,
dist.pareto,
dist.t,
dist.trunccauchy,
dist.truncnorm,
dist.uniform,
], ids=idfn)
@pytest.mark.parametrize('loc_scale', [
(),
(1,),
(1, 1),
(1., np.array([1., 2.])),
])
def test_continuous_logpdf(jax_dist, loc_scale):
rng = random.PRNGKey(0)
args = [i + 1 for i in range(jax_dist.numargs)] + list(loc_scale)
samples = jax_dist.rvs(*args, random_state=rng)
if jax_dist is dist.trunccauchy:
sp_dist = osp_stats.cauchy
assert_allclose(jax_dist.logpdf(samples, args[0], args[1]),
sp_dist.logpdf(samples) - np.log(sp_dist.cdf(args[1]) - sp_dist.cdf(args[0])),
atol=1e-6)
else:
sp_dist = getattr(osp_stats, jax_dist.name)
assert_allclose(jax_dist.logpdf(samples, *args), sp_dist.logpdf(samples, *args), atol=1.3e-6)
@pytest.mark.parametrize('jax_dist, dist_args', [
(dist.dirichlet, (np.array([1., 2., 3.]),)),
], ids=idfn)
@pytest.mark.parametrize('shape', [
None,
(),
(2,),
(2, 3),
])
def test_multivariate_continuous_logpdf(jax_dist, dist_args, shape):
rng = random.PRNGKey(0)
samples = jax_dist.rvs(*dist_args, size=shape, random_state=rng)
# XXX scipy.stats.dirichlet does not work with batch
if samples.ndim == 1:
sp_dist = getattr(osp_stats, jax_dist.name)
assert_allclose(jax_dist.logpdf(samples, *dist_args),
sp_dist.logpdf(samples, *dist_args), atol=1e-6)
event_dim = len(jax_dist._event_shape(*dist_args))
batch_shape = samples.shape if event_dim == 0 else samples.shape[:-1]
assert jax_dist.logpdf(samples, *dist_args).shape == batch_shape
@pytest.mark.parametrize('jax_dist, dist_args', [
(dist.categorical, (np.array([0.7, 0.3]),)),
(dist.multinomial, (10, np.array([0.3, 0.7]),)),
], ids=idfn)
@pytest.mark.parametrize('shape', [
None,
(),
(2,),
(2, 3),
])
def test_multivariate_discrete_logpmf(jax_dist, dist_args, shape):
rng = random.PRNGKey(0)
samples = jax_dist.rvs(*dist_args, size=shape, random_state=rng)
# XXX scipy.stats.multinomial does not work with batch
if samples.ndim == 1:
if jax_dist is dist.categorical:
# test against PyTorch
assert_allclose(jax_dist.logpmf(np.array([1, 0]), *dist_args),
np.array([-1.2040, -0.3567]), atol=1e-4)
else:
sp_dist = getattr(osp_stats, jax_dist.name)
assert_allclose(jax_dist.logpmf(samples, *dist_args),
sp_dist.logpmf(samples, *dist_args), atol=1e-5)
event_dim = len(jax_dist._event_shape(*dist_args))
batch_shape = samples.shape if event_dim == 0 else samples.shape[:-1]
assert jax_dist.logpmf(samples, *dist_args).shape == batch_shape
@pytest.mark.parametrize('jax_dist, dist_args', [
(dist.bernoulli, (0.1,)),
(dist.bernoulli, (np.array([0.3, 0.5]),)),
(dist.binom, (10, 0.4)),
(dist.binom, (np.array([10]), np.array([0.4, 0.3]))),
(dist.binom, (np.array([2, 5]), np.array([[0.4], [0.5]]))),
(dist.poisson, (4.,)),
(dist.poisson, (np.array([1., 4., 10.]),)),
], ids=idfn)
@pytest.mark.parametrize('shape', [
None,
(),
(2,),
(2, 3),
])
def test_discrete_logpmf(jax_dist, dist_args, shape):
rng = random.PRNGKey(0)
sp_dist = getattr(osp_stats, jax_dist.name)
samples = jax_dist.rvs(*dist_args, random_state=rng)
assert_allclose(jax_dist.logpmf(samples, *dist_args),
sp_dist.logpmf(onp.asarray(samples), *dist_args),
rtol=1e-5)
if shape is not None:
shape = shape + lax.broadcast_shapes(*[np.shape(arg) for arg in dist_args])
samples = jax_dist.rvs(*dist_args, size=shape, random_state=rng)
assert_allclose(jax_dist.logpmf(samples, *dist_args),
sp_dist.logpmf(onp.asarray(samples), *dist_args),
rtol=1e-5)
def fn(sample, *args):
return np.sum(jax_dist.logpmf(sample, *args))
for i in range(len(dist_args)):
logpmf_grad = grad(fn, i + 1)(samples, *dist_args)
assert np.all(np.isfinite(logpmf_grad))
@pytest.mark.parametrize('jax_dist, dist_args', [
(dist.bernoulli, (0.1,)),
(dist.bernoulli, (np.array([0.3, 0.5]),)),
(dist.binom, (10, 0.4)),
(dist.binom, (np.array([10]), np.array([0.4, 0.3]))),
(dist.binom, (np.array([2, 5]), np.array([[0.4], [0.5]]))),
(dist.categorical, (np.array([0.1, 0.9]),)),
(dist.categorical, (np.array([[0.1, 0.9], [0.2, 0.8]]),)),
(dist.multinomial, (10, np.array([0.1, 0.9]),)),
(dist.multinomial, (10, np.array([[0.1, 0.9], [0.2, 0.8]]),)),
], ids=idfn)
def test_discrete_with_logits(jax_dist, dist_args):
rng = random.PRNGKey(0)
logit_to_prob = np.log if isinstance(jax_dist, jax_multivariate) else logit
logit_args = dist_args[:-1] + (logit_to_prob(dist_args[-1]),)
actual_sample = jax_dist.rvs(*dist_args, random_state=rng)
expected_sample = jax_dist(*logit_args, is_logits=True).rvs(random_state=rng)
assert_allclose(actual_sample, expected_sample)
actual_pmf = jax_dist.logpmf(actual_sample, *dist_args)
expected_pmf = jax_dist(*logit_args, is_logits=True).logpmf(actual_sample)
assert_allclose(actual_pmf, expected_pmf, rtol=1e-6)
| python |
import os
import os.path as osp
import sys
import numpy.random
import torch.nn
from deltalogger.deltalogger import Deltalogger
from reinforce_modules.utils import ConfusionGame, get_defense_visual_fool_model
from utils.train_utils import StateCLEVR, ImageCLEVR_HDF5
sys.path.insert(0, osp.abspath('.'))
import random
import argparse
from modules.embedder import *
import seaborn as sns
from reinforce_modules.policy_networks import Re1nforceTrainer, PolicyNet
sns.set_style('darkgrid')
def _print(something):
print(something, flush=True)
return
def PolicyEvaluation(args, seed=1, logger=None):
torch.manual_seed(seed)
random.seed(seed)
numpy.random.seed(seed)
effective_range_name = 'all'
BS = args.bs
TRAIN_DURATION = args.train_duration
if osp.exists(f'./results/experiment_reinforce_increasing_defense_{args.defense_level}'):
pass
else:
os.mkdir(f'./results/experiment_reinforce_increasing_defense_{args.defense_level}')
if args.backend == 'states':
load_from = './results/experiment_rn/mos_epoch_164.pt'
else:
load_from = './results/experiment_fp/mos_epoch_219.pt'
model, (
model_fool,
resnet), val_dataloader, predictions_before_pre_calc, initial_example = get_defense_visual_fool_model(
device=args.device,
load_from=load_from,
scenes_path=args.scenes_path,
questions_path=args.questions_path,
clvr_path=args.clvr_path,
batch_size=BS,
defense_level=args.defense_level)
rl_game = ConfusionGame(testbed_model=model,
confusion_model=model_fool,
device='cuda',
batch_size=BS,
confusion_weight=args.confusion_weight,
change_weight=args.change_weight,
fail_weight=args.fail_weight,
invalid_weight=args.invalid_weight,
mode=args.mode,
render=args.mode == 'visual',
backend=args.backend)
if args.backend == 'states':
input_size = 512
elif args.backend == 'pixels':
input_size = 256
else:
raise ValueError(f"Backend must be [states/pixels] you entered: {args.backend}")
model = PolicyNet(input_size=input_size, hidden_size=512, dropout=0.0, reverse_input=True)
trainer = Re1nforceTrainer(model=model,
game=rl_game,
dataloader=val_dataloader,
device=args.device,
lr=args.lr,
train_duration=TRAIN_DURATION,
batch_size=BS,
name=effective_range_name,
predictions_before_pre_calc=predictions_before_pre_calc,
resnet=resnet,
fool_model_name='Defense',
initial_example=initial_example)
best_drop, best_confusion = trainer.train(log_every=-1, save_every=100, logger=logger)
return best_drop, best_confusion
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=str, help='cpu or cuda', default='cuda')
parser.add_argument('--scenes_path', type=str, help='folder of scenes', default='data/')
parser.add_argument('--questions_path', type=str, help='folder of questions', default='data/')
parser.add_argument('--clvr_path', type=str, help='folder before images', default='data/')
parser.add_argument('--use_cache', type=int, help='if to use cache (only in image clever)', default=0)
parser.add_argument('--use_hdf5', type=int, help='if to use hdf5 loader', default=0)
    parser.add_argument('--confusion_weight', type=float, help='weight of the confusion term in the reward', default=1)
    parser.add_argument('--change_weight', type=float, help='weight of the change term in the reward', default=0.1)
    parser.add_argument('--fail_weight', type=float, help='weight (penalty) of the fail term in the reward', default=-0.1)
    parser.add_argument('--invalid_weight', type=float, help='weight (penalty) of the invalid term in the reward', default=-0.8)
    parser.add_argument('--train_duration', type=int, help='number of training iterations', default=30)
    parser.add_argument('--lr', type=float, help='learning rate', default=5e-4)
    parser.add_argument('--bs', type=int, help='batch size', default=10)
parser.add_argument('--mode', type=str, help='state | visual | imagenet', default='visual')
parser.add_argument('--range', type=float, default=-1)
parser.add_argument('--seed', type=int, default=51)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--backend', type=str, help='states or pixels', default='states')
parser.add_argument('--defense_level', type=int, default=10)
args = parser.parse_args()
if args.backend == 'states':
exp_name = 'DeltaDefense'
elif args.backend == 'pixels':
exp_name = 'DeltaDefensePixels'
else:
raise ValueError(f'Backend has to be one of states/pixels, you entered : {args.backend}')
if args.repeat == 1:
logger = Deltalogger(exp_name, run_tag=[args.defense_level, 0], dummy=True)
_print(PolicyEvaluation(args, args.seed, logger=logger))
else:
acc_drops = []
cons_drops = []
for seed in range(args.seed, args.repeat + args.seed):
experiment_number = seed - args.seed
logger = Deltalogger(exp_name, run_tag=[args.defense_level, experiment_number],
dummy=False)
a, c = PolicyEvaluation(args, seed, logger=logger)
acc_drops.append(a)
cons_drops.append(c)
_print(f'Accuracy: Min: {min(acc_drops)}, Mean: {sum(acc_drops) / len(acc_drops)}, Max: {max(acc_drops)}')
_print(
f'Consistency: Min: {min(cons_drops)}, Mean: {sum(cons_drops) / len(cons_drops)}, Max: {max(cons_drops)}')
| python |
from django.conf import settings
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from api.search.product import views
# Create a router and register our viewsets with it.
router = DefaultRouter()
router.register(r"search", views.ProductDocumentView, basename="product_search")
urlpatterns = [
path("", include(router.urls)),
path("suggest/", views.ProductSuggestDocumentView.as_view(), name="product_suggest"),
path("spire/<str:pk>/comment/", views.CommentView.as_view(), name="retrieve_spire_product_comment"),
path("lite/<uuid:pk>/comment/", views.CommentView.as_view(), name="retrieve_lite_product_comment"),
path("lite/<uuid:pk>/", views.RetrieveLiteProductView.as_view(), name="retrieve_lite_product"),
path("more-like-this/<str:pk>/", views.MoreLikeThisView.as_view(), name="more_like_this"),
path("more-like-this/<uuid:pk>/", views.MoreLikeThisView.as_view(), name="more_like_this"),
]
if settings.ENABLE_SPIRE_SEARCH:
urlpatterns += [
path("spire/<str:pk>/", views.RetrieveSpireProductView.as_view(), name="retrieve_spire_product"),
]
| python |
import copy
import numpy as np
# configure matplotlib for use without xserver
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def get_neuron_features(features, neurons):
"""
    Gets the activations of the neurons specified by `neurons` from `features`.
Args:
features: numpy arraylike of shape `[n,d]`
neurons: numpy arraylike of shape `[k]` (where k is the number of neuron activations to select)
            used to index neuron activations from `features`. `0<=neurons[i]<d` for all `i`
Returns:
numpy arraylike of shape `[n,k]`
"""
return np.reshape(features[:,neurons], [len(features), -1])
def mask_neuron_weights(weights, neurons, inplace=False):
"""
Zero masks rows of weights specified by neurons
Args:
weights: numpy array like of shape `[d,num_classes]`
        neurons: 1D numpy array of shape `[k]`. `0<=neurons[i]<d` for all `i`
inplace: Boolean specifying whether to mask `weights` in place in addition to returning masked_vals
Returns:
masked_vals: zero masked `weights` with mask specified by `neurons`
"""
mask = np.zeros_like(weights)
mask[neurons,np.arange(mask.shape[-1])] = 1
masked_vals = weights*mask
if inplace:
weights[:] = masked_vals
return masked_vals
def get_masked_model(log_reg_model, top_neurons):
masked_log_reg_model = copy.copy(log_reg_model)
masked_log_reg_model.coef_ = mask_neuron_weights(masked_log_reg_model.coef_.T, top_neurons).T
return masked_log_reg_model
def get_top_k_neuron_weights(weights, k=1):
"""
    Gets the indices of the top weights based on the L1-norm contributions of the weights,
    based off of the https://rakeshchada.github.io/Sentiment-Neuron.html interpretation of
    https://arxiv.org/pdf/1704.01444.pdf (Radford et al.)
Args:
weights: numpy arraylike of shape `[d,num_classes]`
k: integer specifying how many rows of weights to select
Returns:
k_indices: numpy arraylike of shape `[k]` specifying indices of the top k rows
"""
weight_penalties = np.squeeze(np.linalg.norm(weights, ord=1, axis=1))
if k == 1:
k_indices = np.array([np.argmax(weight_penalties)])
elif k >= np.log(len(weight_penalties)):
# runs O(nlogn)
k_indices = np.argsort(weight_penalties)[-k:][::-1]
else:
# runs O(n+klogk)
k_indices = np.argpartition(weight_penalties, -k)[-k:]
k_indices = (k_indices[np.argsort(weight_penalties[k_indices])])[::-1]
return k_indices
def plot_logit_and_save(logits, labels, logit_index, name):
"""
    Plots a histogram of the logit at `logit_index`, split by the binary label.
    Saves the plotted histogram to `name`.png.
    Args:
        logits: numpy arraylike of shape `[n, num_logits]` containing logit values
        labels: numpy arraylike of shape `[n]` of binary labels (thresholded at .5)
        logit_index: column index of the logit to plot
        name: output file name (saved as `name`.png)
"""
logit = logits[:,logit_index]
plt.title('Distribution of Logit Values')
plt.ylabel('# of logits per bin')
plt.xlabel('Logit Value')
plt.hist(logit[labels < .5], bins=25, alpha=0.5, label='neg')
plt.hist(logit[labels >= .5], bins=25, alpha=0.5, label='pos')
plt.legend()
plt.savefig(name+'.png')
plt.clf()
def plot_weight_contribs_and_save(coef, name):
plt.title('Values of Resulting L1 Penalized Weights')
plt.tick_params(axis='both', which='major')
coef = normalize(coef)
plt.plot(range(len(coef[0])), coef.T)
plt.xlabel('Neuron (Feature) Index')
plt.ylabel('Neuron (Feature) weight')
plt.savefig(name)
plt.clf()
def normalize(coef):
norm = np.linalg.norm(coef)
coef = coef/norm
return coef
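# A minimal usage sketch of the helpers above on random data; the shapes and values are
# invented. Note that mask_neuron_weights pairs neurons[i] with class column i, so k is
# chosen equal to the number of classes here.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    features = rng.randn(100, 16)                    # [n, d] activations
    weights = rng.randn(16, 2)                       # [d, num_classes] classifier weights
    top = get_top_k_neuron_weights(weights, k=2)     # indices of the top-2 rows by L1 norm
    print(get_neuron_features(features, top).shape)  # -> (100, 2)
    print(mask_neuron_weights(weights, top).shape)   # -> (16, 2)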
| python |
'''
Multiples of 3 and 5
'''
sum = 0
for i in range(1000):
if i%3 == 0 or i%5 == 0:
sum = sum + i
print(sum)
| python |
#!/usr/bin/env python
import sys, gym, time
#
# Test yourself as a learning agent! Pass environment name as a command-line argument, for example:
#
#   python keyboard_agent.py SpaceInvadersNoFrameskip-v4 path/to/config.json
#
import gym_game
import pygame
if len(sys.argv) < 3:
print('Usage: python keyboard_agent.py ENV_NAME CONFIG_FILE')
sys.exit(-1)
env_name = sys.argv[1]
print('Making Gym[PyGame] environment:', env_name)
config_file = sys.argv[2]
print('Config file:', config_file)
env = gym.make(env_name, config_file=config_file)
sleep_time = 0.1
if not hasattr(env.action_space, 'n'):
raise Exception('Keyboard agent only supports discrete action spaces')
ACTIONS = env.action_space.n
print("ACTIONS={}".format(ACTIONS))
print("Press keys 1 2 3 ... to take actions 1 2 3 ... etc.")
print("No keys pressed is taking action 0")
render_mode = 'human'
# render_mode = 'rgb_array'
env.use_wall_clock = True
env.reset()
#env.render(render_mode)
def get_action(pressed_keys):
action = None
if pressed_keys[pygame.K_0] == 1:
action = 0
elif pressed_keys[pygame.K_1] == 1:
action = 1
elif pressed_keys[pygame.K_2] == 1:
action = 2
elif pressed_keys[pygame.K_3] == 1:
action = 3
elif pressed_keys[pygame.K_4] == 1:
action = 4
elif pressed_keys[pygame.K_5] == 1:
action = 5
elif pressed_keys[pygame.K_6] == 1:
action = 6
elif pressed_keys[pygame.K_7] == 1:
action = 7
elif pressed_keys[pygame.K_8] == 1:
action = 8
elif pressed_keys[pygame.K_9] == 1:
action = 9
if action is None:
action = 0
return action
def rollout(env):
observation = env.reset()
quit = False
total_reward = 0
total_timesteps = 0
while 1:
# Check for quit from user
events = env.get_events()
for event in events:
if event.type == pygame.QUIT:
quit = True
print('Quit event')
# Get selected action from user
pressed_keys = env.get_keys_pressed()
a = get_action(pressed_keys)
# Update the environment
observation, reward, done, info = env.step(a)
total_timesteps += 1
total_reward += reward
# print('Obs: ',str(observation))
# Render the new state
img = env.render(mode=render_mode, close=quit) # Render the game
# Handle quit request
if quit:
print('Quitting (truncating rollout)...')
break
if done:
print('Episode (rollout) complete.')
env.reset()
break
# Wait a short time
time.sleep(sleep_time)
print("Rollout summary: Timesteps %i Reward %0.2f" % (total_timesteps, total_reward))
return quit
while 1:
quit = rollout(env)
if quit:
break
| python |
import enum
import re
import string
from typing import Text, List
from xml.sax import saxutils
import emoji
from six import string_types
from collections.abc import Iterable
from tklearn.preprocessing import TextPreprocessor
__all__ = [
'Normalize',
'TweetPreprocessor',
]
@enum.unique
class Normalize(enum.Enum):
NONE = 0
ALL = 1
LINKS = 2
HASHTAGS = 3
MENTIONS = 4
IMAGES = 5
class TweetPreprocessor(TextPreprocessor):
""" Preprocessor for Tweets.
    An instance of this class can be used to preprocess your tweet data.
    Several options are provided; use them according to your use case.
"""
RE_LINKS = re.compile(r'(https?://\S+)')
RE_IMAGE_LINKS = re.compile(r'(pic.twitter.com\S+)')
RE_MENTIONS = re.compile(r'(@[a-zA-Z0-9_]{1,15})')
RE_HASHTAGS = re.compile(r'(#\w+)')
def __init__(self, normalize=Normalize.NONE, lowercase=False, **kwargs):
""" Initialize `TweetPreprocessor` object.
Parameters
----------
kwargs
Parameters
"""
super(TweetPreprocessor, self).__init__()
self.normalize = []
self.lowercase = lowercase
if normalize == Normalize.ALL:
self.normalize = [
Normalize.LINKS,
Normalize.HASHTAGS,
Normalize.MENTIONS,
Normalize.IMAGES,
]
elif (normalize != Normalize.NONE) and isinstance(normalize, Iterable):
for item in normalize:
if isinstance(item, string_types):
if not item.endswith('s'):
item = '{}s'.format(item)
item = Normalize[item.upper()]
self.normalize.append(item)
@staticmethod
def _replace(s: List[Text], old: Text, new: Text) -> List[Text]:
return [new if x == old else x for x in s if x.strip() != '']
def preprocess(self, s: Text) -> Text:
""" Preprocess the input text. Expected input is a Tweet text.
Parameters
----------
s
Input Tweet text.
Returns
-------
Preprocessed tweet.
"""
s = self._clean_tweet(s)
if Normalize.LINKS in self.normalize:
s = self.RE_LINKS.sub('<link>', s)
if Normalize.IMAGES in self.normalize:
s = self.RE_IMAGE_LINKS.sub('<image>', s)
if Normalize.HASHTAGS in self.normalize:
s = self.RE_HASHTAGS.sub('<hashtag>', s)
if Normalize.MENTIONS in self.normalize:
s = self.RE_MENTIONS.sub('<mention>', s)
tokens = s.split()
for ns in self.normalize:
if isinstance(ns, str):
pass
elif isinstance(ns, tuple):
assert len(ns) == 2, \
'Required a tuple of size 2 indicating (new_word, old_words) values for the normalization.'
assert isinstance(ns[1], list), \
'Required a list of old values to replace with the new value.'
for n in ns[1]:
tokens = self._replace(tokens, n, ns[0])
if self.lowercase:
return ' '.join(tokens).lower()
else:
return ' '.join(tokens)
@staticmethod
def _clean_tweet(x):
""" Cleans a given text (tweet) while keeping important characters.
Parameters
----------
x
Input String.
Returns
-------
Cleaned Text.
"""
x = saxutils.unescape(x)
x = x.replace('\xa0', ' ')
x = emoji.demojize(x)
x = ''.join(filter(lambda item: item in set(string.printable), x))
x = emoji.emojize(x)
return x
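# A short usage sketch of the preprocessor above; the tweet text is invented and the exact
# output depends on the installed emoji/tklearn versions.
if __name__ == '__main__':
    pre = TweetPreprocessor(normalize=Normalize.ALL, lowercase=True)
    print(pre.preprocess('Check this out @user #wow https://t.co/abc123'))
    # roughly: "check this out <mention> <hashtag> <link>"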
| python |
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS
import math
import pickle
app = Flask(__name__)
CORS(app)
uniq_fire_date = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'July', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
uniq_county = ['No Data', 'Skamania', 'Cowlitz', 'Thurston', 'Okanogan', 'Pacific', 'Clark', 'Columbia', 'Grays Harbor', 'Adams', 'Benton', 'Asotin', 'Stevens', 'Chelan', 'Klickitat', 'King', 'Lewis', 'Douglas', 'Franklin', 'Jefferson', 'San Juan', 'Kittitas', 'Garfield', 'Grant', 'Pierce', 'Wahkiakum', 'Ferry', 'Clallam', 'Spokane', 'Mason', 'Skagit', 'Pend Oreille', 'Walla Walla', 'Whatcom', 'Kitsap', 'Lincoln', 'Island', 'Snohomish', 'Yakima', 'Whitman']
uniq_cause = ['Smoker', 'Miscellaneou', 'Under Invest', 'Logging', 'Debris Burn', 'Undetermined', 'Recreation', 'Railroad', 'Lightning', 'Children', 'Arson', 'None']
uniq_binlat = [1, 2, 3, 4]
uniq_binlon = [1, 2, 3, 4, 5, 6, 7, 8]
uniq_binacres = [2, 3, 4, 5, 6, 7, 8, 9]
# def binLat(lat):
# print(lat)
# if lat > 48:
# return 1
# elif 48 >= lat > 47:
# return 2
# elif 47 >= lat > 46:
# return 3
# elif 46 >= lat > 45:
# return 4
# else:
# return 5
# def binLon(lon):
# if lon < -124:
# return 1
# elif -124 <= lon < -123:
# return 2
# elif -123 <= lon < -122:
# return 3
# elif -122 <= lon < -121:
# return 4
# elif -121 <= lon < -120:
# return 5
# elif -120 <= lon < -119:
# return 6
# elif -119 <= lon < -118:
# return 7
# else:
# return 8
def unBinAcres(acres_binned):
if acres_binned == 1:
return "0-2"
elif acres_binned == 2:
return "2-10"
elif acres_binned == 3:
return "10-50"
elif acres_binned == 4:
return "50-100"
elif acres_binned == 5:
return "100-500"
elif acres_binned == 6:
return "500-2000"
elif acres_binned == 7:
return "2000-10000"
elif acres_binned == 8:
return "10000-50000"
elif acres_binned == 9:
return "50000-300000"
else:
return "Failure to Compute..."
def acres_to_circle_radius_in_miles(acres):
sqft = acres * 43560
radius = math.sqrt(sqft / math.pi)
return radius / 5280
@app.route('/', methods=['GET'])
def main_route():
return render_template('index.html',
mth=uniq_fire_date,
cnt=uniq_county,
cau=uniq_cause,
lat=uniq_binlat,
lon=uniq_binlon,
acr=uniq_binacres)
@app.route('/api/predict', methods=["GET"])
def return_prediction():
acres = 10000
cause = request.args.get("cause", "")
county = request.args.get("county", "")
fire_date = request.args.get("month", "")
lat = request.args.get("binlat", "")
lon = request.args.get("binlon", "")
# lat = binLat(float(lat))
# lon = binLon(float(lon))
instance = [fire_date, county, cause, lat, lon]
infile = open("trees.p", "rb")
best_trees = pickle.load(infile)
infile.close()
prediction = predict_acres([instance], best_trees)
print(prediction)
if prediction is not None:
acres_binned = prediction[0]
result = {"prediction": unBinAcres(acres_binned)}
return jsonify(result), 200
else:
# failure!!
return "Error making prediction", 400
def predict_acres(X_test, best_trees):
header = []
predictions = []
for i in range(0, len(X_test[0])):
header.append("att" + str(i))
for instance in X_test:
tree_predictions = {}
for tree in best_trees:
temp = tree['tree']
prediction = tdidt_predict(header, tree['tree'], instance)
if prediction in tree_predictions:
tree_predictions[prediction] += 1
else:
tree_predictions[prediction] = 1
max_key = max(tree_predictions, key = tree_predictions.get)
predictions.append(max_key)
return predictions
def tdidt_predict(header, tree, instance):
info_type = tree[0]
if info_type == "Attribute":
attribute_index = header.index(tree[1])
instance_value = instance[attribute_index]
# now I need to find which "edge" to follow recursively
for i in range(2, len(tree)):
value_list = tree[i]
if value_list[1] == instance_value:
# we have a match!! recurse!!
return tdidt_predict(header, value_list[2], instance)
else: # "Leaf"
return tree[1] # leaf class label
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8888)
 | python |
def mallow(y, y_pred, y_sub, k, p):
"""
    Return a Mallows' Cp score for a model.
    Input:
    y: array-like of shape = (n_samples) including values of observed y
    y_pred: vector including values of y predicted by the full model
    y_sub: vector including values of y predicted by the subset model
    k: int number of predictive variable(s) used in the full model
    p: int number of predictive variable(s) used in the subset model
    Output:
    mallow_score: int or float Mallows' Cp score of the full model and subset model
    Raise ValueError if k is less than p.
    Raise ValueError if y, y_sub and y_pred are not of the same length.
    Raise ValueError if length(y) <= 1, length(y_sub) <= 1, or length(y_pred) <= 1.
    Raise TypeError if y, y_sub or y_pred is not array-like.
    Raise TypeError if p is not int.
    Raise Exception if p <= 0.
    Raise TypeError if k is not int.
    Raise Exception if k <= 0.
"""
import numpy as np
import pandas as pd
if k<p:
raise ValueError("number of predictive variable(s) used in the model must larger than in subset model")
if len(y)!=len(y_sub) or len(y_sub)!=len(y_pred) or len(y)!= len(y_pred):
raise ValueError("The length of observed y, predicted y, and predicted y in subset model must be same")
    if len(y)<=1 or len(y_sub)<=1 or len(y_pred)<=1:
raise ValueError("The length of observed y, predicted y, and predicted y in subset model must be larger than 1")
    if isinstance(y, (np.ndarray, list, tuple, pd.core.series.Series)) == False or isinstance(y_sub, (np.ndarray, list, tuple, pd.core.series.Series)) == False or isinstance(y_pred, (np.ndarray, list, tuple, pd.core.series.Series)) == False:
raise TypeError("The observed y, predicted y, and predicted y in subset model must be array-like shape (e.g. array, list, tuple, data column)")
else:
for i in y:
for j in y_pred:
if isinstance(i, (int, float)) != True or isinstance(j, (int, float)) != True:
raise TypeError("The observed y, predicted y, and predicted y in subset model must be numeric elements")
if isinstance(p,int) !=True or isinstance(k,int)!=True:
raise TypeError("The number of predictive variable(s) used in the sub model must be integer")
if p<=0 or k<=0:
raise Exception("The number of predictive variable(s) used in the sub model must be positive")
if isinstance(y,list)==True:
y=np.array(y)
if isinstance(y_sub,list)==True:
y_sub=np.array(y_sub)
if isinstance(y_pred,list)==True:
y_pred=np.array(y_pred)
SSE_p=np.sum((y-y_sub)**2)
MSE= np.sum((y-y_pred)**2)/(len(y)-k)
mallowcp=SSE_p/MSE-len(y)+2*p
return mallowcp
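# A small worked example for mallow(); the numbers are invented. With k = 3 predictors in
# the full model and p = 1 in the subset model, Cp = SSE_p / MSE - n + 2p.
if __name__ == '__main__':
    y = [1.0, 2.0, 3.0, 4.0, 5.0]
    y_pred = [1.1, 1.9, 3.2, 3.8, 5.1]  # predictions from the full model (k = 3)
    y_sub = [1.5, 1.7, 3.5, 3.6, 5.4]   # predictions from the subset model (p = 1)
    print(mallow(y, y_pred, y_sub, k=3, p=1))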
| python |
import functools,math
n=int(input())
a=list(map(int,input().split()))
print(functools.reduce(math.gcd,a))
 | python |
from pymining import itemmining
from pymining import seqmining
import sys
if(len(sys.argv) != 3):
print("Please provide the data file and the minimum support as input, e.g., python freq_seq.py ./output.txt 40")
sys.exit(-1)
f = open(sys.argv[1], 'r')
lines = f.read().splitlines()
seqs = []
for s in lines:
seq = s.split("---")[1]
seq = seq[1:-1]
seqs.append(seq.split(", "))
freq_seqs = seqmining.freq_seq_enum(seqs, int(sys.argv[2]))
for p in freq_seqs:
print(p)
| python |
"""
Using properties
- getter / setter / deleter
- restricting attributes with __slots__
Version: 0.1
Author: BDFD
Date: 2018-03-12
"""
class Car(object):
__slots__ = ('_brand', '_max_speed')
def __init__(self, brand, max_speed):
self._brand = brand
self._max_speed = max_speed
@property
def brand(self):
return self._brand
@brand.setter
def brand(self, brand):
self._brand = brand
@brand.deleter
def brand(self):
del self._brand
@property
def max_speed(self):
return self._max_speed
@max_speed.setter
def max_speed(self, max_speed):
if max_speed < 0:
raise ValueError('Invalid max speed for car')
self._max_speed = max_speed
def __str__(self):
        return 'Car: [brand=%s, max speed=%d]' % (self._brand, self._max_speed)
car = Car('QQ', 120)
print(car)
# ValueError
# car.max_speed = -100
car.max_speed = 320
car.brand = "Benz"
# With the __slots__ restriction above, the following line would raise an exception
# car.current_speed = 80
print(car)
# Since a deleter is provided, the following line could be executed
# del car.brand
# How the properties are implemented
print(Car.brand)
print(Car.brand.fget)
print(Car.brand.fset)
print(Car.brand.fdel)
# The code above helps students understand the wrapper concept mentioned earlier
# Python has a lot of similar syntactic sugar; more of it will appear later
| python |
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for confusion matrix at thresholds."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import math
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.metrics import confusion_matrix_metrics
from tensorflow_model_analysis.metrics import metric_types
from tensorflow_model_analysis.metrics import metric_util
class ConfusionMatrixMetricsTest(testutil.TensorflowModelAnalysisTest,
parameterized.TestCase):
@parameterized.named_parameters(
('specificity', confusion_matrix_metrics.Specificity(), 2.0 /
(2.0 + 3.0)),
('fall_out', confusion_matrix_metrics.FallOut(), 3.0 / (3.0 + 2.0)),
('miss_rate', confusion_matrix_metrics.MissRate(), 4.0 / (4.0 + 1.0)),
('negative_predictive_value',
confusion_matrix_metrics.NegativePredictiveValue(), 2.0 / (2.0 + 4.0)),
('false_discovery_rate', confusion_matrix_metrics.FalseDiscoveryRate(),
3.0 / (3.0 + 1.0)),
('false_omission_rate', confusion_matrix_metrics.FalseOmissionRate(),
4.0 / (4.0 + 2.0)),
('prevalence', confusion_matrix_metrics.Prevalence(),
(1.0 + 4.0) / (1.0 + 2.0 + 3.0 + 4.0)),
('prevalence_threshold', confusion_matrix_metrics.PrevalenceThreshold(),
(math.sqrt((1.0 / (1.0 + 4.0)) * (1.0 - 1.0 * (2.0 / (2.0 + 3.0)))) +
(2.0 / (2.0 + 3.0) - 1.0)) / ((1.0 / (1.0 + 4.0) +
(2.0 / (2.0 + 3.0)) - 1.0))),
('threat_score', confusion_matrix_metrics.ThreatScore(), 1.0 /
(1.0 + 4.0 + 3.0)),
('balanced_accuracy', confusion_matrix_metrics.BalancedAccuracy(),
((1.0 / (1.0 + 4.0)) + (2.0 / (2.0 + 3.0))) / 2),
('f1_score', confusion_matrix_metrics.F1Score(), 2 * 1.0 /
(2 * 1.0 + 3.0 + 4.0)),
('matthews_correlation_coefficient',
confusion_matrix_metrics.MatthewsCorrelationCoefficent(),
(1.0 * 2.0 - 3.0 * 4.0) / math.sqrt(
(1.0 + 3.0) * (1.0 + 4.0) * (2.0 + 3.0) * (2.0 + 4.0))),
('fowlkes_mallows_index', confusion_matrix_metrics.FowlkesMallowsIndex(),
math.sqrt(1.0 / (1.0 + 3.0) * 1.0 / (1.0 + 4.0))),
('informedness', confusion_matrix_metrics.Informedness(),
(1.0 / (1.0 + 4.0)) + (2.0 / (2.0 + 3.0)) - 1.0),
('markedness', confusion_matrix_metrics.Markedness(),
(1.0 / (1.0 + 3.0)) + (2.0 / (2.0 + 4.0)) - 1.0),
('positive_likelihood_ratio',
confusion_matrix_metrics.PositiveLikelihoodRatio(),
(1.0 / (1.0 + 4.0)) / (3.0 / (3.0 + 2.0))),
('negative_likelihood_ratio',
confusion_matrix_metrics.NegativeLikelihoodRatio(),
(4.0 / (4.0 + 1.0)) / (2.0 / (2.0 + 3.0))),
('diagnostic_odds_ratio', confusion_matrix_metrics.DiagnosticOddsRatio(),
((1.0 / 3.0)) / (4.0 / 2.0)),
)
def testConfusionMatrixMetrics(self, metric, expected_value):
computations = metric.computations()
histogram = computations[0]
matrices = computations[1]
metrics = computations[2]
# tp = 1
# tn = 2
# fp = 3
# fn = 4
example1 = {
'labels': np.array([1.0]),
'predictions': np.array([0.6]),
'example_weights': np.array([1.0]),
}
example2 = {
'labels': np.array([0.0]),
'predictions': np.array([0.3]),
'example_weights': np.array([1.0]),
}
example3 = {
'labels': np.array([0.0]),
'predictions': np.array([0.2]),
'example_weights': np.array([1.0]),
}
example4 = {
'labels': np.array([0.0]),
'predictions': np.array([0.6]),
'example_weights': np.array([1.0]),
}
example5 = {
'labels': np.array([0.0]),
'predictions': np.array([0.7]),
'example_weights': np.array([1.0]),
}
example6 = {
'labels': np.array([0.0]),
'predictions': np.array([0.8]),
'example_weights': np.array([1.0]),
}
example7 = {
'labels': np.array([1.0]),
'predictions': np.array([0.1]),
'example_weights': np.array([1.0]),
}
example8 = {
'labels': np.array([1.0]),
'predictions': np.array([0.2]),
'example_weights': np.array([1.0]),
}
example9 = {
'labels': np.array([1.0]),
'predictions': np.array([0.3]),
'example_weights': np.array([1.0]),
}
example10 = {
'labels': np.array([1.0]),
'predictions': np.array([0.4]),
'example_weights': np.array([1.0]),
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([
example1, example2, example3, example4, example5, example6,
example7, example8, example9, example10
])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
| 'ComputeMatrices' >> beam.Map(
lambda x: (x[0], matrices.result(x[1]))) # pyformat: ignore
| 'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1])))
) # pyformat: ignore
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
self.assertLen(got_metrics, 1)
key = metrics.keys[0]
self.assertDictElementsAlmostEqual(
got_metrics, {key: expected_value}, places=5)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
def testConfusionMatrixMetricsWithNan(self):
computations = confusion_matrix_metrics.Specificity().computations()
histogram = computations[0]
matrices = computations[1]
metrics = computations[2]
example1 = {
'labels': np.array([1.0]),
'predictions': np.array([1.0]),
'example_weights': np.array([1.0]),
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([example1])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
| 'ComputeMatrices' >> beam.Map(
lambda x: (x[0], matrices.result(x[1]))) # pyformat: ignore
| 'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1])))
) # pyformat: ignore
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
self.assertLen(got_metrics, 1)
key = metrics.keys[0]
self.assertIn(key, got_metrics)
self.assertTrue(math.isnan(got_metrics[key]))
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
def testConfusionMatrixAtThresholds(self):
computations = confusion_matrix_metrics.ConfusionMatrixAtThresholds(
thresholds=[0.3, 0.5, 0.8]).computations()
histogram = computations[0]
matrices = computations[1]
metrics = computations[2]
example1 = {
'labels': np.array([0.0]),
'predictions': np.array([0.0]),
'example_weights': np.array([1.0]),
}
example2 = {
'labels': np.array([0.0]),
'predictions': np.array([0.5]),
'example_weights': np.array([1.0]),
}
example3 = {
'labels': np.array([1.0]),
'predictions': np.array([0.3]),
'example_weights': np.array([1.0]),
}
example4 = {
'labels': np.array([1.0]),
'predictions': np.array([0.9]),
'example_weights': np.array([1.0]),
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([example1, example2, example3, example4])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
| 'ComputeMatrices' >> beam.Map(
lambda x: (x[0], matrices.result(x[1]))) # pyformat: ignore
| 'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1])))
) # pyformat: ignore
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
self.assertLen(got_metrics, 1)
key = metric_types.MetricKey(name='confusion_matrix_at_thresholds')
self.assertIn(key, got_metrics)
got_metric = got_metrics[key]
self.assertProtoEquals(
"""
matrices {
threshold: 0.3
false_negatives: 1.0
true_negatives: 1.0
false_positives: 1.0
true_positives: 1.0
precision: 0.5
recall: 0.5
}
matrices {
threshold: 0.5
false_negatives: 1.0
true_negatives: 2.0
true_positives: 1.0
precision: 1.0
recall: 0.5
}
matrices {
threshold: 0.8
false_negatives: 1.0
true_negatives: 2.0
true_positives: 1.0
precision: 1.0
recall: 0.5
}
""", got_metric)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
if __name__ == '__main__':
tf.test.main()
| python |
# Generated by Django 4.0.2 on 2022-02-19 14:09
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Review',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.IntegerField(validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(1)])),
('text', models.TextField(blank=True, default='')),
('created', models.DateTimeField(auto_now_add=True)),
('is_published', models.BooleanField(default=False)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created'],
},
),
]
| python |
import cv2
import numpy as np
import torch
from ..builder import MOTION
@MOTION.register_module()
class CameraMotionCompensation(object):
"""Camera motion compensation.
Args:
warp_mode (str): Warp mode in opencv.
num_iters (int): Number of the iterations.
stop_eps (float): Terminate threshold.
"""
def __init__(self,
warp_mode='cv2.MOTION_EUCLIDEAN',
num_iters=50,
stop_eps=0.001):
self.warp_mode = eval(warp_mode)
self.num_iters = num_iters
self.stop_eps = stop_eps
def get_warp_matrix(self, img, ref_img):
"""Calculate warping matrix between two images."""
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ref_img = cv2.cvtColor(ref_img, cv2.COLOR_RGB2GRAY)
warp_matrix = np.eye(2, 3, dtype=np.float32)
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
self.num_iters, self.stop_eps)
cc, warp_matrix = cv2.findTransformECC(img, ref_img, warp_matrix,
self.warp_mode, criteria, None,
1)
warp_matrix = torch.from_numpy(warp_matrix)
return warp_matrix
def warp_bboxes(self, bboxes, warp_matrix):
"""Warp bounding boxes according to the warping matrix."""
tl, br = bboxes[:, :2], bboxes[:, 2:]
tl = torch.cat((tl, torch.ones(tl.shape[0], 1).to(bboxes.device)),
dim=1)
br = torch.cat((br, torch.ones(tl.shape[0], 1).to(bboxes.device)),
dim=1)
trans_tl = torch.mm(warp_matrix, tl.t()).t()
trans_br = torch.mm(warp_matrix, br.t()).t()
trans_bboxes = torch.cat((trans_tl, trans_br), dim=1)
return trans_bboxes.to(bboxes.device)
def track(self, img, ref_img, tracks, num_samples, frame_id):
"""Tracking forward."""
img = img.squeeze(0).cpu().numpy().transpose((1, 2, 0))
ref_img = ref_img.squeeze(0).cpu().numpy().transpose((1, 2, 0))
warp_matrix = self.get_warp_matrix(img, ref_img)
bboxes = []
num_bboxes = []
for k, v in tracks.items():
if int(v['frame_ids'][-1]) < frame_id - 1:
_num = 1
else:
_num = min(num_samples, len(v.bboxes))
num_bboxes.append(_num)
bboxes.extend(v.bboxes[-_num:])
bboxes = torch.cat(bboxes, dim=0)
warped_bboxes = self.warp_bboxes(bboxes, warp_matrix.to(bboxes.device))
warped_bboxes = torch.split(warped_bboxes, num_bboxes)
for b, (k, v) in zip(warped_bboxes, tracks.items()):
_num = b.shape[0]
b = torch.split(b, [1] * _num)
tracks[k].bboxes[-_num:] = b
return tracks
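# A minimal sketch of warp_bboxes() above using an identity warp, so the boxes come back
# unchanged; the numbers are invented and this helper is not called anywhere in the module.
def _demo_warp_bboxes():
    cmc = CameraMotionCompensation()
    boxes = torch.tensor([[10., 20., 50., 80.], [30., 40., 70., 90.]])
    identity_warp = torch.eye(2, 3)  # 2x3 affine matrix of the identity transform
    return cmc.warp_bboxes(boxes, identity_warp)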
| python |
""" Access to data resources installed with this package
"""
from servicelib.resources import ResourcesFacade
resources = ResourcesFacade(
package_name=__name__,
distribution_name="simcore-service-storage",
config_folder="",
)
| python |
#!/usr/bin/env python
import time
from slackclient import SlackClient
import os, re
base_dir = os.path.dirname(os.path.realpath(__file__))
player = 'afplay'
text2voice = 'espeak'
sounds_dir = 'sounds'
filetype = 'mp3'
debug = True
bots_channel = 'build'
play_fixed = re.compile("FIXED")
play_cancelled = re.compile("CANCELLED")
play_failed = re.compile("FAILED")
play_broken = re.compile("BROKEN")
play_building = re.compile("BUILDING")
add_sound_regex = re.compile("^add-sound\s([a-z0-9]+)\s<?(https?:\/\/[a-z./]*\?v=[a-zA-Z0-9_-]*)>?(\s([0-9.]*)\s([0-9.]*)$)?")
def action(command, message):
global debug
global sc
global bots_channel
sc.rtm_send_message(bots_channel, message)
if debug: print ('Running command: ' + command)
os.system(command)
whitelist = {}
with open(os.path.join(base_dir, 'whitelist.txt')) as f:
for line in f:
(name, identifier) = line.split()
whitelist[identifier] = name
f = open(os.path.join(base_dir, 'token.txt'))
token = f.readline().rstrip()
f.close()
print ("Connecting using token " + token)
sc = SlackClient(token)
if sc.rtm_connect():
while True:
        for event in sc.rtm_read():
if 'type' in event and event['type'] == 'message' and 'text' in event:
if ('user' in event and event['user'] in whitelist.keys()):
user = whitelist[event['user']]
elif ('subtype' in event and event['subtype'] == 'bot_message' and 'bot_id' in event and event['bot_id'] in whitelist.keys()):
user = whitelist[event['bot_id']]
else:
user = False
if user:
if debug: print ("Parsing message from " + user + ": '" + event['attachments'][0]['fallback'] + "'")
add_sound_match = add_sound_regex.match(event['attachments'][0]['fallback'])
fixed = play_fixed.search(event['attachments'][0]['fallback'])
cancelled = play_cancelled.search(event['attachments'][0]['fallback'])
failed = play_failed.search(event['attachments'][0]['fallback'])
broken = play_broken.search(event['attachments'][0]['fallback'])
building = play_building.search(event['attachments'][0]['fallback'])
if fixed:
message = user + ' FIXED '
sound_file = os.path.join(base_dir, sounds_dir, 'dai' + '.' + filetype)
command = player + ' ' + sound_file
action(command, message)
elif cancelled:
message = user + ' CANCELLED '
sound_file = os.path.join(base_dir, sounds_dir, 'noooo' + '.' + filetype)
command = player + ' ' + sound_file
action(command, message)
elif failed:
message = user + ' FAILED '
sound_file = os.path.join(base_dir, sounds_dir, 'heygirl' + '.' + filetype)
command = player + ' ' + sound_file
action(command, message)
elif broken:
message = user + ' BROKEN '
sound_file = os.path.join(base_dir, sounds_dir, 'horror' + '.' + filetype)
command = player + ' ' + sound_file
action(command, message)
elif building:
message = user + ' BUILDING '
sound_file = os.path.join(base_dir, sounds_dir, 'dangerzone' + '.' + filetype)
command = player + ' ' + sound_file
action(command, message)
elif add_sound_match:
message = user + ' adds sound ' + add_sound_match.group(1) + ' from youtube video ' + add_sound_match.group(2)
command = os.path.join(base_dir, 'yt-add-sound.sh') + ' ' + add_sound_match.group(1) + ' ' + add_sound_match.group(2)
if add_sound_match.group(3): command += add_sound_match.group(3)
action(command, message)
time.sleep(1);
else:
print ('Connection failed, invalid token?') | python |
"""
Produces Fig. A1 of Johnson & Weinberg (2020), a single axis plot showing the
abundance data of several dwarf galaxies taken from Kirby et al. (2010) in
comparison to a smooth and single-burst model simulated in VICE.
"""
import visuals # visuals.py -> matplotlib subroutines in this directory
import matplotlib.pyplot as plt
import vice
import sys
import warnings
warnings.filterwarnings("ignore")
_NAMES_ = {
"Scl": "Sculptor",
"LeoI": "Leo I",
"Sex": "Sextans",
"LeoII": "Leo II",
"CVnI": "Canes Venatici I",
"UMi": "Ursa Minor",
"Dra": "Draco"
}
_COLORS_ = {
"Scl": "crimson",
"LeoI": "grey",
"Sex": "lime",
"LeoII": "deepskyblue",
"CVnI": "darkviolet",
"UMi": "black",
"Dra": "gold"
}
_MARKERS_ = {
"Scl": "circle",
"LeoI": "square",
"Sex": "star",
"LeoII": "thin_diamond",
"CVnI": "pentagon",
"UMi": "hexagon2",
"Dra": "triangle_up"
}
_SIZES_ = {
"Scl": 30,
"LeoI": 10,
"Sex": 80,
"LeoII": 30,
"CVnI": 60,
"UMi": 50,
"Dra": 40
}
def setup_axis():
"""
Sets up the axis with the proper labels and ranges
Returns
=======
axis :: matplotlib subplot
The axis to plot the data on
"""
fig = plt.figure(figsize = (10, 7))
ax = fig.add_subplot(111, facecolor = "white")
ax.set_xlabel("[Fe/H]")
ax.set_ylabel("[Mg/Fe]")
ax.set_xlim([-3.2, -0.4])
ax.set_ylim([-0.9, 1.4])
return ax
def read_data(filename = "../../data/kirby2010processed.dat"):
"""
Import the data from the associated file.
Args
====
	filename :: str [default :: ../../data/kirby2010processed.dat]
The path to the data file
Returns
=======
An 2D-ascii list containing the data as it appears in the file
"""
data = 849 * [None]
with open(filename, 'r') as f:
f.readline() # header
for i in range(len(data)):
data[i] = f.readline().split()
for j in range(2, len(data[i])):
data[i][j] = float(data[i][j])
f.close()
return data
def plot_data(ax, data, dwarf):
"""
Plots an individual dwarf galaxy's abundance data on the subplot.
Parameters
==========
ax :: matplotlib subplot
The axis to plot the abundance data on
data :: 2D-list
The raw data itself
dwarf :: str
A key denoting which dwarf is being plotted. These appear in the first
column of the argument data.
"""
FeH_column = 12
MgFe_column = 14
fltrd = list(filter(lambda x: x[0] == dwarf, data))
kwargs = {
"c": visuals.colors()[_COLORS_[dwarf]],
"marker": visuals.markers()[_MARKERS_[dwarf]],
"linestyle": "None",
"label": _NAMES_[dwarf],
"s": _SIZES_[dwarf]
}
if dwarf == "LeoI": kwargs["zorder"] = 0
ax.scatter(
[row[FeH_column] for row in fltrd],
[row[MgFe_column] for row in fltrd],
**kwargs
)
def plot_representative_errorbar(ax, data, dwarf):
"""
Plots a representative error bar in the lower-left corner of the figure
Parameters
==========
ax :: matplotlib subplot
The axis object to put the errorbar on
data :: 2D-list
The raw data itself
dwarf :: str
The name of the dwarf to take the median errors from
"""
err_FeH_column = 13
err_MgFe_column = 15
fltrd = list(filter(lambda x: x[0] == dwarf, data))
ax.errorbar(-2.8, -0.4,
xerr = sorted([row[err_FeH_column] for row in fltrd])[len(fltrd) // 2],
yerr = sorted([row[err_MgFe_column] for row in fltrd])[len(fltrd) // 2],
ms = 0, color = visuals.colors()[_COLORS_[dwarf]])
def plot_vice_comparison(ax, name):
"""
Plots the [Mg/Fe]-[Fe/H] track of a given VICE model on the subplot.
Parameters
==========
ax :: matplotlib subplot
The axis to plot on
name :: str
The relative path to the VICE output
"""
out = vice.output(name)
ax.plot(out.history["[fe/h]"], out.history["[mg/fe]"],
c = visuals.colors()["black"],
linestyle = '--')
def main():
"""
Produces the figure and saves it as a PDF.
"""
plt.clf()
ax = setup_axis()
data = read_data()
for i in _NAMES_.keys():
plot_data(ax, data, i)
plot_vice_comparison(ax, "../../simulations/kirby2010_smooth_enh1")
plot_vice_comparison(ax, "../../simulations/kirby2010_smooth")
plot_vice_comparison(ax, "../../simulations/kirby2010_burst")
plot_representative_errorbar(ax, data, "UMi")
ax.legend(loc = visuals.mpl_loc()["upper left"], ncol = 1, frameon = False,
bbox_to_anchor = (1.02, 0.98), fontsize = 18)
plt.tight_layout()
plt.savefig(sys.argv[1])
plt.clf()
if __name__ == "__main__":
main()
| python |
# Copyright © 2021 Lynx-Userbot (LLC Company (WARNING))
# GPL-3.0 License From Github (General Public License)
# Ported From Cat Userbot For Lynx-Userbot By Alvin/LiuAlvinas.
# Based On Plugins
# Credits @Cat-Userbot by Alvin from Lord-Userbot
from userbot.events import register
from userbot import CMD_HELP, bot
from telethon.errors.rpcerrorlist import YouBlockedUserError
# Ported by KENZO @TeamSecret_Kz
@register(outgoing=True, pattern=r"^\.detect(?: |$)(.*)")
async def detect(event):
if event.fwd_from:
return
input_str = "".join(event.text.split(maxsplit=1)[1:])
reply_message = await event.get_reply_message()
if not event.reply_to_msg_id:
await event.edit("```Please reply to the user or type .detect (ID/Username) that you want to detect.```")
return
if input_str:
try:
lynxuser = int(input_str)
except ValueError:
try:
u = await event.client.get_entity(input_str)
except ValueError:
                await event.edit("`Please Give ID/Username to Find History.`")
                return
lynxuser = u.id
else:
lynxuser = reply_message.sender_id
chat = "@tgscanrobot"
event = await event.edit("`Currently Doing Account Detection...`")
event = await event.edit("__Connecting to server telegram.__")
event = await event.edit("__Connecting to server telegram..__")
event = await event.edit("__Connecting to server telegram...__")
event = await event.edit("__Connecting to server telegram.__")
event = await event.edit("__Connecting to server telegram..__")
event = await event.edit("__Connecting to server telegram...__")
event = await event.edit("__Connecting to server telegram.__")
event = await event.edit("__Connecting to server telegram..__")
event = await event.edit("__Connecting to server telegram...__")
event = await event.edit("__Connecting to server telegram.__")
event = await event.edit("__Connecting to server telegram..__")
event = await event.edit("__Connecting to server telegram...__")
async with bot.conversation(chat) as conv:
try:
await conv.send_message(f"{lynxuser}")
except YouBlockedUserError:
            await event.reply(
                "```Please Unblock @tgscanrobot And Try Again.```"
            )
            return
response = await conv.get_response()
await event.client.send_read_acknowledge(conv.chat_id)
await event.edit(response.text)
def inline_mention(user):
full_name = user_full_name(user) or "No Name"
return f"[{full_name}](tg://user?id={user.id})"
def user_full_name(user):
names = [user.first_name, user.last_name]
names = [i for i in list(names) if i]
return " ".join(names)
CMD_HELP.update({
"detection": "✘ Pʟᴜɢɪɴ : Detection\
\n\n⚡𝘾𝙈𝘿⚡: `.detect` <Reply/Username/ID>\
    \n↳ : View the history of groups the user has joined or is currently in."
})
| python |
"""https://de.dariah.eu/tatom/topic_model_python.html"""
import os
import numpy as np # a conventional alias
import sklearn.feature_extraction.text as text
from sklearn import decomposition
class TM_NMF:
def __init__(self, all_documents, num_topics, num_top_words, min_df, max_df, isblock):
self.all_documents = all_documents
self.num_topics = num_topics
self.num_top_words = num_top_words
self.min_df = min_df
self.max_df = max_df
path = os.getcwd() + '/' #'/IEami/'
#self.file = open(path + 'Topic_huge.txt', 'w')
if isblock:
self.file = open(path + 'result_ami/' + 'Topic_modeling_nmf_block_' + str(num_topics) + '_topics.txt', 'w')
else:
self.file = open(path + 'result_ami/' + 'Topic_modeling_nmf_' + str(num_topics) + '_topic_scenario.txt', 'w')
def find_NMF_topics(self):
"""
        Fit an NMF topic model to self.all_documents, using self.num_topics and
        self.num_top_words (the number of top words kept per topic).
        :return:
"""
vectorizer = text.CountVectorizer(input='filename', stop_words='english', min_df= self.min_df, max_df= self.max_df)
dtm = vectorizer.fit_transform(self.all_documents).toarray()
vocab = np.array(vectorizer.get_feature_names())
clf = decomposition.NMF(n_components = self.num_topics, random_state=1)
        # doctopic holds, for each document, the weight with which it is associated with each topic
self.doctopic = clf.fit_transform(dtm)
self.topic_words = []
for topic in clf.components_:
word_idx = np.argsort(topic)[::-1][0:self.num_top_words]
self.topic_words.append([vocab[i] for i in word_idx])
return
def show_corpus_vs_topics(self):
# ***************************************
self.file.write('******************************************************\n')
        # normalize doctopic so that each row (document) sums to 1
doctopic = (self.doctopic) / (np.sum(self.doctopic, axis=1, keepdims=True))
corpus_names = []
for fn in self.all_documents:
name = os.path.basename(fn)
# name = name.rstrip('0123456789')
corpus_names.append(name)
# turn this into an array so we can use NumPy functions
novel_names = np.asarray(corpus_names)
doctopic_orig = doctopic.copy()
# use method described in preprocessing section
doctopic_grouped = np.zeros((len(corpus_names), self.num_topics))
# self.file.write('\t\t\t\t\t')
# for i in range(self.num_topics):
# self.file.write( 'topic'+ str(i+1) + '\t')
self.file.write('\n')
for i, name in enumerate(sorted(set(novel_names))):
tempo = np.mean(doctopic[novel_names == name, :], axis=0)
doctopic_grouped[i, :] = tempo
#self.file.write(name + " " + str(doctopic_grouped[i, :]) + '\n')
self.file.write('\n')
self.file.write("meetings\t\t\t\t\t")
self.file.write("top topics\t\t\t\t\t\t")
self.file.write("probabilities for top topics\n")
corpus = corpus_names
for i in range(len(doctopic)):
top_topics = np.argsort(doctopic[i, :])[::-1][0:5]
top_topics_str = ' '.join(str(t) for t in top_topics)
top_probabilities = ' '.join(str(doctopic[i][t]) for t in top_topics)
self.file.write("{}: {} {}".format(corpus[i], top_topics_str, top_probabilities) + '\n')
self.file.flush()
return
def show_topic_words(self):
self.file.write('\n')
for t in range(len(self.topic_words)):
self.file.write("Topic {}: {}".format(t, ' '.join(self.topic_words[t][:self.num_top_words]) + '\n'))
self.file.flush()
return
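# Example usage (a minimal sketch; the file paths and parameter values below are
# hypothetical, and the 'result_ami/' output directory is assumed to exist):
#
#     docs = ['corpus/meeting1.txt', 'corpus/meeting2.txt', 'corpus/meeting3.txt']
#     tm = TM_NMF(docs, num_topics=10, num_top_words=15, min_df=1, max_df=0.95, isblock=False)
#     tm.find_NMF_topics()
#     tm.show_topic_words()
#     tm.show_corpus_vs_topics()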
| python |
import RoothPath
import os
import re
import yaml
import json
if __name__ == '__main__':
yaml_dic = {}
with open(os.path.join(os.path.join(RoothPath.get_root(), 'Benchmarks'), 'den312d.map')) as ascii_map:
ascii_map.readline()
h = int(re.findall(r'\d+', ascii_map.readline())[0])
w = int(re.findall(r'\d+', ascii_map.readline())[0])
yaml_dic['agents'] = [{'start': [48, 10], 'name': 'agent0'}]
yaml_dic['map'] = {'dimensions': [w, h], 'obstacles': [], 'non_task_endpoints': [[48, 10]],
'start_locations': [[50, 10]], 'goal_locations': [[54, 10]]}
yaml_dic['n_tasks'] = 1
yaml_dic['task_freq'] = 1
yaml_dic['n_delays_per_agent'] = 10
ascii_map.readline()
for i in range(h - 1, -1, -1):
line = ascii_map.readline()
print(line)
for j in range(w):
if line[j] == '@' or line[j] == 'T':
yaml_dic['map']['obstacles'].append((j, i))
with open(os.path.join(RoothPath.get_root(), 'config.json'), 'r') as json_file:
config = json.load(json_file)
with open(os.path.join(os.path.join(RoothPath.get_root(), config['input_path']), 'dragon_age_map.yaml'), 'w') as param_file:
yaml.dump(yaml_dic, param_file)
| python |
# Copyright 2015, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from six.moves import StringIO
from tower_cli import models, resources, exceptions as exc
from tower_cli.api import client
from tower_cli.utils import debug
from tower_cli.constants import CUR_API_VERSION
from tests.compat import unittest, mock
class ResourceMetaTests(unittest.TestCase):
"""A set of tests to establish that the ResourceMeta metaclass works
in the way we expect.
"""
def test_commands(self):
"""Establish that commands are appropriately classified within
the resource, and that the stock commands are not present on a
BaseResource subclass.
"""
# Create the resource.
class MyResource(models.BaseResource):
endpoint = '/bogus/'
@resources.command
def foo(self):
pass
@resources.command
def bar(self):
pass
def boring_method(self):
pass
# Establish that the commands are present on the resource where
# we expect, and that the defined methods are still plain methods.
#
# Note: We can use something like types.FunctionType or
# types.UnboundMethodType to test against directly, but using a
# regular method is preferable because of differences between
# the type internals in Python 2 vs. Python 3.
#
# By just getting the desirable control type from another method
# on the resource, we are ensuring that it "just matches" regardless
# of which version of Python is in use.
self.assertIsInstance(MyResource.foo, type(MyResource.boring_method))
self.assertIsInstance(MyResource.bar, type(MyResource.boring_method))
self.assertEqual(set(MyResource.commands), set(['foo', 'bar', 'list', 'delete', 'get']))
def test_inherited_commands(self):
"""Establish that the stock commands are automatically present
on classes inherited from Resource.
"""
# Create the resource.
class MyResource(models.Resource):
endpoint = '/bogus/'
# Establish it has the commands we expect.
self.assertEqual(set(MyResource.commands),
set(['create', 'copy', 'modify', 'list', 'get',
'delete']))
def test_subclassed_commands(self):
"""Establish that commands overridden in subclasses retain their
superclass implementation options.
"""
# Create the subclass resource, overriding a superclass command.
class MyResource(models.Resource):
endpoint = '/bogus/'
@resources.command
def list(self, **kwargs):
return super(MyResource, self).list(**kwargs)
# Establish that it has one of the options added to the
# superclass list command.
self.assertEqual(MyResource.list.__click_params__, models.Resource.list.__click_params__)
def test_multiple_inheritance(self):
"""
Establish that click decoration from all parent class chains are
preserved in a subclass.
"""
class MyMixin(models.Resource):
endpoint = '/bogus/'
def list(self, **kwargs):
return super(MyMixin, self).list(**kwargs)
class MyResource(MyMixin, models.Resource):
endpoint = '/bogus/'
def list(self, **kwargs):
return super(MyResource, self).list(**kwargs)
self.assertTrue(hasattr(MyResource.list, '__click_params__'))
self.assertEqual(MyResource.list.__click_params__, models.Resource.list.__click_params__)
def test_no_duplicate_options_from_inheritance(self):
"""
Test that metaclass does not duplicate options from multiple parents
"""
class MyMixin1(models.Resource):
endpoint = '/bogus/'
class MyMixin2(models.Resource):
endpoint = '/boguser/'
class MyResource(MyMixin1, MyMixin2):
endpoint = '/boguser/'
def list(self, **kwargs):
return super(MyResource, self).list(**kwargs)
self.assertTrue(hasattr(MyResource.list, '__click_params__'))
self.assertEqual(MyResource.list.__click_params__, models.Resource.list.__click_params__)
def test_fields(self):
"""Establish that fields are appropriately classified within
the resource.
"""
# Create the resource.
class MyResource(models.Resource):
endpoint = '/bogus/'
foo = models.Field(unique=True)
bar = models.Field()
# Establish that our fields lists are the length we expect.
self.assertEqual(len(MyResource.fields), 2)
self.assertEqual(len(MyResource.unique_fields), 1)
# Establish that the fields are present in fields.
self.assertEqual(MyResource.fields[0].name, 'foo')
self.assertEqual(MyResource.fields[1].name, 'bar')
self.assertEqual(MyResource.unique_fields, set(['foo']))
def test_error_no_endpoint(self):
"""Establish that Resource subclasses are required to have
an endpoint, and attempting to create one that lacks an endpoint
raises TypeError.
"""
with self.assertRaises(TypeError):
class MyResource(models.Resource):
pass
def test_endpoint_normalization(self):
"""Establish that the endpoints have leading and trailing slashes
added if they are not present on a resource.
"""
class MyResource(models.Resource):
endpoint = 'foo'
self.assertEqual(MyResource.endpoint, '/foo/')
def test_disabled_property(self):
"""Establish that disabled_methods of derived classes disable specified
attributes derived from base classes.
"""
class MyRes(models.Resource):
endpoint = 'foo'
foobar = 'baz'
class MyDerivedRes(MyRes):
endpoint = 'bar'
disabled_methods = set(['foobar'])
res = MyDerivedRes()
with self.assertRaises(AttributeError):
getattr(res, 'foobar')
res.foobar = 'hey'
self.assertEqual(res.foobar, 'hey')
del res.foobar
with self.assertRaises(AttributeError):
getattr(res, 'foobar')
class ResourceTests(unittest.TestCase):
"""A set of tests to establish that the Resource class works in the
way that we expect.
"""
def setUp(self):
# Create a resource class that can be used across this particular
# suite.
class FooResource(models.Resource):
endpoint = '/foo/'
name = models.Field(unique=True)
description = models.Field(required=False)
self.res = FooResource()
def test_get(self):
"""Establish that the Resource class' `get` method works in the
way that we expect.
"""
with client.test_mode as t:
t.register_json('/foo/42/', {'id': 42, 'description': 'bar',
'name': 'foo'})
result = self.res.get(42)
self.assertEqual(result['id'], 42)
self.assertEqual(result['name'], 'foo')
def test_list_no_kwargs(self):
"""Establish that the Resource class' `list` method correctly
requests the resource and parses out a list of results.
"""
with client.test_mode as t:
t.register_json('/foo/', {'count': 2, 'results': [
{'id': 1, 'name': 'foo', 'description': 'bar'},
{'id': 2, 'name': 'spam', 'description': 'eggs'},
], 'next': None, 'previous': None})
result = self.res.list()
self.assertEqual(t.requests[0].url,
'https://20.12.4.21/api/%s/foo/' % CUR_API_VERSION)
self.assertEqual(result['count'], 2)
self.assertEqual(result['results'][0]['id'], 1)
def test_list_all_pages(self):
"""Establish that the Resource class' `list` method correctly
accepts the --all-pages flag and checks follow-up pages.
"""
with client.test_mode as t:
# Register the first, second, and third page.
t.register_json('/foo/', {'count': 3, 'results': [
{'id': 1, 'name': 'foo', 'description': 'bar'},
], 'next': '/foo/?page=2', 'previous': None})
t.register_json('/foo/?page=2', {'count': 3, 'results': [
{'id': 2, 'name': 'spam', 'description': 'eggs'},
], 'next': '/foo/?page=3', 'previous': None})
t.register_json('/foo/?page=3', {'count': 3, 'results': [
{'id': 3, 'name': 'bacon', 'description': 'cheese'},
], 'next': None, 'previous': None})
# Get the list
result = self.res.list(all_pages=True)
# Assert that there are three results, and three requests.
self.assertEqual(len(t.requests), 3)
self.assertEqual(len(result['results']), 3)
def test_list_with_page_1_special_case(self):
"""Establish that the list function works even if the server gives
/foo/ as the relative link for page 1.
"""
with client.test_mode as t:
# Register the 2nd page in order to test this.
t.register_json('/foo/?page=2', {'count': 2, 'results': [
{'id': 2, 'name': 'spam', 'description': 'eggs'},
], 'next': None, 'previous': '/foo/'})
# Get the list
result = self.res.list(page=2)
# Check that the function knows that /foo/ is page 1
self.assertEqual(result['previous'], 1)
def test_list_custom_kwargs(self):
"""Establish that if we pass custom keyword arguments to list, that
they are included in the final request.
"""
with client.test_mode as t:
t.register_json('/foo/?bar=baz', {'count': 0, 'results': [],
'next': None, 'previous': None})
self.res.list(query=[('bar', 'baz')])
self.assertTrue(t.requests[0].url.endswith('bar=baz'))
def test_get_unexpected_zero_results(self):
"""Establish that if a read method gets 0 results when it should have
gotten one or more, that it raises NotFound.
"""
with client.test_mode as t:
t.register_json('/foo/?name=spam', {'count': 0, 'results': []})
with self.assertRaises(exc.NotFound):
self.res.get(name='spam')
def test_get_no_debug_header(self):
"""Establish that if get is called with include_debug_header=False,
no debug header is issued.
"""
with mock.patch.object(type(self.res), 'read') as read:
with mock.patch.object(debug, 'log') as dlog:
read.return_value = {'results': [True]}
result = self.res.get(42, include_debug_header=False)
self.assertEqual(dlog.call_count, 0)
self.assertTrue(result)
def test_get_unexpected_multiple_results(self):
"""Establish that if a read method gets more than one result when
it should have gotten one and exactly one, that it raises
MultipleResults.
"""
# Register the response to the request URL.
# Note that this response should represent bad data, since name is
# generally unique within Tower. This doesn't matter for the purpose
# of this test; what's important is that if we expected one and exactly
# one result and we get two or more, that we complain in an expected
# (and later, handled) way.
with client.test_mode as t:
t.register_json('/foo/?name=spam', {'count': 2, 'results': [
{'id': 1, 'name': 'spam'},
{'id': 2, 'name': 'spam'},
], 'next': None, 'previous': None})
with self.assertRaises(exc.MultipleResults):
self.res.get(name='spam')
def test_list_with_none_kwargs(self):
"""Establish that if `list` is called with keyword arguments with
None values, that these are ignored.
This is to ensure that click's eagerness to send None values doesn't
cause problems.
"""
# Register the request and make the call.
with client.test_mode as t:
t.register_json('/foo/?name=foo', {'count': 1, 'results': [
{'id': 1, 'name': 'foo', 'description': 'bar'},
], 'next': None, 'previous': None})
self.res.list(name='foo', description=None)
self.assertEqual(len(t.requests), 1)
# Ensure that there are no other query param arguments other
# than `?name=foo` in the request URL.
self.assertNotIn('&', t.requests[0].url)
self.assertTrue(t.requests[0].url.endswith('?name=foo'))
def test_list_with_pagination(self):
"""Establish that the `list` method returns pages as integers
if it is given pages at all.
"""
with client.test_mode as t:
t.register_json('/foo/', {'count': 10, 'results': [
{'id': 1, 'name': 'bar'},
], 'next': '/api/%s/foo/?page=2' % CUR_API_VERSION, 'previous': None})
result = self.res.list()
self.assertEqual(result['next'], 2)
def test_reading_with_file(self):
"""Establish that if we get a file-like object, that it is
appropriately read.
"""
# Note: This is primarily for a case of longer input that belongs
# in files (such as SSH RSA/DSA private keys), but in this case we're
# using something trivial; we need only provide a proof of concept
# to test against.
sio = StringIO('bar')
with client.test_mode as t:
t.register_json('/foo/?name=bar', {'count': 0, 'results': [],
'next': None, 'previous': None})
self.res.list(name=sio)
self.assertTrue(t.requests[0].url.endswith('?name=bar'))
def test_create(self):
"""Establish that a standard create call works in the way that
we expect.
"""
with client.test_mode as t:
# `create` will attempt to see if the record already exists;
# mock this to state that it does not.
t.register_json('/foo/?name=bar', {'count': 0, 'results': [],
'next': None, 'previous': None})
t.register_json('/foo/', {'changed': True, 'id': 42},
method='POST')
self.res.create(name='bar')
self.assertEqual(t.requests[0].method, 'GET')
self.assertEqual(t.requests[1].method, 'POST')
def test_create_already_existing(self):
"""Establish that if we attempt to create a record that already exists,
that no action ends up being taken.
"""
with client.test_mode as t:
t.register_json('/foo/?name=bar', {'count': 1, 'results': [
{'id': 42, 'name': 'bar'},
], 'next': None, 'previous': None})
result = self.res.create(name='bar')
self.assertEqual(len(t.requests), 1)
self.assertFalse(result['changed'])
def test_create_missing_required_fields(self):
"""Establish that if we attempt to create a record and don't specify
all required fields, that we raise BadRequest.
"""
# Create a resource with a required field that isn't the name
# field.
class BarResource(models.Resource):
endpoint = '/bar/'
name = models.Field(unique=True)
required = models.Field()
res = BarResource()
# Attempt to write the resource and prove that it fails.
with client.test_mode as t:
t.register_json('/bar/?name=foo', {'count': 0, 'results': [],
'next': None, 'previous': None})
with self.assertRaises(exc.BadRequest):
res.create(name='foo')
def test_modify(self):
"""Establish that the modify method works in the way we expect,
given a normal circumstance.
"""
with client.test_mode as t:
t.register_json('/foo/42/', {'id': 42, 'name': 'bar',
'description': 'baz'})
t.register_json('/foo/42/',
{'changed': True, 'id': 42}, method='PATCH')
result = self.res.modify(42, description='spam')
self.assertTrue(result['changed'])
self.assertEqual(t.requests[1].body, '{"description": "spam"}')
def test_modify_no_changes(self):
"""Establish that the modify method does not actually attempt
a modification if there are no changes.
"""
with client.test_mode as t:
t.register_json('/foo/42/', {'id': 42, 'name': 'bar',
'description': 'baz'})
result = self.res.modify(42, description='baz')
self.assertFalse(result['changed'])
self.assertEqual(len(t.requests), 1)
def test_modify_ignore_kwargs_none(self):
"""Establish that we ignore keyword arguments set to None when
performing writes.
"""
with client.test_mode as t:
t.register_json('/foo/42/', {'id': 42, 'name': 'bar',
'description': 'baz'})
result = self.res.modify(42, name=None, description='baz')
self.assertFalse(result['changed'])
self.assertEqual(len(t.requests), 1)
self.assertNotIn('name', t.requests[0].url)
def test_write_file_like_object(self):
"""Establish that our write method, if it gets a file-like object,
correctly reads it and uses the file's value as what it sends.
"""
sio = StringIO('bar')
with client.test_mode as t:
t.register_json('/foo/?name=bar', {'count': 1, 'results': [
{'id': 42, 'name': 'bar', 'description': 'baz'},
], 'next': None, 'previous': None})
result = self.res.modify(name=sio, description='baz')
self.assertFalse(result['changed'])
self.assertIn('name=bar', t.requests[0].url)
def test_write_with_null_field(self):
"""Establish that a resource with 'null' field is written."""
with client.test_mode as t:
t.register_json('/foo/42/', {'id': 42, 'name': 'bar',
'description': 'baz'}, method='GET')
t.register_json('/foo/42/', {'name': 'bar', 'id': 42,
'inventory': 'null'}, method='PATCH')
self.res.write(42, inventory='null')
self.assertEqual(json.loads(t.requests[1].body)['inventory'], None)
def test_delete_with_pk(self):
"""Establish that calling `delete` and providing a primary key
works in the way that we expect.
"""
with client.test_mode as t:
t.register('/foo/42/', '', method='DELETE')
result = self.res.delete(42)
self.assertTrue(result['changed'])
def test_delete_without_pk(self):
"""Establish that calling `delete` with keyword arguments works
in the way that we expect.
"""
with client.test_mode as t:
t.register_json('/foo/?name=bar', {'count': 1, 'results': [
{'id': 42, 'name': 'bar', 'description': 'baz'},
], 'next': None, 'previous': None})
t.register('/foo/42/', '', method='DELETE')
result = self.res.delete(name='bar')
self.assertEqual(len(t.requests), 2)
self.assertTrue(t.requests[1].url.endswith('/foo/42/'))
self.assertTrue(result['changed'])
def test_delete_with_pk_already_missing(self):
"""Establish that calling `delete` on a record that does not exist
returns back an unchanged response.
"""
with client.test_mode as t:
t.register_json('/foo/42/', '', method='DELETE', status_code=404)
result = self.res.delete(42)
self.assertFalse(result['changed'])
def test_delete_with_pk_already_missing_exc(self):
"""Establish that calling `delete` on a record that does not
exist raises an exception if requested.
"""
with client.test_mode as t:
t.register_json('/foo/42/', '', method='DELETE', status_code=404)
with self.assertRaises(exc.NotFound):
self.res.delete(42, fail_on_missing=True)
def test_delete_without_pk_already_missing(self):
"""Establish that calling `delete` on a record without a primary
key correctly sends back an unchanged response.
"""
with client.test_mode as t:
t.register_json('/foo/?name=bar', {'count': 0, 'results': []})
result = self.res.delete(name='bar')
self.assertFalse(result['changed'])
def test_delete_without_pk_already_missing_exc(self):
"""Establish that calling `delete` on a record without a primary
key correctly sends back an unchanged response.
"""
with client.test_mode as t:
t.register_json('/foo/?name=bar', {'count': 0, 'results': []})
with self.assertRaises(exc.NotFound):
self.res.delete(name='bar', fail_on_missing=True)
def test_assoc_already_present(self):
"""Establish that the _assoc method returns an unchanged status
message if it attempts to associate two records that are already
associated.
"""
with client.test_mode as t:
t.register_json('/foo/42/bar/?id=84', {'count': 1, 'results': [
{'id': 84},
], 'next': None, 'previous': None})
result = self.res._assoc('bar', 42, 84)
self.assertFalse(result['changed'])
def test_assoc_not_already_present(self):
"""Establish that the _assoc method returns an changed status
message and associates objects if appropriate.
"""
with client.test_mode as t:
t.register_json('/foo/42/bar/?id=84', {'count': 0, 'results': []})
t.register_json('/foo/42/bar/', {}, method='POST')
result = self.res._assoc('bar', 42, 84)
self.assertEqual(json.loads(t.requests[1].body),
{'associate': True, 'id': 84})
self.assertTrue(result['changed'])
def test_disassoc_not_already_present(self):
"""Establish that the _disassoc method returns an unchanged status
message if it attempts to associate two records that are not
associated.
"""
with client.test_mode as t:
t.register_json('/foo/42/bar/?id=84', {'count': 0, 'results': []})
result = self.res._disassoc('bar', 42, 84)
self.assertFalse(result['changed'])
def test_disassoc_already_present(self):
"""Establish that the _assoc method returns an changed status
message and associates objects if appropriate.
"""
with client.test_mode as t:
t.register_json('/foo/42/bar/?id=84', {'count': 1, 'results': [
{'id': 84},
], 'next': None, 'previous': None})
t.register_json('/foo/42/bar/', {}, method='POST')
result = self.res._disassoc('bar', 42, 84)
self.assertEqual(json.loads(t.requests[1].body),
{'disassociate': True, 'id': 84})
self.assertTrue(result['changed'])
def test_lookup_with_unique_field_not_present(self):
"""Establish that a if _lookup is invoked without any unique
field specified, that BadRequest is raised.
"""
with client.test_mode:
with self.assertRaises(exc.BadRequest):
self.res._lookup(description='abcd')
def test_lookup_errant_found(self):
"""Establish that if _lookup is invoked and finds a record when it
should not, that an appropriate exception is raised.
"""
with client.test_mode as t:
t.register_json('/foo/?name=bar', {'count': 1, 'results': [
{'id': 42, 'name': 'bar'},
], 'next': None, 'previous': None})
with self.assertRaises(exc.Found):
self.res._lookup(name='bar', fail_on_found=True)
def test_copy_with_multiples(self):
"""
A resource with fields marked `multiple` has those fields copied fully
"""
class BarResource(models.Resource):
endpoint = '/bar/'
name = models.Field(unique=True)
variables = models.Field(multiple=True)
res = BarResource()
with mock.patch.object(res, 'read') as read_mock:
read_mock.return_value = {
"count": 1,
"results": [
{
"id": 42,
"name": "foobarin",
"variables": "foobar: barfood"
}
]
}
with mock.patch.object(res, 'write') as write_mock:
res.copy()
name, args, kwargs = write_mock.mock_calls[0]
self.assertEqual(kwargs['name'][:len("foobarin")], "foobarin")
self.assertEqual(kwargs['variables'], ('foobar: barfood',))
self.assertNotIn('id', kwargs)
class MonitorableResourcesTests(unittest.TestCase):
"""Estblaish that the MonitorableResource abstract class works in the
way that we expect.
"""
def test_status_not_implemented(self):
"""Establish that the abstract MonitorableResource's status
method raises NotImplementedError.
"""
with self.assertRaises(NotImplementedError):
models.MonitorableResource().status(None)
class SurveyResourceTests(unittest.TestCase):
"""Test methods specific to survey models."""
def setUp(self):
self.res = models.SurveyResource()
self.res.endpoint = '/job_templates/'
def test_survey_no_op(self):
with mock.patch.object(models.base.BaseResource, 'write') as w:
self.res.modify(name='foobar')
w.assert_called_once_with(
create_on_missing=False, force_on_exists=True,
name='foobar', pk=None)
def test_survey_create(self):
with mock.patch.object(models.base.BaseResource, 'write') as w:
w.return_value = {'id': 42, 'survey_enabled': True}
survey_data = {'foobar': 'foo'}
with client.test_mode as t:
t.register_json(
'/job_templates/42/survey_spec/', {},
method='POST'
)
self.res.modify(survey_spec=survey_data, verbose=True)
self.assertEqual(t.requests[0].body, json.dumps(survey_data))
def test_survey_delete(self):
with mock.patch.object(models.base.BaseResource, 'write') as w:
w.return_value = {'id': 42, 'survey_enabled': True}
with client.test_mode as t:
t.register_json(
'/job_templates/42/survey_spec/', {},
method='DELETE'
)
self.res.modify(survey_spec={}, verbose=True)
self.assertEqual(t.requests[0].method, 'DELETE')
| python |
# -*- coding: utf-8 -*-
import json
from typing import Iterable
from pyrus_nn.rust.pyrus_nn import PyrusSequential
from pyrus_nn import layers
class Sequential:
# This is the actual Rust implementation with Python interface
_model: PyrusSequential
def __init__(self, lr: float, n_epochs: int, batch_size: int = 32, cost_func: str = "mse"):
"""
Initialize the model.
Parameters
----------
lr: float
The learning rate of the model
        n_epochs: int
            How many epochs to run during training
        batch_size: int
            Mini-batch size used during training
        cost_func: str
            Name of the cost function (e.g. "mse")
"""
self._model = PyrusSequential(lr, n_epochs, batch_size, cost_func)
self.lr = lr
self.n_epochs = n_epochs
self.batch_size = batch_size
self.cost_func = cost_func
def fit(self, X: Iterable[Iterable[float]], y: Iterable[Iterable[float]]):
"""
Fit the model using X and y. Each of which would be a 2d iterable.
For example::
X = [[1, 2, 3], [4, 5, 6]]
y = [[1], [2]]
Parameters
----------
X: Iterable
2d iterable
y: Iterable
2d iterable
Returns
-------
self
"""
self._model.fit(X, y)
return self
def predict(self, X: Iterable[Iterable[float]]) -> Iterable[Iterable[float]]:
"""
Apply the model to input data
Parameters
----------
X: Iterable
2d iterable
Returns
-------
Iterable[Iterable[float]]
"""
return self._model.predict(X)
def add(self, layer: layers.Layer):
"""
Add a layer to this network
Parameters
----------
layer: pyrus_nn.layers.Layer
A layer compatible with the previous layer
Returns
-------
None
"""
if isinstance(layer, layers.Dense):
self._model.add_dense(layer.n_input, layer.n_output, layer.activation)
def to_dict(self):
"""
Serialize this network as a dictionary of primitives suitable
for further serialization into json, yaml, etc.
Returns
-------
dict
"""
return dict(
params=self.get_params(),
model=json.loads(self._model.to_json())
)
@classmethod
def from_dict(cls, conf: dict):
"""
Re-construct the model from a serialized version of itself
Parameters
----------
conf: dict
Configuration resulting from a previous call to ``.to_dict()``
Returns
-------
Sequential
"""
model = cls(**conf['params'])
model._model = PyrusSequential.from_json(json.dumps(conf['model']))
return model
def get_params(self, deep=False):
return dict(
lr=self.lr,
n_epochs=self.n_epochs
)
def __eq__(self, other: "Sequential"):
return other.to_dict() == self.to_dict()
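# Example usage (a minimal sketch; the layer sizes and the 'sigmoid' activation
# are arbitrary, and the Dense(...) argument order is assumed from the
# attributes read in `add`, so treat it as illustrative only):
#
#     model = Sequential(lr=0.01, n_epochs=100)
#     model.add(layers.Dense(4, 8, activation='sigmoid'))
#     model.add(layers.Dense(8, 1, activation='sigmoid'))
#     model.fit([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]], [[1.0], [0.0]])
#     preds = model.predict([[0.1, 0.2, 0.3, 0.4]])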
| python |
from django import template
from cart.utils import get_or_set_order_session
register = template.Library()
@register.filter
def cart_item_count(request):
order = get_or_set_order_session(request)
count = order.items.count()
return count
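# Template usage (a sketch; the {% load %} name must match this module's
# filename, which is not shown here, so 'cart_tags' below is a placeholder):
#
#     {% load cart_tags %}
#     Items in cart: {{ request|cart_item_count }}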
| python |
from visions.utils.monkeypatches import imghdr_patch, pathlib_patch
__all__ = [
"imghdr_patch",
"pathlib_patch",
]
| python |
from pprint import pprint
from ayesaac.services.common import QueueManager
from ayesaac.utils.logger import get_logger
logger = get_logger(__file__)
class Interpreter(object):
"""
    The Interpreter class performs a simple comparison between what the vision
    services found and what the user asked for
    (i.e. which requested objects were found and which were not).
"""
def __init__(self):
self.queue_manager = QueueManager(
[self.__class__.__name__, "NaturalLanguageGenerator"]
)
self.memory = {}
logger.info(f"{self.__class__.__name__} ready")
def filter_objects(self, body):
return body["objects"]
def filter_texts(self, body):
return body["texts"]
def callback(self, body, **_):
data = None
key = ""
if "objects" in body:
key = "objects"
data = self.filter_objects(body)
body["objects"] = data
elif "texts" in body:
key = "texts"
data = self.filter_texts(body)
body["texts"] = data
if body["wait_package"] == 1:
body["path_done"].append(self.__class__.__name__)
del body["vision_path"]
# pprint(body)
# TODO: uncomment if you wanna test the NLG, it could be text, objects,
# objects + colour, objects + lateral position
self.queue_manager.publish("NaturalLanguageGenerator", body)
else:
if body["intern_token"] not in self.memory:
self.memory[body["intern_token"]] = {key: data}
elif (
body["intern_token"] in self.memory
and body["wait_package"] < len(self.memory[body["intern_token"]]) - 1
):
self.memory[body["intern_token"]][key] = data
else:
for key in self.memory[body["intern_token"]]:
body[key] = self.memory[body["intern_token"]][key]
del self.memory[body["intern_token"]][key]
# pprint(body)
# TODO: uncomment if you wanna test the NLG
self.queue_manager.publish("NaturalLanguageGenerator", body)
def run(self):
self.queue_manager.start_consuming(self.__class__.__name__, self.callback)
def main():
interpreter = Interpreter()
interpreter.run()
if __name__ == "__main__":
main()
| python |
from core.models import MedicalCare, Pets, Tutor, Vet
from django.contrib import admin
admin.site.register(Vet)
class MedicalCareAdmin(admin.ModelAdmin):
list_display = ('id', 'date', 'time', 'pet_name', 'procedure', 'report')
admin.site.register(MedicalCare, MedicalCareAdmin)
class PetsAdmin(admin.ModelAdmin):
list_display = ('id', 'pet_name', 'species', 'breed',
'gender', 'date_of_birth', 'castrated', 'weight')
admin.site.register(Pets, PetsAdmin)
class TutorAdmin(admin.ModelAdmin):
list_display = ('tutor_name', 'cpf', 'phone', 'email',
'street', 'number', 'district', 'state', 'cep')
admin.site.register(Tutor, TutorAdmin)
| python |
#Test Array Implementation
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pyds import array
#test array
print("01 : ======= Creating Array of size 5 =======")
arr = array(5)
print("02: ======= Traversing Array =======")
arr.print()
print("03: ======= Insert 5 Items =======")
arr.insert(0,1)
arr.insert(1,2)
arr.insert(2,3)
arr.insert(3,4)
arr.insert(4,5)
print("======= Traversing Array =======")
arr.print()
print("04: ======= Exceeding Items =======")
try:
arr.insert(5,6)
except Exception as err:
print(err)
print("05: ======= Delete Item at index 0 =======")
print(arr.delete(0))
print("06: ======= Re-Traversing Array =======")
arr.print()
| python |
import json
import time
import logging
import requests
import functools
class WechatAppPush:
"""
WechatAppPush decorator
Push the msg of the decorated function
Example 1:
@WechatAppPush(corpid, corpsecret, agentid)
def func():
return 'xxx'
Example 2:
def func():
return 'xxx'
WechatAppPush(corpid, corpsecret, agentid)(func())()
Example 3:
WechatAppPush(corpid, corpsecret, agentid)('xxx')()
Then wechat app will push xxx
:param corpid: wechat app corpid
:param corpsecret: wechat app corpsecret
:param agentid: wechat app agentid
:param touser: wechat app @ touser (optional, default: @all )
    :param message: wechat push message (optional, default: Wechat push message test)
:return func:
docs: https://developer.work.weixin.qq.com/document/path/90236
"""
    def __init__(self, corpid: str, corpsecret: str, agentid: str, touser: str = '@all', message: str = 'Wechat push message test') -> None:
self._corpid = corpid
self._corpsecret = corpsecret
self._agentid = agentid
self._touser = touser
self._message = message
def __call__(self, func=None):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# before func
try:
self._message = func(*args, **kwargs)
except:
if func != None:
self._message = func
# after func
response = self.send_text()
if response != "ok":
print(f'Wechat push error: {response}')
return self._message
return wrapper
def get_access_token(self) -> str:
send_url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
send_values = {
"corpid": self._corpid,
"corpsecret": self._corpsecret,
}
response = requests.post(send_url, params=send_values).json()
return response["access_token"]
def send_text(self) -> str:
send_url = (
"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
+ self.get_access_token()
)
send_values = {
"touser": self._touser,
"msgtype": "text",
"agentid": self._agentid,
"text": {"content": self._message},
"safe": "0",
}
send_msges = bytes(json.dumps(send_values), "utf-8")
        response = requests.post(send_url, send_msges)
        response = response.json()
        return response["errmsg"]
class Debug:
"""
Debug decorator
    :param level: logging level used for the messages (default: logging.DEBUG)
    :param func_time: if True, log the runtime of the decorated function
    :param func_info: if True, log the call signature and return value
"""
def __init__(self, level=logging.DEBUG, func_time=True, func_info=True) -> None:
self._func_timer = func_time
self._level = level
self._func_info = func_info
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
DATE_FORMAT = "%m/%d/%Y %H:%M:%S %p"
logging.basicConfig(level=self._level, format=LOG_FORMAT, datefmt=DATE_FORMAT)
def __call__(self, func):
@self.func_time
@self.func_info
@functools.wraps(func)
def wrapper(*args, **kwargs):
value = func(*args, **kwargs)
return value
return wrapper
def func_time(self, func):
"""Print the runtime of the decorated function"""
if self._func_timer != True:
return func
@functools.wraps(func)
def wrapper(*args, **kwargs):
start_time = time.perf_counter()
# before func
value = func(*args, **kwargs)
# after func
end_time = time.perf_counter()
run_time = end_time - start_time
logging.log(msg=f"Finished {func.__name__!r} in {run_time:.4f} secs", level=self._level)
return value
return wrapper
def func_info(self, func):
"""Print the function signature and return value"""
if self._func_info != True:
return func
@functools.wraps(func)
def wrapper(*args, **kwargs):
args_repr = [repr(a) for a in args]
kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()]
signature = ", ".join(args_repr + kwargs_repr)
logging.log(msg=f"Calling {func.__name__}({signature})", level=self._level)
# before func
value = func(*args, **kwargs)
# after func
logging.log(msg=f"{func.__name__!r} returned {value!r}", level=self._level)
return value
return wrapper
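# Example usage (a small sketch; `add` is a hypothetical function):
#
#     @Debug(level=logging.INFO)
#     def add(a, b):
#         return a + b
#
#     add(1, 2)   # logs the call signature, the return value and the runtime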
| python |
N = int(input())
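# (N + 1) // 2 is the number of odd integers in 1..N, so the expression below
# prints that count divided by N, to 10 decimal places.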
print(f'{((N + 1) // 2 / N):.10f}') | python |
try:
from datetime import datetime
import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.linear_model import BayesianRidge
from sklearn import preprocessing
except:
pass
from environmental_data_modules import PostProcessor, AurnModule, DateRangeProcessor
class AurnPostProcessor(PostProcessor, AurnModule, DateRangeProcessor):
"""
Class used for post-processing data that has been extracted from AURN server.
"""
# Define 'absolute' constants
BASE_FILE_OUT = '{}/aurn_processed_daily_{}.csv'
# Define default constants
DEFAULT_OUT_DIR = 'Aurn_processed_data'
DEFAULT_EMEP_FILENAME = None
# Calculation defaults
DEFAULT_MIN_YEARS_REFERENCE = 1
DEFAULT_MIN_YEARS = 1
DEFAULT_IMPUTER_RANDOM_STATE = 0
DEFAULT_IMPUTER_ADD_INDICATOR = False
DEFAULT_IMPUTER_INITIAL_STRATEGY = 'mean'
DEFAULT_IMPUTER_MAX_ITER = 100
try:
DEFAULT_IMPUTER_ESTIMATOR = BayesianRidge()
except:
DEFAULT_IMPUTER_ESTIMATOR = None
DEFAULT_TRANSFORMER_OUTPUT_DISTRIBUTION = 'normal'
DEFAULT_TRANSFORMER_METHOD = 'box-cox'
DEFAULT_TRANSFORMER_STANDARDIZE = False
def __init__(self, metadata_filename=AurnModule.DEFAULT_METADATA_FILE, metadata_url=AurnModule.DEFAULT_METADATA_URL,
out_dir=DEFAULT_OUT_DIR, verbose=PostProcessor.DEFAULT_VERBOSE):
""" Initialise instance of the AurnPostProcessor class.
Initialises the private class variables
Args:
metadata_filename: filename of the metadata used in Aurn data extraction
metadata_url: alternative source of AURN metadata, if metadata_filename is None
out_dir: (string) directory to be used for all outputs
verbose: (integer) level of verbosity in output.
Returns:
Initialised instance of AurnPostProcessor
"""
super(AurnPostProcessor, self).__init__(out_dir, verbose)
AurnModule.__init__(self, metadata_filename=metadata_filename, metadata_url=metadata_url)
DateRangeProcessor.__init__(self)
self._emep_data = None
self.min_years_reference = AurnPostProcessor.DEFAULT_MIN_YEARS_REFERENCE
self.min_years = AurnPostProcessor.DEFAULT_MIN_YEARS
self.impute_data = False
self._imputer = None
self._transformer = None
@PostProcessor.transformer.setter
def transformer(self, transformer):
if transformer is None or type(transformer).__name__ in ['QuantileTransformer','PowerTransformer']:
self._transformer = transformer
else:
raise ValueError('Error setting transformer, incorrect object type: {}'.format(type(transformer).__name__))
@PostProcessor.station_data.setter
def station_data(self, raw_data):
if self.verbose > 0:
print('Loading stations data metadata')
try:
station_data = raw_data.drop_duplicates()
station_data = station_data.set_index('site_id')
except Exception as err:
raise ValueError('Unable to get correct site data from Metadata input file. Check metadata file content.')
self._station_data = station_data
def impute_method_setup(self, random_state=DEFAULT_IMPUTER_RANDOM_STATE, add_indicator=DEFAULT_IMPUTER_ADD_INDICATOR,
initial_strategy=DEFAULT_IMPUTER_INITIAL_STRATEGY,
max_iter=DEFAULT_IMPUTER_MAX_ITER, estimator=DEFAULT_IMPUTER_ESTIMATOR,
output_distribution=DEFAULT_TRANSFORMER_OUTPUT_DISTRIBUTION,
transformer_method=DEFAULT_TRANSFORMER_METHOD, transformer_standardize=DEFAULT_TRANSFORMER_STANDARDIZE):
""" Initialises the IterativeImputer, QuantileTransformer and PowerTransformer methods required
if missing data is to be imputed.
Parameters are passed to the sklearn routines. Where this is being done it is noted below.
For further documentation on how these functions work, and what the parameters denote,
please refer to the sklearn documentation.
IterativeImputer: https://scikit-learn.org/stable/modules/generated/sklearn.impute.IterativeImputer.html
QuantileTransformer: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.QuantileTransformer.html
PowerTransformer: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PowerTransformer.html
Args:
random_state: (int) (IterativeImputer & QuantileTransformer) seed for pseudo random number generator
add_indicator: (boolean) (IterativeImputer) if True adds a `MissingIndicator` transform to the stack
initial_strategy: (str) (IterativeImputer) define strategy to use for initialising missing values
max_iter: (int) (IterativeImputer) maximum number of imputation rounds to perform
estimator: (str) (IterativeImputer) estimator method to be used
output_distribution: (str) (QuantileTransformer) Marginal distribution for the transformed data
transformer_method (str) (PowerTransformer) method to use, 'box-cox' is default
            transformer_standardize (boolean) (PowerTransformer) select if zero-mean, unit-variance normalisation is applied, default is False
Returns: None
"""
# set the imputer options (if we are using them)
self.imputer = IterativeImputer(random_state=random_state, add_indicator=add_indicator,
initial_strategy=initial_strategy, max_iter=max_iter, verbose=self.verbose,
estimator=estimator)
# set the power transform options
self.transformer_quantile = preprocessing.QuantileTransformer(output_distribution=output_distribution,
random_state=random_state)
# set the power transform options
self.transformer_power = preprocessing.PowerTransformer(method=transformer_method,
standardize=transformer_standardize)
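    # Example workflow (an illustrative sketch only; the file names below are
    # hypothetical and all remaining arguments use the class defaults):
    #
    #     pp = AurnPostProcessor(metadata_filename='AURN_metadata.RData',
    #                            out_dir='Aurn_processed_data', verbose=1)
    #     pp.impute_method_setup()
    #     daily = pp.process('aurn_extracted_data.csv', impute_data=True)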
def process(self, in_file, date_range=None,
site_list=AurnModule.DEFAULT_SITE_LIST,
emep_filename=DEFAULT_EMEP_FILENAME,
min_years_reference=DEFAULT_MIN_YEARS_REFERENCE,
min_years=DEFAULT_MIN_YEARS,
impute_data=PostProcessor.DEFAULT_IMPUTE_DATA,
save_to_csv=PostProcessor.DEFAULT_SAVE_TO_CSV,
outfile_suffix='',
species_list=AurnModule.SPECIES_LIST_EXTRACTED):
""" Post process the data extracted from the AURN dataset, based on the parameters given.
Args:
in_file: (str) The file spec of the input file (required)
date_range: (list of 2 datetime) The date range of interest
site_list: (list of string/number) Site IDs of interest
emep_filename: (str) The file spec of the EMEP file to be used to help calculate #Todo Doug
min_years_reference: (float) The minimum number of years of data for any site that we are going to
use as a reference site later. (this cannot be less than min_years)
min_years: (float) The minimum number of years of data that a site must have
impute_data: (boolean) Whether to attempt to impute missing data
            save_to_csv: (boolean) Whether to save the output dataframes to CSV file(s)
            outfile_suffix: (str) The suffix appended to the end of output file names.
Returns:
daily_dataframe: daily dataset, for all measurements, as pandas.Dataframe
Required MultiIndex:
'time_stamp' (datetime object): date (only) (e.g. 2017-06-01)
'sensor_name' (string): ID string for site (e.g. 'LIN3 [AQ]')
Required columns:
'O3.max' (float): daily maximum value
'O3.mean' (float): daily mean value
'O3.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'PM10.max' (float): daily maximum value
'PM10.mean' (float): daily mean value
'PM10.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'PM2.5.max' (float): daily maximum value
'PM2.5.mean' (float): daily mean value
'PM2.5.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'NO2.max' (float): daily maximum value
'NO2.mean' (float): daily mean value
'NO2.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'NOXasNO2.max' (float): daily maximum value
'NOXasNO2.mean' (float): daily mean value
'NOXasNO2.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'SO2.max' (float): daily maximum value
'SO2.mean' (float): daily mean value
'SO2.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
"""
if not isinstance(in_file, str):
raise ValueError('in_file must be a string')
# Process inputs
if date_range is not None:
self.date_range = [datetime.strptime(date_range[0], DateRangeProcessor.INPUT_DATE_FORMAT),
datetime.strptime(date_range[1], DateRangeProcessor.INPUT_DATE_FORMAT)]
else:
self.date_range = [self.get_available_start(), self.get_available_end()]
self.file_out = AurnPostProcessor.BASE_FILE_OUT.format(self.out_dir, outfile_suffix)
self._emep_data = self.load_emep_data(emep_filename)
self.min_years = min_years
self.min_years_reference = min_years_reference
self.species_list = species_list
self.site_list = site_list
self.station_data = self.metadata['AURN_metadata'][['site_id', 'latitude', 'longitude', 'site_name']]
if self.verbose > 1: print('Station data: \n {}'.format(self.station_data))
self.impute_data = impute_data
# load and prepare the hourly dataset
hourly_dataframe = self.load_aurn_data(in_file)
print('filter for minimum data lengths, and reduce dataset to only stations of interest')
hourly_dataframe_filtered, reference_sites, required_sites, site_list_internal = \
self.list_required_and_reference_sites(hourly_dataframe)
# get the list of required sites from what is available, and what was requested
site_list_internal = set(site_list_internal).intersection(self.site_list)
if len(hourly_dataframe_filtered.index) == 0:
print('Exiting post-processing: Metadata is empty after initial filtering processes')
return
if self.impute_data:
print('imputation of data, returning hourly data')
hourly_dataframe = self.organise_data_imputation(
hourly_dataframe_filtered, reference_sites, required_sites, site_list_internal)
else:
print('sorting data (no imputation), returning hourly data')
hourly_dataframe = self.organise_data(hourly_dataframe_filtered, site_list_internal)
# calculate the daily max and mean for each station
daily_dataframe = self.combine_and_organise_mean_max(hourly_dataframe)
if save_to_csv:
# write this dataset to file
daily_dataframe.to_csv(self.file_out, index=True, header=True, float_format='%.2f')
return daily_dataframe
def combine_and_organise_mean_max(self, hourly_dataframe):
"""
Combine and organise the daily mean, maximum, and count information.
Args:
hourly_dataframe: hourly dataset, for all measurements, as pandas.Dataframe
Required Index:
timestamp (datetime object):
site_id (string):
Optional Columns:
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
imputed O3 (int): flag indicating imputed data (0=original,1=imputed)
imputed PM10 (int):
imputed PM2.5 (int):
imputed NO2 (int):
imputed NOXasNO2 (int):
imputed SO2 (int):
Returns:
final_dataframe: daily dataset, for all measurements, as pandas.Dataframe
Required MultiIndex:
'time_stamp' (datetime object): date (only) (e.g. 2017-06-01)
'sensor_name' (string): ID string for site (e.g. 'LIN3 [AQ]')
Required columns:
'O3.max' (float): daily maximum value
'O3.mean' (float): daily mean value
'O3.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'PM10.max' (float): daily maximum value
'PM10.mean' (float): daily mean value
'PM10.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'PM2.5.max' (float): daily maximum value
'PM2.5.mean' (float): daily mean value
'PM2.5.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'NO2.max' (float): daily maximum value
'NO2.mean' (float): daily mean value
'NO2.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'NOXasNO2.max' (float): daily maximum value
'NOXasNO2.mean' (float): daily mean value
'NOXasNO2.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'SO2.max' (float): daily maximum value
'SO2.mean' (float): daily mean value
'SO2.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
"""
#### group by date and site
daily_grouped_data = hourly_dataframe.groupby([pd.Grouper(level=self._timestamp_string, freq='1D'), self._site_string])
spc_list = self.species_list
#### loop by spc through grouped data, and calculate the mean, max, and flag values
for spc in spc_list:
temp_dataframe = pd.DataFrame()
temp_dataframe['{}_mean'.format(spc)] = daily_grouped_data.mean()[spc]
temp_dataframe['{}_max'.format(spc)] = daily_grouped_data.max()[spc]
temp_dataframe['{}_flag'.format(spc)] = daily_grouped_data.mean()['{}_flag'.format(spc)]
            try:
                final_dataframe = final_dataframe.merge(temp_dataframe, how='outer', left_index=True, right_index=True)
            except NameError:
                # final_dataframe does not exist yet on the first species in the loop
                final_dataframe = temp_dataframe.copy()
#### rename the sites, to include AQ flag
final_dataframe.index = final_dataframe.index.set_levels(
['{} [AQ]'.format(x) for x in final_dataframe.index.levels[1]], level=1)
#### return output dataframe
        return final_dataframe
def load_aurn_data(self, file_in):
"""
Loading the AURN dataset.
Args:
file_in (Path object or string): path for the file to be read in
Returns:
hourly_dataframe: hourly dataset, for all measurements, as pandas.Dataframe
Index: none
Required Columns:
timestamp (datetime object):
site_id (string):
Optional Columns:
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
"""
# Read in hourly dataframe file
try:
            hourly_dataframe = pd.read_csv(file_in,
                                           sep=',',
                                           usecols=[AurnModule.INDEX_EXTRACTED] + list(AurnModule.NEW_FILE_COLS),
                                           index_col=AurnModule.INDEX_EXTRACTED,
                                           parse_dates=[self._timestamp_string])
except Exception as err:
            raise ValueError('Unable to read AURN extracted data file {}. {}'.format(file_in, err))
if self.verbose > 1:
print('Hourly dataframe: \n {}'.format(hourly_dataframe))
print('Hourly dataframe data types: \n {}'.format(hourly_dataframe.dtypes))
        return hourly_dataframe
def load_emep_data(self, filename):
"""
        Loads the EMEP model data, or creates an empty dataframe (required for logic checks in the workflow)
        Args:
            filename (str): location of the EMEP file. This should be None if there is no EMEP data
Returns:
emep_dataframe: pandas Dataframe, containing the EMEP model data. If no EMEP data
is to be used then this will be an empty Dataframe.
Index: none
Required Columns:
timestamp (datetime object):
site_id (string):
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
"""
if filename is not None:
filename = Path(filename)
print('reading emep file')
try:
emep_dataframe = pd.read_csv(filename)
except Exception as err:
                raise ValueError('Error loading the EMEP data from filename: {} . {}'.format(filename, err))
            if 'NOx' not in emep_dataframe.columns:
                raise ValueError("EMEP file does not contain an 'NOx' column")
            return emep_dataframe.rename(columns={'NOx': 'NOXasNO2'})
else:
return pd.DataFrame()
def list_required_and_reference_sites(self, data_in):
"""
This function creates the lists of required sites, and reference sites, for the
final dataset.
Args:
data_in: hourly dataset, for all measurements, as pandas.Dataframe
Index: none
Required Columns:
timestamp (datetime object):
site_id (string):
Optional Columns:
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
Returns:
            data_filtered: pandas dataframe, as above, containing the hourly dataset for only
                the required station datasets
reference_sites: (dict, keys are species):
items: (list of strings) the site_id's for our reference sites for each `spc`
required_sites: (dict, keys are species):
items: (list of strings) required sites for `spc`
combined_req_site_list: (list, strings) a single list of required sites
"""
print(' get the lists of required and reference stations for each measurement variable')
tempgroups = data_in.groupby([self._site_string, pd.Grouper(key=self._timestamp_string, freq='1D')])
daily_hour_counts = tempgroups.count()
spc_list = daily_hour_counts.columns.values
required_sites = {}
reference_sites = {}
combined_req_site_list = []
for spc in spc_list:
print('site day counts for {}'.format(spc))
req_days_counts = daily_hour_counts[spc]
req_days_counts = req_days_counts[req_days_counts > 0]
required_sites[spc], reference_sites[spc] = self.station_listing(req_days_counts)
combined_req_site_list = combined_req_site_list + required_sites[spc]
print('VERBOSE: ', self.verbose)
if self.verbose > 0: print('\t\treq sites {}:'.format(spc), required_sites[spc])
if self.verbose > 0: print('\t\tuse sites {}:'.format(spc), reference_sites[spc])
# get a list of all sites which are required for at least one measurement set
combined_req_site_list = list(dict.fromkeys(combined_req_site_list))
data_filtered = data_in[data_in[self._site_string].isin(combined_req_site_list)]
return data_filtered, reference_sites, required_sites, combined_req_site_list
def organise_data_imputation(self, hourly_dataframe_filtered, reference_sites, required_sites, site_list_internal):
"""
Function for organising the imputation of the datasets. This runs the
'transform_and_impute_data' function for each of the variables of interest.
Args:
hourly_dataframe_filtered: hourly dataset, for all measurements, as pandas.Dataframe
Index: none
Required Columns:
timestamp (datetime object):
site_id (string):
Optional Columns:
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
            reference_sites (dict, keys are species): sites to use for reference when imputing datasets,
                as a list of site_id strings for each species
required_sites: (dict, keys are species):
items: (list of strings) required sites for `spc`
site_list_internal (list, string or int): combined list of sites to retain
Returns:
output_dataframe: hourly dataset, for all measurements, as pandas.Dataframe
Required Index:
timestamp (datetime object):
site_id (string):
Optional Columns:
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
O3_flag (int): flag indicating imputed data (0=original,1=imputed)
PM10_flag (int):
PM2.5_flag (int):
NO2_flag (int):
NOXasNO2_flag (int):
SO2_flag (int):
"""
transformer = self.transformer_quantile
output_dataframe = pd.DataFrame()
date_index = pd.date_range(start=self.start, end=self.end, freq='1H', name=self._timestamp_string)
# Set the number of reference stations to request
ref_station_numbers = [len(reference_sites[x]) for x in reference_sites.keys()]
print(ref_station_numbers)
station_number = min([5] + [x - 1 for x in ref_station_numbers])
hourly_dataframe_internal = hourly_dataframe_filtered.set_index(self._timestamp_string)
spc_list = self.species_list
if not self._emep_data.empty:
if self.verbose > 0: print('Loading EMEP data')
emep_dataframe_internal = self._emep_data.set_index(self._timestamp_string)
if self.verbose > 1: print('1. Site list internal: ', site_list_internal)
for site in site_list_internal:
if self.verbose > 1: print('2. Site: ', site)
# get list of chemical species that we need to impute for this site (including Date info)
req_spc = []
for spc in spc_list:
if site in required_sites[spc]:
req_spc.append(spc)
# copy these to a new dataframe
working_hourly_dataframe = pd.DataFrame([], index=date_index)
working_hourly_dataframe[req_spc] = \
hourly_dataframe_internal[hourly_dataframe_internal[self._site_string] == site][req_spc]
copy_hourly_dataframe = working_hourly_dataframe.copy()
copy_hourly_dataframe[self._site_string] = site
# get list of neighbouring sites for each of the chemical species of interest
for spc in spc_list:
if self.verbose > 1: print('3. Species: ', spc)
station_distances = self.get_station_distances(site, reference_sites[spc])
if self.verbose > 1: print('4. Station number:', station_number)
if self.verbose > 1: print('5. distances:', station_distances)
if self.verbose > 1: print('6.', len(station_distances))
for ii in range(0, min(station_number, len(station_distances))):
if self.verbose > 1: print('7. ii', ii)
station_code = station_distances.index[ii]
working_hourly_dataframe['{}_{}'.format(spc, station_code)] = \
hourly_dataframe_internal[hourly_dataframe_internal[self._site_string] == station_code][spc]
# get EMEP predictions of chemical species of interest (if needed)
if self.verbose > 1: print('EMEP data: {}'.format(self._emep_data))
if not self._emep_data.empty:
if self.verbose > 0: print('Using EMEP data')
for spc in spc_list:
working_hourly_dataframe['{}_{}'.format(spc, 'EMEP')] = \
emep_dataframe_internal[emep_dataframe_internal[self._site_string] == site][spc]
# run the imputation process
imputed_hourly_dataframe = self.transform_and_impute_data(working_hourly_dataframe,transformer=transformer)
# copy imputed data of interest into copy of original dataframe (without EMEP and neighbouring sites)
for spc in spc_list:
copy_hourly_dataframe['{}_flag'.format(spc)] = 0
if spc in req_spc:
copy_hourly_dataframe['{}_flag'.format(spc)] = copy_hourly_dataframe[spc].isna() * 1
copy_hourly_dataframe[spc] = imputed_hourly_dataframe[spc]
else:
copy_hourly_dataframe[spc] = np.nan
output_dataframe = output_dataframe.append(copy_hourly_dataframe)
output_dataframe = output_dataframe.reset_index().set_index([self._timestamp_string,self._site_string])
        return output_dataframe
def organise_data(self, hourly_dataframe_filtered, site_list_internal):
"""
Function for organising the required datasets. This mirrors the imputation function.
Args:
hourly_dataframe_filtered: hourly dataset, for all measurements, as pandas.Dataframe
Index: none
Required Columns:
timestamp (datetime object):
site_id (string):
Optional Columns:
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
site_list_internal (list, string or int): combined list of sites to retain
Returns:
hourly_dataframe: hourly dataset, for all measurements, as pandas.Dataframe
Required Index:
timestamp (datetime object):
site_id (string):
Optional Columns:
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
O3_flag (int): flag indicating imputed data (0=original,1=imputed)
PM10_flag (int):
PM2.5_flag (int):
NO2_flag (int):
NOXasNO2_flag (int):
SO2_flag (int):
"""
date_index = pd.date_range(start=self.start, end=self.end, freq='1H', name=self._timestamp_string)
output_dataframe = pd.DataFrame()
hourly_dataframe_internal = hourly_dataframe_filtered.set_index(self._timestamp_string)
spc_list = self.species_list
if self.verbose > 1: print('1. Site list internal: ', site_list_internal)
for site in site_list_internal:
if self.verbose > 1: print('2. Site: ', site)
# create new dataframe, with the dates that we are interested in
working_hourly_dataframe = pd.DataFrame([], index=date_index)
working_hourly_dataframe[self._site_string] = site
# copy these to a new dataframe
working_hourly_dataframe[spc_list] = \
hourly_dataframe_internal[hourly_dataframe_internal[self._site_string] == site][spc_list]
            # no imputation was performed, so just set the imputation flags to zero
for spc in spc_list:
working_hourly_dataframe['{}_flag'.format(spc)] = 0
# append data to the output dataframe
output_dataframe = output_dataframe.append(working_hourly_dataframe)
output_dataframe = output_dataframe.reset_index().set_index([self._timestamp_string,self._site_string])
        return output_dataframe
def transform_and_impute_data(self, df_in, transformer):
"""
Function for organising the transformation of the dataset, then imputing missing
data, before detransforming the data and returning it.
Args:
df_in: pandas dataframe containing the datasets to impute
Required Index:
date (datetime64 objects): date / time for each reading
Optional Columns: Measurement data at the site for which we are imputing
the data. Only those pollutants which have been measured
at this site will be included.
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
Reference Columns: Reference data at the X nearest sites to the
measurement being processed. All datasets will be
included, even for those pollutants which were not
included in the optional columns above. So, if
5 reference stations are used, this will give 30 (5*6)
columns of reference data. If EMEP data is being used
then these are added for EMEP data too, but only at
the station of interest (so only another 6 columns are
added).
O3_[site_id] (float):
PM10_[site_id] (float):
PM2.5_[site_id] (float):
NO2_[site_id] (float):
NOXasNO2_[site_id] (float):
SO2_[site_id] (float):
            transformer: the transform function to use, passed so that we can choose based
                         on the variable being operated on
Uses:
self.imputer
Returns:
df_out: pandas dataframe, containing the same datasets as above, but including
the imputed data too. All imputed data is included (including that for
the reference sites) - it is the task of the calling function to only
retain the imputed data for the station of interest, and to discard
the rest of the imputed data.
"""
# copy the input array, and note the columns
df_work = df_in.copy(deep=True)
cols = df_in.columns
# find missing datasets to remove
# also we note the columns that will be saved, and their order, for transferring data back!
col_remove = []
col_save = []
for col in cols:
if all(df_work[col].isna()):
col_remove.append(col)
else:
col_save.append(col)
df_work = df_work.drop(columns=col_remove)
if self.verbose > 2: print('df_work input to power transformer: \n {}'.format(df_work))
# power transformer fitting and transforming
transformer.fit(df_work.dropna())
if self.verbose > 2: print('Power transformer: Completed data fitting. Beginning power transformation')
np_out = transformer.transform(df_work)
if self.verbose > 2: print('Power transformer: Completed transformation. Beginning imputation')
# impute the missing values in this new dataframe
self.imputer.fit(np_out)
        if self.verbose > 2: print('Imputer: Completed imputation fitting. Beginning imputer transformation')
imp_out = self.imputer.transform(np_out)
        if self.verbose > 2: print('Imputer: Completed transformation. Beginning inverse transformation')
# apply the inverse transformation for our datasets (leaving out the indicator flags)
np_inv = transformer.inverse_transform(imp_out[:, :np_out.shape[1]])
        if self.verbose > 2: print('Imputer: Completed inverse transformation. Beginning copying and transforming values')
# copy the transformed values to a new dataframe
df_out = df_in.copy(deep=True)
for pos, col in enumerate(col_save):
pos_out = list(cols).index(col)
df_out.iloc[:, pos_out] = np_inv[:, pos]
if self.verbose > 1: print('Imputation: copied transformed values into new dataframe')
return df_out
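# --- Minimal, self-contained sketch of the daily mean/max/flag aggregation used in
# --- combine_and_organise_mean_max above. The synthetic data and column names below
# --- are illustrative only; it assumes pandas and numpy are imported at the top of
# --- this module as pd and np (as used throughout the class).
if __name__ == '__main__':
    _demo = pd.DataFrame({
        'timestamp': pd.date_range('2017-06-01', periods=48, freq='1H'),
        'site_id': ['LIN3'] * 48,
        'O3': np.random.rand(48) * 100.0,
        'O3_flag': [0] * 48,
    }).set_index('timestamp')
    _grouped = _demo.groupby([pd.Grouper(level='timestamp', freq='1D'), 'site_id'])
    _daily = pd.DataFrame({
        'O3_mean': _grouped.mean()['O3'],
        'O3_max': _grouped.max()['O3'],
        'O3_flag': _grouped.mean()['O3_flag'],
    })
    print(_daily)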
| python |
from .util import *
from .db import Database
from .optimizer import *
from .ops import Print, Yield
from .parseops import *
from .udfs import *
from .parse_sql import parse
from .tuples import *
from .tables import *
from .schema import Schema
from .exprs import Attr
from .compile import *
from .context import *
| python |
from __future__ import print_function
import getopt
def usage():
print("""Usage: check_asdis -i <pcap_file> [-o <wrong_packets.pcap>]
-v increase verbosity
-d hexdiff packets that differ
-z compress output pcap
-a open pcap file in append mode""", file=sys.stderr)
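# Example invocation (file names are illustrative):
#   python check_asdis.py -i capture.pcap -o mismatched_packets.pcap -d -v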
def main(argv):
PCAP_IN = None
PCAP_OUT = None
COMPRESS = False
APPEND = False
DIFF = False
VERBOSE = 0
try:
opts = getopt.getopt(argv, "hi:o:azdv")
for opt, parm in opts[0]:
if opt == "-h":
usage()
raise SystemExit
elif opt == "-i":
PCAP_IN = parm
elif opt == "-o":
PCAP_OUT = parm
elif opt == "-v":
VERBOSE += 1
elif opt == "-d":
DIFF = True
elif opt == "-a":
APPEND = True
elif opt == "-z":
COMPRESS = True
if PCAP_IN is None:
raise getopt.GetoptError("Missing pcap file (-i)")
except getopt.GetoptError as e:
print("ERROR: %s" % e, file=sys.stderr)
raise SystemExit
from scapy.config import conf
    from scapy.utils import RawPcapReader, RawPcapWriter, hexdiff
    from scapy.compat import raw
from scapy.layers import all # noqa: F401
pcap = RawPcapReader(PCAP_IN)
pcap_out = None
if PCAP_OUT:
pcap_out = RawPcapWriter(PCAP_OUT, append=APPEND, gz=COMPRESS, linktype=pcap.linktype) # noqa: E501
pcap_out._write_header(None)
LLcls = conf.l2types.get(pcap.linktype)
if LLcls is None:
print(" Unknown link type [%i]. Can't test anything!" % pcap.linktype, file=sys.stderr) # noqa: E501
raise SystemExit
i = -1
differ = 0
failed = 0
for p1, meta in pcap:
i += 1
try:
p2d = LLcls(p1)
            p2 = raw(p2d)
except KeyboardInterrupt:
raise
except Exception as e:
print("Dissection error on packet %i: %s" % (i, e))
failed += 1
else:
if p1 == p2:
if VERBOSE >= 2:
print("Packet %i ok" % i)
continue
else:
print("Packet %i differs" % i)
differ += 1
if VERBOSE >= 1:
print(repr(p2d))
if DIFF:
hexdiff(p1, p2)
if pcap_out is not None:
pcap_out.write(p1)
i += 1
correct = i - differ - failed
print("%i total packets. %i ok, %i differed, %i failed. %.2f%% correct." % (i, correct, differ, # noqa: E501
failed, i and 100.0 * (correct) / i)) # noqa: E501
if __name__ == "__main__":
import sys
try:
main(sys.argv[1:])
except KeyboardInterrupt:
print("Interrupted by user.", file=sys.stderr)
| python |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for ViLT.
"""
from typing import List, Optional, Union
from transformers import BertTokenizerFast
from ...file_utils import TensorType
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from .feature_extraction_vilt import ViltFeatureExtractor
class ViltProcessor:
r"""
Constructs a ViLT processor which wraps a BERT tokenizer and ViLT feature extractor into a single processor.
[`ViltProcessor`] offers all the functionalities of [`ViltFeatureExtractor`] and [`BertTokenizerFast`]. See the
docstring of [`~ViltProcessor.__call__`] and [`~ViltProcessor.decode`] for more information.
Args:
feature_extractor (`ViltFeatureExtractor`):
An instance of [`ViltFeatureExtractor`]. The feature extractor is a required input.
tokenizer (`BertTokenizerFast`):
            An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
"""
def __init__(self, feature_extractor, tokenizer):
if not isinstance(feature_extractor, ViltFeatureExtractor):
raise ValueError(
f"`feature_extractor` has to be of type {ViltFeatureExtractor.__class__}, but is {type(feature_extractor)}"
)
if not isinstance(tokenizer, BertTokenizerFast):
raise ValueError(f"`tokenizer` has to be of type {BertTokenizerFast.__class__}, but is {type(tokenizer)}")
self.feature_extractor = feature_extractor
self.tokenizer = tokenizer
self.current_processor = self.feature_extractor
def save_pretrained(self, save_directory):
"""
Save a ViLT feature_extractor object and BERT tokenizer object to the directory `save_directory`, so that it
can be re-loaded using the [`~ViltProcessor.from_pretrained`] class method.
<Tip>
This class method is simply calling [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] and
[`~tokenization_utils_base.PreTrainedTokenizer.save_pretrained`]. Please refer to the docstrings of the methods
above for more information.
</Tip>
Args:
save_directory (`str` or `os.PathLike`):
Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will
be created if it does not exist).
"""
self.feature_extractor.save_pretrained(save_directory)
self.tokenizer.save_pretrained(save_directory)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
r"""
Instantiate a [`ViltProcessor`] from a pretrained ViLT processor.
<Tip>
This class method is simply calling ViltFeatureExtractor's
[`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] and BertTokenizerFast's
[`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`]. Please refer to the docstrings of the methods
above for more information.
</Tip>
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or
namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.
- a path to a *directory* containing a feature extractor file saved using the
[`~SequenceFeatureExtractor.save_pretrained`] method, e.g., `./my_model_directory/`.
- a path or url to a saved feature extractor JSON *file*, e.g.,
`./my_model_directory/preprocessor_config.json`.
**kwargs
Additional keyword arguments passed along to both [`SequenceFeatureExtractor`] and
[`PreTrainedTokenizer`]
"""
feature_extractor = ViltFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
tokenizer = BertTokenizerFast.from_pretrained(pretrained_model_name_or_path, **kwargs)
return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
def __call__(
self,
images,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs
) -> BatchEncoding:
"""
This method uses [`ViltFeatureExtractor.__call__`] method to prepare image(s) for the model, and
[`BertTokenizerFast.__call__`] to prepare text for the model.
Please refer to the docstring of the above two methods for more information.
"""
encoding = self.tokenizer(
text=text,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
return_tensors=return_tensors,
**kwargs,
)
# add pixel_values + pixel_mask
encoding_feature_extractor = self.feature_extractor(images, return_tensors=return_tensors)
encoding.update(encoding_feature_extractor)
return encoding
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
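# --- Illustrative usage sketch (not part of the library module). The checkpoint
# --- name and image path are assumptions; running this requires Pillow, PyTorch
# --- and network access to download the pretrained files.
if __name__ == "__main__":
    from PIL import Image

    processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")  # assumed checkpoint id
    image = Image.open("example.jpg")  # placeholder image path
    encoding = processor(images=image, text="How many cats are there?", return_tensors="pt")
    print({name: tuple(tensor.shape) for name, tensor in encoding.items()})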
| python |
from mr_scraper.api import dispatch, ScraperMessage
def levels_fyi():
"""Scraper using Puppeter"""
message = ScraperMessage(
scraper="example.scrapers.levels_fyi",
type='companies',
payload={'url': '/company/'}
)
return dispatch(message)
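# --- Illustrative usage sketch. It assumes the 'example.scrapers.levels_fyi'
# --- scraper is registered with mr_scraper; the shape of the dispatched result
# --- depends on that scraper's implementation.
if __name__ == "__main__":
    results = levels_fyi()
    print(results)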
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
version = '0.12.0'
setup(
name='SerpScrap',
version=version,
description='''
    SEO python scraper to extract data from major search engine result pages.
    Extract data like url, title, snippet, richsnippet and the type from search results for given keywords. Detect Ads or make automated screenshots.
    You can also fetch text content of urls provided in search results or of your own urls.
    It's useful for SEO and business related research tasks.
''',
long_description=open('README.rst').read(),
author='Ronald Schmidt',
author_email='ronald.schmidt@zu-web.de',
doc_url='http://serpscrap.readthedocs.io/en/latest/',
url='https://github.com/ecoron/SerpScrap',
license='MIT',
packages=find_packages(),
install_requires=[
'PySocks==1.6.8',
'chardet==3.0.4',
'beautifulsoup4==4.6.3',
'html2text==2018.1.9',
'lxml==4.2.3',
'sqlalchemy==1.2.10',
'selenium==3.14.1',
'cssselect==1.0.3',
],
scripts=['install_chrome.sh'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Internet',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='seo scraper ad-detection scraping keywords',
)
| python |
import json
import subprocess
from oslo_log import log as logging
from magnum.common import exception
LOG = logging.getLogger(__name__)
class KubeCtl(object):
def __init__(self, bin='kubectl', global_flags=''):
super(KubeCtl, self).__init__()
self.kubectl = '{} {}'.format(bin, global_flags)
def execute(self, command, definition=None, namespace=None,
print_error=True):
if definition:
cmd = "cat <<'EOF' | {} {} -f -\n{}\nEOF".format(
self.kubectl, command, definition
)
else:
if namespace:
cmd = "{} -n {} {}".format(self.kubectl, namespace, command)
else:
cmd = "{} {}".format(self.kubectl, command)
try:
r = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
return r
# except subprocess.CalledProcessError as ex:
# # if print_error:
# if "delete" in command:
# LOG.warning("K8s: Delete failed.")
# else:
# exc_msg = "Failed to execute kubectl command, cmd={}, err={}".format(cmd, ex.stderr.decode())
# LOG.error(exc_msg)
# raise exception.MagnumException(message=exc_msg)
except Exception as ex:
# if print_error:
if "delete" in command:
LOG.warning("K8s: Delete failed.")
else:
exc_msg = "Failed to execute kubectl command, cmd={},\n STDOUT/STDERR={}".format(cmd, ex.stdout.decode())
LOG.error(exc_msg)
raise exception.MagnumException(message="Failed to execute kubectl command")
def apply(self, *args, **kwargs):
return self.execute('apply', *args, **kwargs)
def delete(self, *args, **kwargs):
return self.execute('delete', *args, **kwargs)
def get(self, resource, namespace=None, **kwargs):
result = self.execute(
'get %s -o json' % resource, namespace=namespace, **kwargs
).decode()
ret = json.loads(result)
if 'items' in ret:
return ret['items']
return ret
def describe(self, *args, **kwargs):
return self.execute('describe', *args, **kwargs)
def batch_delete(self, resource_mapping=[]):
"""Deletes Kubernetes resources.
Example for the resource_mapping param:
[{"service": ["srv1", "srv2"]}, {"deployment": ["deploy1"]}]
        Be careful about the deletion order.
"""
for res in resource_mapping:
for res_type, items in res.items():
resources = " ".join(items)
self.execute("delete %s %s" % (res_type, resources))
| python |
import json
import cfnresponse
def lambda_handler(event, context):
print(json.dumps(event))
response_data = {}
response_data['Data'] = None
if event['RequestType'] != 'Create':
cfnresponse.send(event, context, cfnresponse.SUCCESS,
response_data, "CustomResourcePhysicalID")
return
password = event['ResourceProperties']['Password']
confirm_password = event['ResourceProperties']['ConfirmPassword']
if password == confirm_password:
cfnresponse.send(event, context, cfnresponse.SUCCESS,
response_data, "CustomResourcePhysicalID")
else:
print('Passwords do not match!')
cfnresponse.send(event, context, cfnresponse.FAILED,
response_data, "CustomResourcePhysicalID")
| python |
from typing import NamedTuple
from thundersnow.precondition import check_argument
from thundersnow.predicate import is_not_blank
class Version(NamedTuple):
"""Sematnic Version object"""
major: str
    minor: str
patch: str
def __str__(self):
return '.'.join(self)
def from_string(s):
""" '1.2.3' -> Version('1','2','3')"""
s = str(s)
check_argument((s is not None) and is_not_blank(s), 'cannot create version from blank string')
parts = s.split('.')
if len(parts) == 1:
major, minor, patch = (parts[0], 0, 0)
elif len(parts) == 2:
major, minor, patch = (parts[0], parts[1], 0)
elif len(parts) == 3:
major, minor, patch = parts
else:
major, minor, patch = parts[:3]
major, minor, patch = [str(i) for i in (major, minor, patch)]
return Version(major, minor, patch)
Version.from_string = from_string | python |
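# --- Illustrative usage sketch ---
if __name__ == '__main__':
    v = Version.from_string('1.2.3')
    print(v.major, v.minor, v.patch)   # -> 1 2 3
    print(Version.from_string('1.2'))  # missing parts default to '0' -> 1.2.0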
import sys
from loguru import logger
logger.remove()
logger.add(sys.stderr, format="", colorize=True, backtrace=False, diagnose=True)
def div(x, y):
x / y
def cause(x, y):
try:
div(x, y)
except Exception:
raise ValueError("Division error")
def context(x, y):
try:
cause(x, y)
except Exception as e:
raise ValueError("Cause error") from e
try:
context(1, 0)
except ValueError:
logger.exception("")
| python |
import numpy as np
print("Did you know 2 + 2 = {}".format(2+2))
print("Of course I knew that, I have 4 fingers")
print("Well, I knew you had 4 fingers. I didn't know that you knew how to count!") | python |