Columns:
content: string (length 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
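A minimal sketch (not part of the dataset itself) of how rows with the schema above could be loaded and the sha1 column cross-checked. Assumptions: the data sits in a hypothetical "snippets.parquet" file and sha1 is the hex digest of the UTF-8 encoded content.

import hashlib

import pandas as pd

df = pd.read_parquet("snippets.parquet", columns=["content", "sha1", "id"])


def sha1_of(text: str) -> str:
    # Hex digest of the UTF-8 encoded snippet text.
    return hashlib.sha1(text.encode("utf-8")).hexdigest()


# Rows whose stored hash does not match the recomputed one (assumption:
# sha1 is computed over `content` exactly as stored).
mismatches = df[df["content"].map(sha1_of) != df["sha1"]]
print(len(mismatches), "rows with an unexpected sha1")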
def get_quest_stat(cards): # pylint: disable=R0912,R0915 """ Get quest statistics. """ res = {} encounter_sets = set() keywords = set() card_types = {} for card in cards: if card.get(lotr.CARD_KEYWORDS): keywords = keywords.union( lotr.extract_keywords(card[lotr.CARD_KEYWORDS])) if (card.get(lotr.CARD_TEXT) and (' Restricted.' in card[lotr.CARD_TEXT] or '\nRestricted.' in card[lotr.CARD_TEXT])): keywords.add('Restricted') if card.get(lotr.CARD_ENCOUNTER_SET): encounter_sets.add(card[lotr.CARD_ENCOUNTER_SET]) if card.get(lotr.CARD_ADDITIONAL_ENCOUNTER_SETS): encounter_sets = encounter_sets.union( [s.strip() for s in str(card[lotr.CARD_ADDITIONAL_ENCOUNTER_SETS]).split(';')]) card_type = card[lotr.CARD_TYPE] if card.get(lotr.CARD_SPHERE) in ('Boon', 'Burden'): card_type = '{} ({})'.format(card_type, card[lotr.CARD_SPHERE]) card_types[card_type] = ( card_types.get(card_type, 0) + card[lotr.CARD_QUANTITY]) if encounter_sets: res['encounter_sets'] = '*Encounter Sets*: {}\n'.format( ', '.join(sorted(encounter_sets))) else: res['encounter_sets'] = '' if keywords: res['keywords'] = '*Keywords*: {}\n'.format( ', '.join(sorted(keywords))) else: res['keywords'] = '' card_types = sorted(list(card_types.items()), key=lambda t: t[0]) card_types = sorted(card_types, key=lambda t: t[1], reverse=True) res['total'] = '*Cards*: {}\n'.format(sum(t[1] for t in card_types)) res['card_types'] = '\n'.join('*{}*: {}'.format( t[0], t[1]) for t in card_types) card_types = {} threat = 0 max_threat = 0 shadow = 0 surge = 0 res['encounter_deck'] = '' deck = [card for card in cards if card[CARD_DECK_SECTION] == 'Encounter'] for card in deck: card_type = card[lotr.CARD_TYPE] if card.get(lotr.CARD_SPHERE) in ('Boon', 'Burden'): card_type = '{} ({})'.format(card_type, card[lotr.CARD_SPHERE]) card_types[card_type] = ( card_types.get(card_type, 0) + card[lotr.CARD_QUANTITY]) if lotr.is_positive_int(card.get(lotr.CARD_THREAT)): threat += int(card[lotr.CARD_THREAT]) * card[lotr.CARD_QUANTITY] max_threat = max(max_threat, int(card[lotr.CARD_THREAT])) if card.get(lotr.CARD_SHADOW): shadow += card[lotr.CARD_QUANTITY] if card.get(lotr.CARD_KEYWORDS): if 'Surge' in lotr.extract_keywords(card[lotr.CARD_KEYWORDS]): surge += card[lotr.CARD_QUANTITY] if not card_types: return res card_types = sorted(list(card_types.items()), key=lambda t: t[0]) card_types = sorted(card_types, key=lambda t: t[1], reverse=True) total = sum(t[1] for t in card_types) card_types = [(t[0], '{} ({}%)'.format(t[1], round(t[1] * 100 / total))) for t in card_types] res['encounter_deck'] = '**Encounter Deck**\n*Cards*: {}\n\n{}\n\n'.format( total, '\n'.join('*{}*: {}'.format(t[0], t[1]) for t in card_types)) if shadow: res['encounter_deck'] += '*Shadow*: {} ({}%)\n'.format( shadow, round(shadow * 100 / total)) if surge: res['encounter_deck'] += '*Surge*: {} ({}%)\n'.format( surge, round(surge * 100 / total)) res['encounter_deck'] += '*Threat*: {} (Avg), {} (Max)\n\n'.format( round(threat / total, 1), max_threat) return res
33de1c65288a82c91dd3ee6f4e31c2ea54f938d8
221
def bind_type(python_value):
    """Return a Gibica type derived from a Python type."""
    binding_table = {'bool': Bool, 'int': Int, 'float': Float}
    if python_value is None:
        return NoneType()
    python_type = type(python_value)
    gibica_type = binding_table.get(python_type.__name__)
    if gibica_type is None:
        raise TypeError('Impossible to recognize underlying type.')
    return gibica_type(python_value)
ff1ac8d907a90584694408b8e60996fb7be25eab
222
import traceback

import requests
from requests.auth import HTTPDigestAuth


def delete_server(hostname, instance_id):
    """ Deletes a server by hostname and instance_id. """
    host = get_host_by_hostname(hostname)
    if not host or not instance_id:
        return None
    try:
        r = requests.delete("%s/servers/%i" % (host['uri'], instance_id),
                            auth=HTTPDigestAuth(host['username'], host['password']),
                            timeout=(CONNECT_TIMEOUT, READ_TIMEOUT))
        if r.ok:
            return r.json()
    except requests.exceptions.ConnectionError as e:
        traceback.print_exc()
        return None
    return None
9456e45d49be61672b89427c93542374ff0359e2
223
def quote_ident(val):
    """
    This method returns a new string replacing " with "",
    and adding a " at the start and end of the string.
    """
    return '"' + val.replace('"', '""') + '"'
452058861fb5be138db3599755fbf3c6d715c0a8
224
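A short usage sketch (not part of the dataset row above), showing how quote_ident doubles embedded quotes and wraps the result, in the style of SQL identifier quoting:

# Illustrative only: embedded double quotes are doubled, then the whole
# identifier is wrapped in double quotes.
print(quote_ident('my "table"'))  # -> "my ""table"""
print(quote_ident('plain_name'))  # -> "plain_name"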
def TFC_TDF(in_channels, num_layers, gr, kt, kf, f, bn_factor=16, bias=False):
    """ Wrapper Function: -> TDC_TIF

    in_channels: number of input channels
    num_layers: number of densely connected conv layers
    gr: growth rate
    kt: kernel size of the temporal axis.
    kf: kernel size of the freq. axis
    f: num of frequency bins

    below are params for TDF
    bn_factor: bottleneck factor. if None: single layer. else: MLP that maps f => f//bn_factor => f
    bias: bias setting of linear layers
    """
    return TFC_TIF(in_channels, num_layers, gr, kt, kf, f, bn_factor, bias)
b1d4aa007b40c920f4c985d102a4094821fbf228
225
import json


def barplot_data(gene_values, gene_names, cluster_name, x_label, title=None):
    """
    Converts data for top genes into a json for building the bar plot. Output should
    be formatted in a way that can be plugged into Plotly.

    Args:
        gene_values (list): list of tuples (gene_id, gene_value)
        gene_names (list): list of gene names corresponding to the genes in gene_values.
        cluster_name: name of the cluster from which the top genes are drawn.
        x_label: label for the x-axis.
        title: plot title
    """
    if gene_values is None:
        gene_values = [(1, 1), (2, 2), (3, 3)]
    if gene_names is None:
        gene_names = ['placeholder 1', 'placeholder 2', 'placeholder 3']
    if title is None:
        title = 'Top genes for cluster {0}'.format(cluster_name)
    return json.dumps({
        'data': [{
            'x': list(x[1] for x in gene_values),
            'y': gene_names,
            'orientation': 'h',
            'type': 'bar',
        }],
        'layout': {
            'title': title,
            'xaxis': {'title': x_label},
            'margin': {'t': 40},
        },
    }, cls=SimpleEncoder)
67879df5d4918dddc8f46fd6fa975f3bf53de2b4
226
def logic_not(operand: ValueOrExpression) -> Expression:
    """
    Constructs a logical negation expression.
    """
    return Not(operators.NotOperator.NOT, ensure_expr(operand))
9b3755e00afc9aa8a843358ef83442614bda0feb
227
def webpage_attribute_getter(attr):
    """ Helper function for defining getters for web_page attributes, e.g.
    ``get_foo_enabled = webpage_attribute_getter("foo")`` returns
    a value of ``webpage.foo`` attribute.
    """
    def _getter(self):
        return getattr(self.web_page, attr)
    return _getter
3626f8e2d8c6fb7fbb490dc72f796599cdbc874e
228
import numpy as np


def diff_with_step(a: np.ndarray, step: int = 1, **kwargs) -> np.ndarray:
    """ finished, checked,

    compute a[n+step] - a[n] for all valid n

    Parameters
    ----------
    a: ndarray,
        the input data
    step: int, default 1,
        the step to compute the difference
    kwargs: dict,

    Returns
    -------
    d: ndarray,
        the difference array
    """
    if step >= len(a):
        raise ValueError(f"step ({step}) should be less than the length ({len(a)}) of `a`")
    d = a[step:] - a[:-step]
    return d
8475ec66a983f32d4ed7c06348a8607d335dbdca
229
import numpy as np
from sklearn.metrics import mean_squared_error


def rmse(y_true: np.ndarray, y_pred: np.ndarray):
    """
    Returns the root mean squared error between y_true and y_pred.

    :param y_true: NumPy.ndarray with the ground truth values.
    :param y_pred: NumPy.ndarray with the ground predicted values.
    :return: root mean squared error (float).
    """
    return np.sqrt(mean_squared_error(y_true, y_pred))
42d08e8bfd218d1a9dc9702ca45417b6c502d4c5
230
def party_name_from_key(party_key):
    """returns the relevant party name"""
    relevant_parties = {0: 'Alternativet',
                        1: 'Dansk Folkeparti',
                        2: 'Det Konservative Folkeparti',
                        3: 'Enhedslisten - De Rød-Grønne',
                        4: 'Liberal Alliance',
                        5: 'Nye Borgerlige',
                        6: 'Radikale Venstre',
                        7: 'SF - Socialistisk Folkeparti',
                        8: 'Socialdemokratiet',
                        9: 'Venstre, Danmarks Liberale Parti'}
    return relevant_parties[party_key]
86041235738017ae3dbd2a5042c5038c0a3ae786
231
def __imul__(self, n):
    """Concatenate the bitstring to itself |n| times, bitreversed if n < 0"""
    if not isint(n):
        raise TypeError("Can't multiply bitstring by non int")
    if n <= 0:
        if n:
            n = -n
            l = self._l
            for i in xrange(l//2):
                self[i], self[l-1-i] = self[l-1-i], self[i]
        else:
            self._x = 0
            self._l = 0
    if n > 1:
        y = type(self)(self)
        for _ in xrange(n-1):
            self.iconcat(y)
    return self
3305fd98899d0444aea91056712bae1fd4a6db2f
233
from pyrado.environments.pysim.quanser_qube import QQubeSim def create_uniform_masses_lengths_randomizer_qq(frac_halfspan: float): """ Get a uniform randomizer that applies to all masses and lengths of the Quanser Qube according to a fraction of their nominal parameter values :param frac_halfspan: fraction of the nominal parameter value :return: `DomainRandomizer` with uniformly distributed masses and lengths """ dp_nom = QQubeSim.get_nominal_domain_param() return DomainRandomizer( UniformDomainParam( name="mass_pend_pole", mean=dp_nom["mass_pend_pole"], halfspan=dp_nom["mass_pend_pole"] / frac_halfspan, clip_lo=1e-3, ), UniformDomainParam( name="mass_rot_pole", mean=dp_nom["mass_rot_pole"], halfspan=dp_nom["mass_rot_pole"] / frac_halfspan, clip_lo=1e-3, ), UniformDomainParam( name="length_rot_pole", mean=dp_nom["length_rot_pole"], halfspan=dp_nom["length_rot_pole"] / frac_halfspan, clip_lo=1e-2, ), UniformDomainParam( name="length_pend_pole", mean=dp_nom["length_pend_pole"], halfspan=dp_nom["length_pend_pole"] / frac_halfspan, clip_lo=1e-2, ), )
87fc94d17b3fab77b175139d2329c0d67611d402
235
def compress_table(tbl, condition, blen=None, storage=None, create='table', **kwargs): """Return selected rows of a table.""" # setup storage = _util.get_storage(storage) names, columns = _util.check_table_like(tbl) blen = _util.get_blen_table(tbl, blen) _util.check_equal_length(columns[0], condition) length = len(columns[0]) nnz = count_nonzero(condition) # block iteration out = None for i in range(0, length, blen): j = min(i+blen, length) bcond = np.asanyarray(condition[i:j]) # don't access any data unless we have to if np.any(bcond): bcolumns = [np.asanyarray(c[i:j]) for c in columns] res = [np.compress(bcond, c, axis=0) for c in bcolumns] if out is None: out = getattr(storage, create)(res, names=names, expectedlen=nnz, **kwargs) else: out.append(res) return out
eb675913da51b48fc6a663ddb858e70abee3f1ce
236
def validate_schedule():
    """Helper routine to report issues with the schedule"""
    all_items = prefetch_schedule_items()
    errors = []
    for validator, _type, msg in SCHEDULE_ITEM_VALIDATORS:
        for item in validator(all_items):
            errors.append('%s: %s' % (msg, item))
    all_slots = prefetch_slots()
    for validator, _type, msg in SLOT_VALIDATORS:
        for slot in validator(all_slots):
            errors.append('%s: %s' % (msg, slot))
    return errors
8f6d0f9670b25c22e4518b53327dffc4fa897a6e
237
from typing import Any
from typing import Dict
from typing import Union
from typing import Callable
from typing import Tuple

import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV


def train_gridsearchcv_model(base_model: Any,
                             X: np.array,
                             y: np.array,
                             cv_splitter,
                             hyperparameter_grid: Dict[str, Any],
                             scoring: Union[str, Callable[[Any, np.array, np.array], int]] = "f1_weighted",
                             n_jobs: int = 4,
                             verbose: int = 3,
                             ) -> Tuple[Dict[str, Any], pd.DataFrame]:
    """Trains given model using gridsearch crossvalidation.

    X - numpy array of input vectors
    y - numpy array of input labels
    cv_splitter - splitter that splits X and y to train and validation splits
    hyperparameter_grid - hyperparameters used for grid search
    scoring - scoring function which is used to evaluate
    n_jobs - number of cores to use
    verbose - level of verboseness used for GridSearchCV, see scikit-learn

    returns (best_parameters, scores_df) where
        best_parameters are best hyperparameters found
        scores_df is dataframe with scores over all hyperparameter combinations
    """
    model = GridSearchCV(
        base_model,
        hyperparameter_grid,
        scoring=scoring,
        n_jobs=n_jobs,
        cv=cv_splitter,
        refit=False,
        verbose=verbose,
        return_train_score=True
    )
    return train_cv_model(model, X, y)
7fe8677f985db7d3518c7b68e5005fde1dee91c6
238
import numpy as np


def set_resolmatrix(nspec, nwave):
    """ Generate a Resolution Matrix

    Args:
        nspec: int
        nwave: int

    Returns:
        Rdata: np.array
    """
    sigma = np.linspace(2, 10, nwave*nspec)
    ndiag = 21
    xx = np.linspace(-ndiag/2.0, +ndiag/2.0, ndiag)
    Rdata = np.zeros((nspec, len(xx), nwave))
    for i in range(nspec):
        for j in range(nwave):
            kernel = np.exp(-xx**2/(2*sigma[i*nwave+j]**2))
            kernel /= sum(kernel)
            Rdata[i, :, j] = kernel
    return Rdata
49aac12441c1ef793fa2ded4c5ac031df7ce8049
239
def assembleR(X, W, fct):
    """Assemble the matrix M = W * fct(X)."""
    M = W * fct(X)
    return M
c792da453b981cc3974e32aa353124f5a5e9c46d
240
import logging


def generate_dictionary_variable_types(
    dict_name, key_name, search_dict, indent_level=0
):
    """Generate a dictionary from config with values from either function, variable, or static"""
    out_str = []
    # Don't escape these:
    types_used = ["None", "True", "False", None, True, False]

    if len(search_dict) < 1:
        logging.warning("Can't search 0 len dict")
        return None

    if key_exists("function", search_dict):
        logging.info("Found function in dict")
        out_str = f'{dict_name}["{key_name}"] = {search_dict["function"]}'
    elif key_exists("variable", search_dict):
        logging.info("Found variable in dict")
        out_str = f'{dict_name}["{key_name}"] = {search_dict["variable"]}'
    elif key_exists("static", search_dict):
        if (
            isinstance(search_dict["static"], int)
            or search_dict["static"] in types_used
        ):
            logging.info("Found static (None / Bool) in dict")
            out_str = f'{dict_name}["{key_name}"] = {search_dict["static"]}'
        else:
            logging.info("Found static (string) in dict")
            out_str = f'{dict_name}["{key_name}"] = "{search_dict["static"]}"'
    else:
        logging.warning("Unable to find function, variable, or static string")
        return None

    return indent(out_str, indent_level)
82a7282bc999dcf049ff6fbe6329271577908775
241
import uuid


def make_uuid(value):
    """Converts a value into a python uuid object."""
    if isinstance(value, uuid.UUID):
        return value
    return uuid.UUID(value)
b65b5739151d84bedd39bc994441d1daa33d1b51
242
def create_config(config_data, aliases=False, prefix=False, multiple_displays=False, look_info=None, custom_output_info=None, custom_lut_dir=None): """ Create the *OCIO* config based on the configuration data Parameters ---------- config_data : dict Colorspaces and transforms converting between those colorspaces and the reference colorspace, *ACES*, along with other data needed to generate a complete *OCIO* configuration aliases : bool, optional Whether or not to include Alias colorspaces prefix : bool, optional Whether or not to prefix the colorspace names with their Family names multiple_displays : bool, optional Whether to create a single display named *ACES* with Views for each Output Transform or multiple displays, one for each Output Transform look_info : array of str or unicode, optional Paths and names for look data custom_lut_dir : str or unicode, optional Directory to use for storing custom look files Returns ------- *OCIO* config The constructed OCIO configuration """ if look_info is None: look_info = [] if custom_output_info is None: custom_output_info = [] prefixed_names = {} alias_colorspaces = [] config = ocio.Config() config.setDescription('An ACES config generated from python') search_path = ['luts'] if custom_lut_dir: search_path.append('custom') config.setSearchPath(':'.join(search_path)) reference_data = config_data['referenceColorSpace'] # Adding the colorspace *Family* into the name which helps with # applications that present colorspaces as one a flat list. if prefix: prefixed_name = colorspace_prefixed_name(reference_data) prefixed_names[reference_data.name] = prefixed_name reference_data.name = prefixed_name print('Adding the reference color space : %s' % reference_data.name) reference = ocio.ColorSpace( name=reference_data.name, bitDepth=reference_data.bit_depth, description=reference_data.description, equalityGroup=reference_data.equality_group, family=reference_data.family, isData=reference_data.is_data, allocation=reference_data.allocation_type, allocationVars=reference_data.allocation_vars) config.addColorSpace(reference) if aliases: if reference_data.aliases: # Deferring adding alias colorspaces until end, which helps with # applications listing the colorspaces in the order that they were # defined in the configuration: alias colorspaces are usually named # lower case with spaces but normal colorspaces names are longer # and more verbose, thus it becomes harder for user to visually # parse the list of colorspaces when there are names such as # "crv_canonlog" interspersed with names like # "Input - Canon - Curve - Canon-Log". # Moving the alias colorspace definitions to the end of the # configuration avoids the above problem. alias_colorspaces.append( [reference_data, reference_data, reference_data.aliases]) print('') if look_info: print('Adding looks') config_data['looks'] = [] for look in look_info: add_look(config, look, custom_lut_dir, reference_data.name, config_data) add_looks_to_views(look_info, reference_data.name, config_data, multiple_displays) print('') if custom_output_info: print('Adding custom output transforms') for custom_output in custom_output_info: add_custom_output(config, custom_output, custom_lut_dir, reference_data, config_data, alias_colorspaces, prefix) print('') print('Adding regular colorspaces') for colorspace in sorted(config_data['colorSpaces'], cmp=lambda x,y: cmp(x.family.lower(), y.family.lower())): # Adding the colorspace *Family* into the name which helps with # applications that present colorspaces as one a flat list. 
if prefix: prefixed_name = colorspace_prefixed_name(colorspace) prefixed_names[colorspace.name] = prefixed_name colorspace.name = prefixed_name print('Creating new color space : %s' % colorspace.name) description = colorspace.description if colorspace.aces_transform_id: description += ( '\n\nACES Transform ID : %s' % colorspace.aces_transform_id) ocio_colorspace = ocio.ColorSpace( name=colorspace.name, bitDepth=colorspace.bit_depth, description=description, equalityGroup=colorspace.equality_group, family=colorspace.family, isData=colorspace.is_data, allocation=colorspace.allocation_type, allocationVars=colorspace.allocation_vars) if colorspace.to_reference_transforms: print('\tGenerating To-Reference transforms') ocio_transform = create_ocio_transform( colorspace.to_reference_transforms) ocio_colorspace.setTransform( ocio_transform, ocio.Constants.COLORSPACE_DIR_TO_REFERENCE) if colorspace.from_reference_transforms: print('\tGenerating From-Reference transforms') ocio_transform = create_ocio_transform( colorspace.from_reference_transforms) ocio_colorspace.setTransform( ocio_transform, ocio.Constants.COLORSPACE_DIR_FROM_REFERENCE) config.addColorSpace(ocio_colorspace) if aliases: if colorspace.aliases: # Deferring adding alias colorspaces until end, which helps # with applications listing the colorspaces in the order that # they were defined in the configuration. alias_colorspaces.append( [reference_data, colorspace, colorspace.aliases]) print('') print('') # Adding roles early so that alias colorspaces can be created # with roles names before remaining colorspace aliases are added # to the configuration. print('Setting the roles') if prefix: set_config_roles( config, color_picking=prefixed_names[ config_data['roles']['color_picking']], color_timing=prefixed_names[config_data['roles']['color_timing']], compositing_log=prefixed_names[ config_data['roles']['compositing_log']], data=prefixed_names[config_data['roles']['data']], default=prefixed_names[config_data['roles']['default']], matte_paint=prefixed_names[config_data['roles']['matte_paint']], reference=prefixed_names[config_data['roles']['reference']], scene_linear=prefixed_names[config_data['roles']['scene_linear']], compositing_linear=prefixed_names[config_data['roles']['scene_linear']], rendering=prefixed_names[config_data['roles']['scene_linear']], texture_paint=prefixed_names[ config_data['roles']['texture_paint']]) # Add the aliased colorspaces for each role for role_name, role_colorspace_name in config_data['roles'].iteritems(): role_colorspace_prefixed_name = prefixed_names[role_colorspace_name] #print( 'Finding colorspace : %s' % role_colorspace_prefixed_name ) # Find the colorspace pointed to by the role role_colorspaces = [colorspace for colorspace in config_data['colorSpaces'] if colorspace.name == role_colorspace_prefixed_name] role_colorspace = None if len(role_colorspaces) > 0: role_colorspace = role_colorspaces[0] else: if reference_data.name == role_colorspace_prefixed_name: role_colorspace = reference_data if role_colorspace: # The alias colorspace shouldn't match the role name exactly role_name_alias1 = "role_%s" % role_name role_name_alias2 = "Role - %s" % role_name print( 'Adding a role colorspace named %s, pointing to %s' % ( role_name_alias2, role_colorspace.name)) alias_colorspaces.append( (reference_data, role_colorspace, [role_name_alias1])) add_colorspace_aliases( config, reference_data, role_colorspace, [role_name_alias2], 'Utility/Roles') else: set_config_roles( config, 
color_picking=config_data['roles']['color_picking'], color_timing=config_data['roles']['color_timing'], compositing_log=config_data['roles']['compositing_log'], data=config_data['roles']['data'], default=config_data['roles']['default'], matte_paint=config_data['roles']['matte_paint'], reference=config_data['roles']['reference'], scene_linear=config_data['roles']['scene_linear'], compositing_linear=config_data['roles']['scene_linear'], rendering=config_data['roles']['scene_linear'], texture_paint=config_data['roles']['texture_paint']) # Add the aliased colorspaces for each role for role_name, role_colorspace_name in config_data['roles'].iteritems(): # Find the colorspace pointed to by the role role_colorspaces = [colorspace for colorspace in config_data['colorSpaces'] if colorspace.name == role_colorspace_name] role_colorspace = None if len(role_colorspaces) > 0: role_colorspace = role_colorspaces[0] else: if reference_data.name == role_colorspace_name: role_colorspace = reference_data if role_colorspace: # The alias colorspace shouldn't match the role name exactly role_name_alias1 = "role_%s" % role_name role_name_alias2 = "Role - %s" % role_name print('Adding a role colorspace named %s, pointing to %s' % ( role_name_alias2, role_colorspace.name)) alias_colorspaces.append( (reference_data, role_colorspace, [role_name_alias1])) add_colorspace_aliases( config, reference_data, role_colorspace, [role_name_alias2], 'Utility/Roles') print('') # Adding alias colorspaces at the end as some applications use # colorspaces definitions order of the configuration to order # the colorspaces in their selection lists, some applications # use alphabetical ordering. # This should keep the alias colorspaces out of the way for applications # using the configuration order. print('Adding the alias colorspaces') for reference, colorspace, aliases in alias_colorspaces: add_colorspace_aliases(config, reference, colorspace, aliases, 'Utility/Aliases') print('') print('Adding the diplays and views') # Setting the *color_picking* role to be the first *Display*'s # *Output Transform* *View*. default_display_name = config_data['defaultDisplay'] default_display_views = config_data['displays'][default_display_name] default_display_colorspace = default_display_views['Output Transform'] # Defining *Displays* and *Views*. displays, views = [], [] # Defining a generic *Display* and *View* setup. if multiple_displays: looks = config_data['looks'] if ('looks' in config_data) else [] looks = ', '.join(looks) print('Creating multiple displays, with looks : %s' % looks) # *Displays* are not reordered to put the *defaultDisplay* first # because *OCIO* will order them alphabetically when the configuration # is written to disk. for display, view_list in config_data['displays'].iteritems(): for view_name, colorspace in view_list.iteritems(): config.addDisplay(display, view_name, colorspace.name, looks) if 'Output Transform' in view_name and looks != '': # *Views* without *Looks*. config.addDisplay(display, view_name, colorspace.name) # *Views* with *Looks*. view_name_with_looks = '%s with %s' % (view_name, looks) config.addDisplay(display, view_name_with_looks, colorspace.name, looks) else: config.addDisplay(display, view_name, colorspace.name) if not (view_name in views): views.append(view_name) displays.append(display) # *Displays* and *Views* useful in a *GUI* context. else: single_display_name = 'ACES' displays.append(single_display_name) # Ensuring the *defaultDisplay* is first. 
display_names = sorted(config_data['displays']) display_names.insert(0, display_names.pop( display_names.index(default_display_name))) looks = config_data['looks'] if ('looks' in config_data) else [] look_names = ', '.join(looks) displays_views_colorspaces = [] for display in display_names: view_list = config_data['displays'][display] for view_name, colorspace in view_list.iteritems(): if 'Output Transform' in view_name: # We use the *Display* names as the *View* names in this # case as there is a single *Display* containing all the # *Views*. # This works for more applications than not,as of the time # of this implementation. # Autodesk Maya 2016 doesn't support parentheses in # *View* names. sanitised_display = replace(display, {')': '', '(': ''}) # *View* with *Looks*. if 'with' in view_name: sanitised_display = '%s with %s' % ( sanitised_display, look_names) views_with_looks_at_end = False # Storing combo of *Display*, *View* and *Colorspace* # name so they can be added to the end of the list. if views_with_looks_at_end: displays_views_colorspaces.append( [single_display_name, sanitised_display, colorspace.name]) else: config.addDisplay(single_display_name, sanitised_display, colorspace.name) if not (sanitised_display in views): views.append(sanitised_display) # *View* without *Looks*. else: config.addDisplay(single_display_name, sanitised_display, colorspace.name) if not (sanitised_display in views): views.append(sanitised_display) # Adding to the configuration any *Display*, *View* combinations that # were saved for later. # This list should be empty unless `views_with_looks_at_end` is # set `True` above. for display_view_colorspace in displays_views_colorspaces: single_display_name, sanitised_display, colorspace_name = ( display_view_colorspace) config.addDisplay(single_display_name, sanitised_display, colorspace_name) if not (sanitised_display in views): views.append(sanitised_display) raw_display_space_name = config_data['roles']['data'] log_display_space_name = config_data['roles']['compositing_log'] if prefix: raw_display_space_name = prefixed_names[raw_display_space_name] log_display_space_name = prefixed_names[log_display_space_name] config.addDisplay(single_display_name, 'Raw', raw_display_space_name) views.append('Raw') config.addDisplay(single_display_name, 'Log', log_display_space_name) views.append('Log') config.setActiveDisplays(','.join(sorted(displays))) config.setActiveViews(','.join(views)) print('') # Ensuring the configuration is valid. config.sanityCheck() # Resetting colorspace names to their non-prefixed versions. if prefix: prefixed_names_inverse = {} for original, prefixed in prefixed_names.iteritems(): prefixed_names_inverse[prefixed] = original reference_data.name = prefixed_names_inverse[reference_data.name] try: for colorspace in config_data['colorSpaces']: colorspace.name = prefixed_names_inverse[colorspace.name] except: print('Error with Prefixed names') for original, prefixed in prefixed_names.iteritems(): print('%s, %s' % (original, prefixed)) print('\n') print('Inverse Lookup of Prefixed names') for prefixed, original in prefixed_names_inverse.iteritems(): print('%s, %s' % (prefixed, original)) raise return config
c036094b3a8a3debc80d2e66141f8b95e51a41d0
243
import re
import json
from pathlib import Path


def parse_json_with_comments(pathlike):
    """
    Parse a JSON file after removing any comments.

    Comments can use either ``//`` for single-line comments
    or ``/* ... */`` for multi-line comments.
    The input filepath can be a string or ``pathlib.Path``.

    Parameters
    ----------
    pathlike : str or os.PathLike
        Path to the input JSON file either as a string
        or as a ``pathlib.Path`` object.

    Returns
    -------
    obj : dict
        JSON object representing the input file.

    Note
    ----
    This code was adapted from:
    https://web.archive.org/web/20150520154859/http://www.lifl.fr/~riquetd/parse-a-json-file-with-comments.html
    """
    # Regular expression to identify comments
    comment_re = re.compile(r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
                            re.DOTALL | re.MULTILINE)

    # if we passed in a string, convert it to a Path
    if isinstance(pathlike, str):
        pathlike = Path(pathlike)

    with open(pathlike, 'r') as file_buff:
        content = ''.join(file_buff.readlines())

        # Looking for comments
        match = comment_re.search(content)
        while match:
            # single line comment
            content = content[:match.start()] + content[match.end():]
            match = comment_re.search(content)

    # Return JSON object
    config = json.loads(content)
    return config
e79a461c210879d66b699fe49e84d0d2c58a964b
244
import itertools as it


def _unpack_available_edges(avail, weight=None, G=None):
    """Helper to separate avail into edges and corresponding weights"""
    if weight is None:
        weight = "weight"
    if isinstance(avail, dict):
        avail_uv = list(avail.keys())
        avail_w = list(avail.values())
    else:

        def _try_getitem(d):
            try:
                return d[weight]
            except TypeError:
                return d

        avail_uv = [tup[0:2] for tup in avail]
        avail_w = [1 if len(tup) == 2 else _try_getitem(tup[-1]) for tup in avail]

    if G is not None:
        # Edges already in the graph are filtered
        flags = [not G.has_edge(u, v) for u, v in avail_uv]
        avail_uv = list(it.compress(avail_uv, flags))
        avail_w = list(it.compress(avail_w, flags))
    return avail_uv, avail_w
0c4ac0afc209544e385f9214f141cde6f75daa4a
245
import json


def reddit_data(subreddit, time_request=-9999):
    """
    @brief function to retrieve the posts of a subreddit
    :param subreddit: the name of the subreddit
    :param time_request: unix timestamp of when requested subreddit was generated
    :return: a list of reddit objects with the data of the posts
    """
    base_url = get_reddit_url()
    url = f"{base_url}/cache?subreddit={subreddit}&time_resquest={time_request}"
    content = server_request(url)
    data = json.loads(content.decode("utf-8"))
    reddit_posts = []
    for n in data:
        post = reddit.reddit
        post.id = data[n]["id"]
        post.title = data[n]["title"]
        post.author = data[n]["author"]
        post.score = int(data[n]["score"])
        post.vote_ratio = int(data[n]["vote_ratio"])
        post.comment_count = int(data[n]["comment_count"])
        post.subreddit = data[n]["subreddit"]
        post.post_time = int(data[n]["post_time"])
        post.url = data[n]["url"]
        post.text = data[n]["text"]
        reddit_posts.append(post)
    return reddit_posts
78d79e2e917aaa892b4122d657c30a7cd13dfc4b
247
def write_velocity_files(U_25_RHS_str, U_50_RHS_str, U_100_RHS_str, U_125_RHS_str, U_150_RHS_str, U_25_LHS_str, U_50_LHS_str, U_100_LHS_str, U_125_LHS_str, U_150_LHS_str, path_0_100, path_0_125, path_0_150, path_0_25, path_0_50): """Create the details file for the surrounding cases, and write the velocities in line two""" fname = "details" # Filename file_25_path = path_0_25 file_50_path = path_0_50 file_100_path = path_0_100 file_125_path = path_0_125 file_150_path = path_0_150 details_file_25 = file_25_path + fname details_file_50 = file_50_path + fname details_file_100 = file_100_path + fname details_file_125 = file_125_path + fname details_file_150 = file_150_path + fname with open(details_file_25, 'w+') as f: f.write('Velocity' +'\n') f.write(U_25_RHS_str) with open(details_file_50, 'w+') as f: f.write('Velocity' +'\n') f.write(U_50_RHS_str) with open(details_file_100, 'w+') as f: f.write('Velocity' +'\n') f.write(U_100_RHS_str) with open(details_file_125, 'w+') as f: f.write('Velocity' +'\n') f.write(U_125_RHS_str) with open(details_file_150, 'w+') as f: f.write('Velocity' +'\n') f.write(U_150_RHS_str) return details_file_25, details_file_50, details_file_100, details_file_125, details_file_150
6c4af67ea659c09669f7294ec453db5e4e9fb9df
248
from struct import pack


def datetime_to_bytes(value):
    """Return bytes representing UTC time in microseconds."""
    return pack('>Q', int(value.timestamp() * 1e6))
e8b1d78615a84fb4279563d948ca807b0f0f7310
249
from typing import Sequence
from typing import List


def _f7(seq: Sequence) -> List:
    """order preserving de-duplicate sequence"""
    seen = set()
    seen_add = seen.add
    return [x for x in seq if not (x in seen or seen_add(x))]
f4dde886503754a09ac4ff545638750bf8fc6d94
250
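A short usage sketch (not part of the row above) of the order-preserving de-duplication helper:

# Illustrative only: duplicates are dropped while first-seen order is kept.
print(_f7([3, 1, 3, 2, 1]))   # -> [3, 1, 2]
print(_f7("abracadabra"))     # -> ['a', 'b', 'r', 'c', 'd']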
def get_config(cfg): """ Sets the hypermeters for the optimizer and experiment using the config file Args: cfg: A YACS config object. """ config_params = { "train_params": { "adapt_lambda": cfg.SOLVER.AD_LAMBDA, "adapt_lr": cfg.SOLVER.AD_LR, "lambda_init": cfg.SOLVER.INIT_LAMBDA, "nb_adapt_epochs": cfg.SOLVER.MAX_EPOCHS, "nb_init_epochs": cfg.SOLVER.MIN_EPOCHS, "init_lr": cfg.SOLVER.BASE_LR, "batch_size": cfg.SOLVER.TRAIN_BATCH_SIZE, "optimizer": { "type": cfg.SOLVER.TYPE, "optim_params": { "momentum": cfg.SOLVER.MOMENTUM, "weight_decay": cfg.SOLVER.WEIGHT_DECAY, "nesterov": cfg.SOLVER.NESTEROV } } }, "data_params": { "dataset_group": cfg.DATASET.NAME, "dataset_name": cfg.DATASET.SOURCE + '2' + cfg.DATASET.TARGET, "source": cfg.DATASET.SOURCE, "target": cfg.DATASET.TARGET, "size_type": cfg.DATASET.SIZE_TYPE, "weight_type": cfg.DATASET.WEIGHT_TYPE } } return config_params
a88bc3c8057d969998ab286aaa15ee5e8768c838
251
def get_nas_transforms():
    """ Returns trajectory transformations for NAS. """
    return [
        PadActions(),
        AsArray(),
        RewardsAsValueTargets(),
        TileValueTargets()
    ]
f323ef2cd40af81fdd230e4bbb53cfa2ba6e4450
252
from datetime import datetime


def epoch_to_datetime(epoch):
    """
    :param epoch: str of epoch time
    :return: converted datetime type
    """
    return datetime.fromtimestamp(float(epoch) / 1000)
59d9b85489320f5b1db93e6513fc375b9b58b151
253
def qe_m4(px,mlmax,Talm=None,fTalm=None): """ px is a pixelization object, initialized like this: px = pixelization(shape=shape,wcs=wcs) # for CAR px = pixelization(nside=nside) # for healpix output: curved sky multipole=4 estimator """ ells = np.arange(mlmax) #prepare temperature map rmapT=px.alm2map(np.stack((Talm,Talm)),spin=0,ncomp=1,mlmax=mlmax)[0] #find tbarf t_alm=cs.almxfl(fTalm,np.sqrt((ells-3.)*(ells-2.)*(ells-1.)*ells*(ells+1.)*(ells+2.)*(ells+3.)*(ells+4.))) alms=np.stack((t_alm,t_alm)) rmap=px.alm2map_spin(alms,0,4,ncomp=2,mlmax=mlmax) #multiply the two fields together rmap=np.nan_to_num(rmap) prodmap=rmap*rmapT prodmap=np.nan_to_num(prodmap) if not(px.hpix): prodmap=enmap.enmap(prodmap,px.wcs) realsp2=prodmap[0] #spin +4 real space real space field if not(px.hpix): realsp2 = enmap.enmap(realsp2,px.wcs) #convert the above spin4 fields to spin pm 4 alms res1 = px.map2alm_spin(realsp2,mlmax,4,4) #will return pm4 #spin 4 ylm ttalmsp2=rot2dalm(res1,4)[0] #pick up the spin 4 alm of the first one ttalmsm2=rot2dalm(res1,4)[1] #pick up the spin -4 alm of the second one m4_alm=ttalmsp2+ttalmsm2 return m4_alm
f2b6c7fe03a5dae34aaa58738684e37cf30efc01
254
import pandas as pd


def seasonality_plot_df(m, ds):
    """Prepare dataframe for plotting seasonal components.

    Parameters
    ----------
    m: Prophet model.
    ds: List of dates for column ds.

    Returns
    -------
    A dataframe with seasonal components on ds.
    """
    df_dict = {'ds': ds, 'cap': 1., 'floor': 0.}
    for name in m.extra_regressors:
        df_dict[name] = 0.
    # Activate all conditional seasonality columns
    for props in m.seasonalities.values():
        if props['condition_name'] is not None:
            df_dict[props['condition_name']] = True
    df = pd.DataFrame(df_dict)
    df = m.setup_dataframe(df)
    return df
ae362631659ec1652eb1798a73dce786cd269ee5
255
async def response(request: DiscoveryRequest, xds_type: DiscoveryTypes, host: str = 'none'): """ A Discovery **Request** typically looks something like: .. code-block:: json { "version_info": "0", "node": { "cluster": "T1", "build_version": "<revision hash>/<version>/Clean/RELEASE", "metadata": { "auth": "..." } } } When we receive this, we give the client the latest configuration via a Discovery **Response** that looks something like this: .. code-block:: json { "version_info": "abcdef1234567890", "resources": [] } The version_info is derived from :func:`sovereign.discovery.version_hash` :param request: An envoy Discovery Request :param xds_type: what type of XDS template to use when rendering :param host: the host header that was received from the envoy client :return: An envoy Discovery Response """ template: XdsTemplate = XDS_TEMPLATES.get(request.envoy_version, default_templates)[xds_type] context = make_context( node_value=extract_node_key(request.node), template=template, ) # If the discovery request came from a mock, it will # typically contain this metadata key. # This means we should prevent any decryptable data # from ending up in the response. if request.node.metadata.get('hide_private_keys'): context['crypto'] = disabled_suite config_version = '0' if config.cache_strategy == 'context': config_version = version_hash(context, template.checksum, request.node.common, request.resources) if config_version == request.version_info: return {'version_info': config_version} kwargs = dict( discovery_request=request, host_header=host, resource_names=request.resources, **context ) if template.is_python_source: content = {'resources': list(template.code.call(**kwargs))} else: content = await template.content.render_async(**kwargs) if config.cache_strategy == 'content': config_version = version_hash(content) if config_version == request.version_info: return {'version_info': config_version} # This is the most expensive operation, I think, so it's performed as late as possible. if not template.is_python_source: content = deserialize_config(content) content['version_info'] = config_version return remove_unwanted_resources(content, request.resources)
3ffa2ec8c64dd479ea6ecf3494a2db23b95f2ef2
256
def count_inner_bags(content, start_color):
    """Count inner bags"""
    rules = process_content(content)
    bags = rules[start_color]
    count = len(bags)
    while len(bags) != 0:
        new_bags = []
        for bag in bags:
            count += len(rules[bag])
            new_bags += rules[bag]
        bags = new_bags
    return count
f6e188d548beaa5f1b24d96e6394c2bdbfaefd0b
257
def build_generation_data( egrid_facilities_to_include=None, generation_years=None ): """ Build a dataset of facility-level generation using EIA923. This function will apply filters for positive generation, generation efficiency within a given range, and a minimum percent of generation from the primary fuel (if set in the config file). The returned dataframe also includes the balancing authority for every power plant. Parameters ---------- egrid_facilities_to_include : list, optional List of plant codes to include (default is None, which builds a list) generation_years : list, optional Years of generation data to include in the output (default is None, which builds a list from the inventories of interest and eia_gen_year parameters) Returns ---------- DataFrame Dataframe columns include: ['FacilityID', 'Electricity', 'Year'] """ if not generation_years: # Use the years from inventories of interest generation_years = set( list(inventories_of_interest.values()) + [eia_gen_year] ) df_list = [] for year in generation_years: gen_fuel_data = eia923_download_extract(year) primary_fuel = eia923_primary_fuel(gen_fuel_data) gen_efficiency = calculate_plant_efficiency(gen_fuel_data) final_gen_df = gen_efficiency.merge(primary_fuel, on="Plant Id") if not egrid_facilities_to_include: if include_only_egrid_facilities_with_positive_generation: final_gen_df = final_gen_df.loc[ final_gen_df["Net Generation (Megawatthours)"] >= 0, : ] if filter_on_efficiency: final_gen_df = efficiency_filter(final_gen_df) if filter_on_min_plant_percent_generation_from_primary_fuel and not keep_mixed_plant_category: final_gen_df = final_gen_df.loc[ final_gen_df["primary fuel percent gen"] >= min_plant_percent_generation_from_primary_fuel_category, :, ] # if filter_non_egrid_emission_on_NAICS: # # Check with Wes to see what the filter here is supposed to be # final_gen_df = final_gen_df.loc[ # final_gen_df['NAICS Code'] == '22', : # ] else: final_gen_df = final_gen_df.loc[ final_gen_df["Plant Id"].isin(egrid_facilities_to_include), : ] ba_match = eia860_balancing_authority(year) ba_match["Plant Id"] = ba_match["Plant Id"].astype(int) final_gen_df["Plant Id"] = final_gen_df["Plant Id"].astype(int) final_gen_df = final_gen_df.merge(ba_match, on="Plant Id", how="left") final_gen_df["Year"] = int(year) df_list.append(final_gen_df) all_years_gen = pd.concat(df_list) all_years_gen = all_years_gen.rename( columns={ "Plant Id": "FacilityID", "Net Generation (Megawatthours)": "Electricity", } ) all_years_gen = all_years_gen.loc[:, ["FacilityID", "Electricity", "Year"]] all_years_gen.reset_index(drop=True, inplace=True) all_years_gen["Year"] = all_years_gen["Year"].astype("int32") return all_years_gen
32a2f1757419e52b7d8ea5b198a70bfd36f7dd4c
258
def get_nessus_scans(): """Return a paginated list of Nessus scan reports. **Example request**: .. sourcecode:: http GET /api/1.0/analysis/nessus?page=1 HTTP/1.1 Host: do.cert.europa.eu Accept: application/json **Example response**: .. sourcecode:: http HTTP/1.0 200 OK Content-Type: application/json Link: <.../api/1.0/analysis/nessus?page=1&per_page=20>; rel="First", <.../api/1.0/analysis/nessus?page=0&per_page=20>; rel="Last" { "count": 3, "items": [ { "created": "2016-03-21T16:52:52", "id": 4, "report": "...", "type": "Nessus scan" }, { "created": "2016-03-21T16:51:49", "id": 3, "report": "...", "type": "Nessus scan" }, { "created": "2016-03-20T17:09:03", "id": 2, "report": "...", "type": "Nessus scan" } ], "page": 1 } :reqheader Accept: Content type(s) accepted by the client :resheader Content-Type: this depends on `Accept` header or request :resheader Link: Describe relationship with other resources :>json array items: Nessus scan reports :>jsonarr integer id: Scan unique ID :>jsonarr object report: Scan report :>json integer page: Current page number :>json integer count: Total number of items :status 200: Reports found :status 404: Resource not found """ return ApiPagedResponse(Report.query.filter_by(type_id=4))
1828d42baff7e16c8dac6b1e5ab6c66c14834c3c
259
try:
    from doctest import TestResults
except ImportError:
    from _doctest26 import TestResults


def count_failures(runner):
    """Count number of failures in a doctest runner.

    Code modeled after the summarize() method in doctest.
    """
    return [TestResults(f, t) for f, t in runner._name2ft.values() if f > 0]
0e755114f5c23be0bdac11876f32bd2ac4ad9625
260
def fnv1_64(data, hval_init=FNV1_64_INIT):
    """
    Returns the 64 bit FNV-1 hash value for the given data.
    """
    return fnv(data, hval_init, FNV_64_PRIME, 2**64)
3981677c02317f63ae62cab75ee0d5db2fec7dc2
261
from pathlib import Path import tqdm def parse_data_sp(source_root, parallel_roots, glob_str="**/*.wav", add_source=False): """ assert that parallel_root wil contain folders of following structure: PARALLEL_ROOT/record_17/IPhone 12 Pro Max/JBL CLIP3/distance=60-loudness=15-recording_mode=default/RELATIVE_PATH_TO_WAV_FROM_SOURCE """ data = defaultdict(list) source_root = Path(source_root).resolve() parallel_roots = [Path(parallel_root) for parallel_root in parallel_roots] # print(parallel_roots) _class_ind_maps = defaultdict(list) source_pathes = list(source_root.glob(glob_str)) if add_source: _class_ind_maps["spoofing"] = ["genuine", "spoof"] for source_path in tqdm(source_pathes): for parallel_root in parallel_roots: playback_device = parallel_root.parts[-2].lower().replace(" ", "") recording_device = parallel_root.parts[-3].lower().replace(" ", "") # print(f"{playback_device}, {recording_device}") if not (playback_device in _class_ind_maps["playback_device"]): _class_ind_maps["playback_device"].append(playback_device) if not (recording_device in _class_ind_maps["recording_device"]): _class_ind_maps["recording_device"].append(recording_device) source_rlp = source_path.relative_to(source_root) parallel_path = parallel_root / source_rlp if parallel_path.exists(): data[source_path].append({ "path": parallel_path, "spoofing": "spoof", "playback_device": playback_device, "recording_device": recording_device }) if add_source: if len(data[source_path]) > 0: data[source_path].insert( 0, { "path": source_path, "spoofing": "genuine", "playback_device": None, "recording_device": None }) class_ind_maps = defaultdict(dict) print(_class_ind_maps) for task_name, task_classes in _class_ind_maps.items(): for cls_ind, cls_name in enumerate(sorted(task_classes)): class_ind_maps[task_name][cls_name] = cls_ind return data, class_ind_maps
d23613c63d904ca5adb23c10c670ddab2d4148e7
263
def heat_diffusion(A, t, L, k, eps=0.0001):
    """
    Computes the heat diffusion equation

    Parameters
    ----------
    A : Tensor or SparseTensor
        the (N,N,) density matrix
    t : float
        the diffusion time
    L : Tensor or SparseTensor
        the (N,N,) Laplacian matrix
    k : Tensor
        the (N,D,) initial heat tensor
    eps : float (optional)
        a regularizer value (default is 0.0001)

    Returns
    -------
    Tensor
        the (N,D,) heat tensor
    """
    return poisson_equation(A+t*L, k, eps=eps)
56ee07ed473463116b045700e4923218d72b5aca
264
def unpack_ad_info(ad_info: dict, param_name: str) -> bytes:
    """Checks for the expected structure and returns the value."""
    # A prettier expression will not work here, because the checks must be applied in order
    if (
        isinstance(ad_info, dict)
        and ad_info.get(param_name)  # noqa: W503
        and isinstance(ad_info[param_name], list)  # noqa: W503
        and isinstance(ad_info[param_name][0], bytes)  # noqa: W503
    ):
        return ad_info[param_name][0]
    return None
85a6c95bac7e35bed4f478b352b2a56203818139
265
import pandas as pd
from xlrd import XLRDError


def _read_file(file, sheet_name=0):
    """
    Helper function used to read the file and return a pandas dataframe.
    Checks if file type is a .csv or excel. If not, raises a ValueError.

    Parameters
    ----------
    file : str
        the name of the file, including the filetype extension
    sheet_name : int, optional
        if passing an excel file, the name of the sheet to analyze, by default 0

    Returns
    -------
    pandas.Dataframe
        pandas dataframe containing data from file
    """
    if file.endswith('.csv'):
        df = pd.read_csv(file)
    else:
        try:
            df = pd.read_excel(file, sheet_name=sheet_name)
        except XLRDError:
            raise ValueError("Please use a valid csv or excel file.")
    return df
fbe9212084062233ca2073af57b401afc9532701
266
def get_school_total_students(school_id, aug_school_info):
    """
    Gets total number of students associated with a school.

    Args:
        school_id (str): NCES ID of target school.
        aug_school_info (pandas.DataFrame): Target augmented school information
            (as formatted by `auxiliary.data_handler.DataHandler`).

    Returns:
        int: Single number comprising school-level data.
    """
    return int(aug_school_info.loc[school_id]["total_students"])
d0d2ea36a2e3f4b47992aea9cc0c18c5ba7e0ff3
267
def loci_adjust(ds, *, group, thresh, interp):
    """LOCI: Adjust on one block.

    Dataset variables:
      hist_thresh : Hist's equivalent thresh from ref
      sim : Data to adjust
    """
    sth = u.broadcast(ds.hist_thresh, ds.sim, group=group, interp=interp)
    factor = u.broadcast(ds.af, ds.sim, group=group, interp=interp)
    with xr.set_options(keep_attrs=True):
        scen = (factor * (ds.sim - sth) + thresh).clip(min=0)
    return scen.rename("scen").to_dataset()
2bb833a33bf32ed308137342007f7c62acfabe82
268
def _ClientThread(client_ip, client_user, client_pass, mvip, username, password, purge):
    """delete the volumes for a client, run as a thread"""
    log = GetLogger()
    SetThreadLogPrefix(client_ip)
    log.info("Connecting to client")
    client = SFClient(client_ip, client_user, client_pass)
    account_name = client.HostnameToAccountName()
    cluster = SFCluster(mvip, username, password)
    try:
        match_volumes = cluster.SearchForVolumes(accountName=account_name)
    except UnknownObjectError:
        log.passed("Account is already deleted")
        return True
    if len(list(match_volumes.keys())) <= 0:
        log.passed("No volumes to delete")
        return True
    log.info("Deleting {} volumes".format(len(list(match_volumes.keys()))))
    cluster.DeleteVolumes(volumeIDs=list(match_volumes.keys()), purge=purge)
    log.passed("Successfully deleted volumes")
258531ac383271c1a637b38ed3ff4f7a358c1dbc
269
import json def objective(args: Namespace, trial: optuna.trial._trial.Trial) -> float: """Objective function for optimization trials. Args: args (Namespace): Input arguments for each trial (see `config/args.json`) for argument names. trial (optuna.trial._trial.Trial): Optuna optimization trial. Returns: F1 score from evaluating the trained model on the test data split. """ # Paramters (to tune) args.embedding_dim = trial.suggest_int("embedding_dim", 128, 512) args.num_filters = trial.suggest_int("num_filters", 128, 512) args.hidden_dim = trial.suggest_int("hidden_dim", 128, 512) args.dropout_p = trial.suggest_uniform("dropout_p", 0.3, 0.8) args.lr = trial.suggest_loguniform("lr", 5e-5, 5e-4) # Train (can move some of these outside for efficiency) logger.info(f"\nTrial {trial.number}:") logger.info(json.dumps(trial.params, indent=2)) artifacts = run(args=args, trial=trial) # Set additional attributes args = artifacts["args"] performance = artifacts["performance"] logger.info(json.dumps(performance["overall"], indent=2)) trial.set_user_attr("threshold", args.threshold) trial.set_user_attr("precision", performance["overall"]["precision"]) trial.set_user_attr("recall", performance["overall"]["recall"]) trial.set_user_attr("f1", performance["overall"]["f1"]) return performance["overall"]["f1"]
629711996034664654430fba8af541fc934e143a
270
def read_gdwarfs(file=_GDWARFALLFILE,logg=False,ug=False,ri=False,sn=True, ebv=True,nocoords=False): """ NAME: read_gdwarfs PURPOSE: read the spectroscopic G dwarf sample INPUT: logg= if True, cut on logg, if number, cut on logg > the number (>4.2) ug= if True, cut on u-g, if list/array cut to ug[0] < u-g< ug[1] ri= if True, cut on r-i, if list/array cut to ri[0] < r-i< ri[1] sn= if False, don't cut on SN, if number cut on SN > the number (15) ebv= if True, cut on E(B-V), if number cut on EBV < the number (0.3) nocoords= if True, don't calculate distances or transform coordinates OUTPUT: cut data, returns numpy.recarray HISTORY: 2011-07-08 - Written - Bovy@MPIA (NYU) """ raw= _load_fits(file) #First cut on r indx= (raw.field('dered_r') < 20.2)*(raw.field('dered_r') > 14.5) raw= raw[indx] #Then cut on g-r indx= ((raw.field('dered_g')-raw.field('dered_r')) < 0.55)\ *((raw.field('dered_g')-raw.field('dered_r')) > .48) raw= raw[indx] #Cut on velocity errs indx= (raw.field('pmra_err') > 0.)*(raw.field('pmdec_err') > 0.)\ *(raw.field('vr_err') > 0.) raw= raw[indx] #Cut on logg? if (isinstance(logg,bool) and logg): indx= (raw.field('logga') > 4.2) raw= raw[indx] elif not isinstance(logg,bool): indx= (raw.field('logga') > logg) raw= raw[indx] if isinstance(ug,bool) and ug: indx= ((raw.field('dered_u')-raw.field('dered_g')) < 2.)\ *((raw.field('dered_u')-raw.field('dered_g')) > .6) raw= raw[indx] if not isinstance(ug,bool): indx= ((raw.field('dered_u')-raw.field('dered_g')) < ug[1])\ *((raw.field('dered_u')-raw.field('dered_g')) > ug[0]) raw= raw[indx] if isinstance(ri,bool) and ri: indx= ((raw.field('dered_r')-raw.field('dered_i')) < .4)\ *((raw.field('dered_r')-raw.field('dered_i')) > -.1) raw= raw[indx] elif not isinstance(ri,bool): indx= ((raw.field('dered_r')-raw.field('dered_i')) < ri[1])\ *((raw.field('dered_r')-raw.field('dered_i')) > ri[0]) raw= raw[indx] if (isinstance(sn,bool) and sn): indx= (raw.field('sna') > 15.) raw= raw[indx] elif not isinstance(sn,bool): indx= (raw.field('sna') > sn) raw= raw[indx] if isinstance(ebv,bool) and ebv: indx= (raw.field('ebv') < .3) raw= raw[indx] elif not isinstance(ebv,bool): indx= (raw.field('ebv') < ebv) raw= raw[indx] if nocoords: return raw raw= _add_distances(raw) raw= _add_velocities(raw) return raw
da073917d825dac283d8157dec771036588b0cec
271
def add_item(category_slug=None):
    """
    Add a new Item Form.

    :param category_slug: The category slug
    """
    # Get the current category using the slug
    current_category = Category.where('slug', category_slug).first()
    return render_template(
        'items/add.html',
        categories=Category.all(),
        current_category=current_category
    )
6bbaabdab6da6290c1de17ecbc36e849b7fd4c5e
272
def track_type(time, lat, tmax=1): """ Determines ascending and descending tracks. Defines unique tracks as segments with time breaks > tmax, and tests whether lat increases or decreases w/time. """ # Generate track segment tracks = np.zeros(lat.shape) # Set values for segment tracks[0:np.argmax(np.abs(lat))] = 1 # Output index array i_asc = np.zeros(tracks.shape, dtype=bool) # Loop trough individual tracks for track in np.unique(tracks): # Get all points from an individual track i_track, = np.where(track == tracks) # Test tracks length if len(i_track) < 2: continue # Test if lat increases (asc) or decreases (des) w/time i_min = time[i_track].argmin() i_max = time[i_track].argmax() lat_diff = lat[i_track][i_max] - lat[i_track][i_min] # Determine track type if lat_diff > 0: i_asc[i_track] = True # Output index vector's return i_asc, np.invert(i_asc)
5872deaf6ff5d5e651705f40b8ad3df192ec98de
273
def get_installed_procnames():
    """Get a list of procs currently on the file system."""
    return set(get_procs())
6f3f83d579033ef407c72ccbd8d973f39d41ae22
274
def pam_bw_as_matrix(buff, border):
    """\
    Returns the QR code as list of [0, 1] lists.

    :param io.BytesIO buff: Buffer to read the matrix from.
    :param int border: The QR code border
    """
    res = []
    data, size = _image_data(buff)
    for i, offset in enumerate(range(0, len(data), size)):
        if i < border:
            continue
        if i >= size - border:
            break
        row_data = bytearray(data[offset + border:offset + size - border])
        # Invert bytes since PAM uses 0x0 = black, 0x1 = white
        res.append([b ^ 0x1 for b in row_data])
    return res
0360ee9d9e22fd667bc80063bd799fbaa2cb3a44
275
def delete_task(task_id: int):
    """Remove task with associated ID from the database."""
    send_to_login = ensure_login()
    if send_to_login:
        return send_to_login
    else:
        old_task = Task.delete(task_id)
        flash(f'You deleted "{old_task.title}"', "info")
        return redirect(url_for("task.view_task_list"))
976c8aedc47ca342809d82904c4a1eab31e8886f
276
def get_version():  # noqa: E501
    """API version

    The API version  # noqa: E501

    :rtype: str
    """
    return '1.0.0'
75df6627bb2aaec205a0679d86c190d7b861baf5
278
import numpy as np
import torch
from tqdm import tqdm


def bert_evaluate(model, eval_dataloader, device):
    """Evaluation of trained checkpoint."""
    model.to(device)
    model.eval()
    predictions = []
    true_labels = []
    data_iterator = tqdm(eval_dataloader, desc="Iteration")
    for step, batch in enumerate(data_iterator):
        input_ids, input_mask, labels = batch
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        with torch.no_grad():
            outputs = model(input_ids, token_type_ids=None, attention_mask=input_mask)
        # loss is only output when labels are provided as input to the model ... real smooth
        logits = outputs[0]
        print(type(logits))
        logits = logits.to('cpu').numpy()
        label_ids = labels.to('cpu').numpy()
        for label, logit in zip(label_ids, logits):
            true_labels.append(label)
            predictions.append(np.argmax(logit))
    # print(predictions)
    # print(true_labels)
    metrics = get_metrics(true_labels, predictions)
    return metrics
3534796f06a89378dec9c23788cb52f75d088423
279
def get_cached_scts(hex_ee_hash):
    """
    get_cached_scts returns previously fetched valid SCT from this certificate.
    The key to perform this search is the hex-encoded hash of the end-entity
    certificate.

    :param hex_ee_hash: the hex-encoded hash of the end-entity certificate
    :return: a dictionary of SCTs where the keys are the log URL
    """
    c = dbconn.cursor()
    c.execute('''
        SELECT logs.log, scts.sct
        FROM certs
        INNER JOIN scts ON certs.id = scts.cert_id
        INNER JOIN logs ON scts.log_id = logs.id
        WHERE certs.ee_hash = ? AND scts.valid = 1
    ''', (hex_ee_hash,))
    return {
        log: {'sct': sct, 'valid': True}
        for (log, sct) in c.fetchall()
    }
5f86f1ccd7488f9712b16d1c71077ceb73098ea3
280
import torch def all_gather_multigpu( output_tensor_lists, input_tensor_list, group=None, async_op=False ): """ Gathers tensors from the whole group in a list. Each tensor in ``tensor_list`` should reside on a separate GPU Only nccl backend is currently supported tensors should only be GPU tensors Complex tensors are supported. Args: output_tensor_lists (List[List[Tensor]]): Output lists. It should contain correctly-sized tensors on each GPU to be used for output of the collective, e.g. ``output_tensor_lists[i]`` contains the all_gather result that resides on the GPU of ``input_tensor_list[i]``. Note that each element of ``output_tensor_lists`` has the size of ``world_size * len(input_tensor_list)``, since the function all gathers the result from every single GPU in the group. To interpret each element of ``output_tensor_lists[i]``, note that ``input_tensor_list[j]`` of rank k will be appear in ``output_tensor_lists[i][k * world_size + j]`` Also note that ``len(output_tensor_lists)``, and the size of each element in ``output_tensor_lists`` (each element is a list, therefore ``len(output_tensor_lists[i])``) need to be the same for all the distributed processes calling this function. input_tensor_list (List[Tensor]): List of tensors(on different GPUs) to be broadcast from current process. Note that ``len(input_tensor_list)`` needs to be the same for all the distributed processes calling this function. group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. async_op (bool, optional): Whether this op should be an async op Returns: Async work handle, if async_op is set to True. None, if not async_op or if not part of the group """ if _rank_not_in_group(group): return output_tensor_lists = [ [t if not t.is_complex() else torch.view_as_real(t) for t in l] for l in output_tensor_lists ] input_tensor_list = [ t if not t.is_complex() else torch.view_as_real(t) for t in input_tensor_list ] if group is None: default_pg = _get_default_group() work = default_pg.allgather(output_tensor_lists, input_tensor_list) else: work = group.allgather(output_tensor_lists, input_tensor_list) if async_op: return work else: work.wait()
e948709a209877c0d994699106e06bd13ddb46a7
281
import uuid def get_uuid_from_str(input_id: str) -> str: """ Returns an uuid3 string representation generated from an input string. :param input_id: :return: uuid3 string representation """ return str(uuid.uuid3(uuid.NAMESPACE_DNS, input_id))
51ce9ceab7c4f9d63d45fbee93286711bcba3093
282
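A minimal usage sketch for get_uuid_from_str above; the input string is a made-up example, and the point is only that uuid3 is deterministic for identical inputs.

a = get_uuid_from_str("dataset/table-001")
b = get_uuid_from_str("dataset/table-001")
c = get_uuid_from_str("dataset/table-002")
print(a)
assert a == b   # deterministic for identical input
assert a != c   # different inputs map to different ids (with overwhelming probability)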
import requests
def extend_request(request_id=None, workload_id=None, lifetime=30):
    """
    extend a request's lifetime.

    :param request_id: The id of the request.
    :param workload_id: The workload_id of the request.
    :param lifetime: The lifetime as a number of days.
    """
    return requests.extend_request(request_id=request_id, workload_id=workload_id, lifetime=lifetime)
4b5c523f1af2b1c7c6f55bf522bb2c32e0f14995
283
def from_bytes(buf: bytes) -> str: """Return MIME type from content in form of bytes-like type. Example: >>> import defity >>> defity.from_bytes(b'some-binary-content') 'image/png' """ _guard_buf_arg(buf) # We accept many input data types just for user's convenience. We still convert # it to immutable bytes to pass down to Rust function. return _mod.from_bytes(bytes(buf))
5b997bc8d9b6d5e3fc7e38c5956bcefe3f0244cc
284
def pl__5__create_train_frame_sequences(ctvusts_by_tcp__lte_1, frame_sequences__by__tcpctvustsfs, train_tcpctvustsfs__gt__1): """ returns: train_tcpctvustsfs__all ( <TokenID>, <CameraPerspective>, <ASLConsultantID>, <TargetVideoFilename>, <UtteranceSequence>, <TokenSequence>, <FrameSequence> ) """ train__ctvusts_by_tcp__lte_1__keys = ( ctvusts_by_tcp__lte_1 | "Beam PL: extract ((TokenID,CameraPerspective,ASLConsultantID,TargetVideoFilename,UtteranceSequence,TokenSequence), '<ctvusts_by_tcp__lte_1_tpl__has_key>') for join to tcpctvustsfs" >> beam.Map( lambda ctvusts_by_tcp__lte_1_tpl : ( ( ctvusts_by_tcp__lte_1_tpl[0], # TokenID ctvusts_by_tcp__lte_1_tpl[1], # CameraPerspective ctvusts_by_tcp__lte_1_tpl[2], # ASLConsultantID ctvusts_by_tcp__lte_1_tpl[3], # TargetVideoFilename ctvusts_by_tcp__lte_1_tpl[4], # UtteranceSequence ctvusts_by_tcp__lte_1_tpl[5] # TokenSequence ), "<ctvusts_by_tcp__lte_1_tpl__has_key>" ) ) ) train_tcpctvustsfs__lte_1 = ( ({ 'has_key': train__ctvusts_by_tcp__lte_1__keys, 'frame_sequences': frame_sequences__by__tcpctvustsfs }) | "Beam PL: join ctvusts_by_tcp__lte_1 to tcpctvustsfs" >> beam.CoGroupByKey() # the above produces tuples of the form: # ( # ( # <TokenID>, # <CameraPerspective>, # <ASLConsultantID>, # <TargetVideoFilename>, # <UtteranceSequence>, # <TokenSequence> # ), # { # 'has_key': listof('<ctvusts_by_tcp__lte_1_tpl__has_key>'), # should have only one/single element # 'frame_sequences': listof(<FrameSequence>) # many # } # ) | "Beam PL: filter out mismatches from joined train__ctvusts_by_tcp__lte_1 to tcpctvustsfs" >> beam.Filter( lambda joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl: len(joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[1]['has_key'])>0 and \ len(joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[1]['frame_sequences'])>0 ) | "Beam PL: 'explode' listof(<FrameSequence>) from joined train__ctvusts_by_tcp__lte_1 to tcpctvustsfs to list of tuples" >> beam.Map( lambda joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl: [ ( joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][0], # TokenID joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][1], # CameraPerspective joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][2], # ASLConsultantID joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][3], # TargetVideoFilename joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][4], # UtteranceSequence joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][5], # TokenSequence frame_seq ) for frame_seq in sorted(joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[1]['frame_sequences']) ] ) | "Beam PL: 'explode' listof((TokenID,CameraPerspective,ASLConsultantID,TargetVideoFilename,UtteranceSequence,TokenSequence, FrameSequence)) from joined ttrain__ctvusts_by_tcp__lte_1 to tcpctvustsfs" >> beam.FlatMap( lambda list_joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl: list_joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl ) ) train_tcpctvustsfs__all = ( (train_tcpctvustsfs__gt__1, train_tcpctvustsfs__lte_1) | f"Beam PL: merge train_tcpctvustsfs__gt__1 with train_tcpctvustsfs__lte_1" >> beam.Flatten() ) return train_tcpctvustsfs__all
1824b26a449a24ae02e72c1fe1fc6931c9658875
285
def createList(value, n):
    """
    @param value: value to initialize the list
    @param n: list size to be created
    @return: size n list initialized to value
    """
    return [value for _ in range(n)]
ff419e6c816f9b916a156e21c68fd66b36de9cfb
286
def label_class_num(label):
    """
    Return the number of label classes.
    :param label: label array whose second dimension indexes the classes
    :return: number of classes
    """
    return label.shape[1]
d3b9f6e7b84c10af289878587d7f36bf18147b9e
287
def heur(puzzle, item_total_calc, total_calc):
    """
    Heuristic template that provides the current and target position for each number and the total function.

    Parameters:
    puzzle - the puzzle
    item_total_calc - takes 4 parameters: current row, target row, current col, target col.
    Returns int.
    total_calc - takes 1 parameter, the sum of item_total_calc over all entries, and returns int.
    This is the value of the heuristic function
    """
    t = 0
    for row in range(3):
        for col in range(3):
            val = puzzle.peek(row, col) - 1
            target_col = val % 3
            target_row = val // 3

            # account for 0 as blank
            if target_row < 0:
                target_row = 2

            t += item_total_calc(row, target_row, col, target_col)

    return total_calc(t)
bed67110858733a20b89bc1aacd6c5dc3ea04e13
288
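A small sketch of how heur can be wired up as a Manhattan-distance heuristic. The FakePuzzle class and the solved board below are hypothetical stand-ins for whatever puzzle object the caller actually uses; anything exposing peek(row, col) works.

class FakePuzzle:
    """Hypothetical 3x3 board exposing the peek(row, col) accessor heur expects."""
    def __init__(self, grid):
        self.grid = grid

    def peek(self, row, col):
        return self.grid[row][col]

solved = FakePuzzle([[1, 2, 3],
                     [4, 5, 6],
                     [7, 8, 0]])  # 0 marks the blank

# Manhattan distance: per-tile cost is |row - target_row| + |col - target_col|,
# and the total is just the sum itself.
h = heur(solved,
         lambda row, target_row, col, target_col: abs(row - target_row) + abs(col - target_col),
         lambda total: total)
print(h)  # 0 for the solved board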
def make_argparse_help_safe(s):
    """Make strings safe for argparse's help.

    Argparse interprets '%' as the start of a format specifier, which is not
    always wanted. Escape user-supplied strings so they render literally.
    """
    return s.replace('%', '%%').replace('%%%', '%%')
3a1e6e072a8307df884e39b5b3a0218678d08462
289
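A quick sketch of the escaping behaviour on a help string containing a literal percent sign; argparse collapses the doubled %% back to a single % when rendering --help.

raw_help = "accepts values like 50% of the total"
safe_help = make_argparse_help_safe(raw_help)
print(safe_help)  # accepts values like 50%% of the total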
def create_pysm_commands( mapfile, nside, bandcenter_ghz, bandwidth_ghz, beam_arcmin, coord, mpi_launch, mpi_procs, mpi_nodes, ): """ Return lines of shell code to generate the precomputed input sky map. """ mpistr = "{}".format(mpi_launch) if mpi_procs != "": mpistr = "{} {} 1".format(mpistr, mpi_procs) if mpi_nodes != "": mpistr = "{} {} 1".format(mpistr, mpi_nodes) outstr = "# Create sky model\n" outstr = '{}if [ ! -e "{}" ]; then\n'.format(outstr, mapfile) outstr = '{} echo "Creating sky model {} ..."\n'.format(outstr, mapfile) outstr = '{} {} ./pysm_sky.py --output "{}" --nside {} --bandcenter_ghz {} --bandwidth_ghz {} --beam_arcmin {} --coord {}\n'.format( outstr, mpistr, mapfile, nside, bandcenter_ghz, bandwidth_ghz, beam_arcmin, coord, ) outstr = "{}fi\n".format(outstr) outstr = "{}\n".format(outstr) return outstr
f0528968096f41a291a369477d8e2071f4b52339
292
def edits1(word): """ All edits that are one edit away from `word`. """ letters = 'abcdefghijklmnopqrstuvwxyz' splits = [(word[:i], word[i:]) for i in range(len(word) + 1)] deletes = [L + R[1:] for L, R in splits if R] transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1] replaces = [L + c + R[1:] for L, R in splits if R for c in letters] inserts = [L + c + R for L, R in splits for c in letters] return set(deletes + transposes + replaces + inserts)
ec78ba3648e04c59b380cd37760b7865e5f364ea
293
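A short sketch showing the size of the one-edit neighbourhood produced by edits1 and a few membership checks.

candidates = edits1("cat")
print(len(candidates))        # roughly 180 distinct one-edit strings for a 3-letter word
print("cats" in candidates)   # True: one insertion away
print("bat" in candidates)    # True: one replacement away
print("dog" in candidates)    # False: more than one edit away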
def process_text_cn(text: str):
    """Process Chinese text: strip whitespace characters and normalize punctuation."""
    text = del_white_chars(text)
    text = sub_punctuation(text)
    return text
18fd44ee2c5929fd6fe3c4aef9fde332773b016c
294
import math def total_elastic_cross_section_browning1994_cm2(atomic_number, energy_keV): """ From browning1994 Valid in the range 100 eV to 30 keV for elements 1 to 92. """ Z = atomic_number E = energy_keV factor = 3.0e-18 power_z = math.pow(Z, 1.7) power_e = math.pow(E, 0.5) nominator = factor*power_z denominator = E + 0.005 * power_z * power_e + 0.0007 * Z * Z / power_e cross_section_cm2 = nominator/denominator return cross_section_cm2
bf12a49e3aba07a44e44bfb6df87212745fd5ed3
295
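A small usage sketch evaluating the Browning (1994) parameterisation for a light element at a mid-range beam energy; the printed value is simply whatever the formula returns, in cm^2.

# Carbon (Z = 6) at 10 keV, inside the stated 100 eV to 30 keV validity range.
sigma = total_elastic_cross_section_browning1994_cm2(atomic_number=6, energy_keV=10.0)
print("sigma = {:.3e} cm^2".format(sigma))

# The cross section grows with Z at fixed energy, e.g. gold vs carbon:
print(total_elastic_cross_section_browning1994_cm2(79, 10.0) >
      total_elastic_cross_section_browning1994_cm2(6, 10.0))   # True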
def plot_fancy(nodes, elems, phi=None, charge=None, u=None, charge_max=None, show=False, save=None, num_intp=100, title=None, clabel=None, animation_mode=True, latex=False): """ Plots fancily. """ if animation_mode: fig = Figure(colorbar=False, tight_layout=True, show=show, xlabel="", ylabel="", save=save, ticks=False, latex=latex) else: fig = Figure(colorbar=True, tight_layout=False, show=show, xlabel=tex_escape("x"), ylabel=tex_escape("y"), save=save, ticks=True, latex=latex) if phi is None: phi = -np.ones(len(nodes)) if charge is None: charge = np.zeros(len(nodes)) if charge_max is None: charge_max = max(np.max(np.abs(charge)), 1e-10) cmap = plt.cm.get_cmap('Greys') cmap._init() cmap._lut[:, :] = 0. length = len(cmap._lut[:, -1]) # cmap._lut[:, -1] = np.linspace(0., 1.0, length) cmap._lut[:length//2, -1] = 0. cmap._lut[length//2:, -1] = 1. phi[phi > 1.] = 1. phi[phi < -1.] = -1. plt.tripcolor(nodes[:, 0], nodes[:, 1], elems, charge, cmap=plt.get_cmap("coolwarm"), shading="gouraud", vmin=-charge_max, vmax=charge_max) plt.tricontourf(nodes[:, 0], nodes[:, 1], elems, phi, cmap=cmap, levels=[-2.0, 0., 2.0], antialiased=True) if u is not None: Lx = nodes[:, 0].max()-nodes[:, 0].min() Ly = nodes[:, 1].max()-nodes[:, 1].min() dx = max(Lx, Ly)/num_intp Nx = int(Lx/dx) Ny = int(Ly/dx) x_i, y_i = np.meshgrid( np.linspace(dx+nodes[:, 0].min(), nodes[:, 0].max()-dx, Nx), np.linspace(dx+nodes[:, 1].min(), nodes[:, 1].max()-dx, Ny)) triang = mtri.Triangulation(nodes[:, 0], nodes[:, 1], elems) ux_interp = mtri.LinearTriInterpolator(triang, u[:, 0]) uy_interp = mtri.LinearTriInterpolator(triang, u[:, 1]) phi_interp = mtri.LinearTriInterpolator(triang, phi) ux_i = ux_interp(x_i, y_i) uy_i = uy_interp(x_i, y_i) phi_i = phi_interp(x_i, y_i) ux_i = np.array(ux_i.filled(0.)) uy_i = np.array(uy_i.filled(0.)) phi_i = np.array(phi_i.filled(0.)) u_norm = np.sqrt(ux_i**2 + uy_i**2) lw = np.zeros_like(ux_i) lw[:] += 5*u_norm/(u_norm.max() + 1e-10) mask = np.zeros(ux_i.shape, dtype=bool) mask[phi_i > 0.] = True ux_i_2 = np.ma.array(ux_i, mask=mask) fig.ax.streamplot(x_i, y_i, ux_i_2, uy_i, color="k", density=0.6, linewidth=lw) mask = np.zeros(ux_i.shape, dtype=bool) mask[phi_i < 0.] = True ux_i_2 = np.ma.array(ux_i, mask=mask) fig.ax.streamplot(x_i, y_i, ux_i_2, uy_i, color="w", density=0.6, linewidth=lw) return fig
a334c581b9601c73a1b003aec14f4f179dab5202
296
# Assumption: the GPT-2 classes come from the Hugging Face transformers package.
from transformers import GPT2LMHeadModel, GPT2Tokenizer

def buildModelGPT(modelType='gpt2-medium'):
    """
    Builds and returns a pre-trained GPT-2 model and its tokenizer.
    """
    ## Create Model
    # Load pre-trained model tokenizer (vocabulary)
    tokenizer = GPT2Tokenizer.from_pretrained(modelType)
    # Load pre-trained model (weights)
    model = GPT2LMHeadModel.from_pretrained(modelType)
    # Set the model in evaluation mode to deactivate the DropOut modules
    # This is IMPORTANT to have reproducible results during evaluation!
    model.eval()
    return model, tokenizer
51b2dca333a06ed9168d3056b681b1ed192c5761
297
import cv2 as cv  # assumption: the cv alias refers to OpenCV (cv2)

def vid_to_list(filepath):
    """
    Converts a video file to a list of 3d arrays of dim (h, w, c)
    Input:
        filepath: (str) full filepath of video
    Output:
        list_of_frames: (list) list of 3d numpy arrays of shape (height, width, color)
    """
    cap = cv.VideoCapture(filepath)
    list_of_frames = []
    while True:
        ret, frame = cap.read()
        if ret:
            frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
            list_of_frames.append(frame)
        else:
            break
    return list_of_frames
062423bcf10749705e767edf6721dd20903653ef
298
import pickle def from_pickle(fname=interpolator_path): """Loads grid inperpolator from pickle located at `fname`. """ with open(fname, "rb") as f: grid = pickle.load(f) return grid
5b4f2a94ba3024ea63a5859284f1b6877bac1623
299
import torch def get_prob_and_custom_prob_per_crops( logits_for_patches, img_size_work_px_space, n_pixels_in_crop, descendent_specifier, target_list, rf, DEVICE, ): """Determine the probability and the custom probability (i.e. the non-Deep-Learning "logit", cf. Appendix C.2) for crops according to the descendent_specifier, i.e. either each crop or only the four corner crops. Note that for the grouping of patches into one crop, each directly neighboring patch is considered (stride 1: logits_for_patches_reshaped[start_row:stop_row:stride_1, start_col:stop_col:stride_1]). This enables us to both either select all data for all crops or only the data for the corner crops. This is in contrast to the value that was used to train and evaluate BagNet-33 (stride = 8). Args: logits_for_patches: logit predictions for each patch torch tensor, dtype = torch.float32 np_array of dimensions n_patches x 1000 img_size_work_px_space: number of image pixels in latest parent n_pixels_in_crop: size of child crop descendent_specifier: choice between selecting all crops ("stride1") or only four corner crops ("Ullman4") target_list: list of targets rf: number of pixels in image crop for BagNet-33 rf stands for receptive field size Returns: prob_per_crop: list of length n_crops^2 containing the probabilities per relevant crop custom_prob_per_crop: list of length n_crops^2 containing the custom probabilities per relevant crop """ # When the crop is larger than 33x33 (or in fact 37x37 because that's the # next larger pixel size appearing in the decreasing order of pixels when # decreasing by 80% for each crop from 224 pixels), group patches into # crops to calculate the probaiblities and the custom probabilities if img_size_work_px_space > 37: # calculate how many crops there are n_crops = img_size_work_px_space - n_pixels_in_crop + 1 # calculate how many patches contribute to one crop in one dimensions # (i.e. width or height) n_patches_contribute_to_crop = n_pixels_in_crop - rf + 1 # make matrix square instead of one-dimensional along the patch-axis patch_square_length = int(np.sqrt(logits_for_patches.size()[0])) logits_for_patches_reshaped = torch.reshape( logits_for_patches, (patch_square_length, patch_square_length, logits_for_patches.shape[1]), ) # loop through each crop prob_per_crop = [] custom_prob_per_crop = [] for start_row in range(n_crops): stop_row = start_row + n_patches_contribute_to_crop for start_col in range(n_crops): stop_col = start_col + n_patches_contribute_to_crop # average logits over patches logit_avg_of_cur_patch = torch.mean( torch.mean( logits_for_patches_reshaped[ start_row:stop_row, start_col:stop_col ], dim=0, ), dim=0, ) # calculate probabilities prob_for_targets_summed = get_prob_for_logits( logit_avg_of_cur_patch[None, :], target_list ) prob_per_crop.append(prob_for_targets_summed) # calculate custom probabilities cur_custom_prob_per_crop = get_custom_prob( logit_avg_of_cur_patch[None, :], target_list, DEVICE ) custom_prob_per_crop.append(cur_custom_prob_per_crop[0]) # patches correspond to crops else: custom_prob_per_crop = get_custom_prob( logits_for_patches, target_list, DEVICE) prob_per_crop = list( get_prob_for_logits( logits_for_patches, target_list)) # if only the four corner crops are of interest ("Ullman4"), get that data # only if descendent_specifier == "Ullman4": prob_per_crop, custom_prob_per_crop = extract_corner_data_for_Ullman4( prob_per_crop, custom_prob_per_crop ) return prob_per_crop, custom_prob_per_crop
18a4c166334c6e5c3fa3300ddd255992677d412b
300
def xywh_to_xyxy(boxes): """Convert [x y w h] box format to [x1 y1 x2 y2] format.""" if boxes is None or len(boxes) == 0: return boxes boxes = np.array(boxes) return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1))
391c55ddd2e84cf60073cbd02d0e5d595f3ea3b1
301
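A tiny sketch of the box-format conversion above; note the -1 pixel convention for the inclusive right/bottom edge.

boxes_xywh = [[10, 20, 5, 5], [0, 0, 100, 50]]
print(xywh_to_xyxy(boxes_xywh))
# [[10 20 14 24]
#  [ 0  0 99 49]]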
def vscat(a,fig=None,ls=None,marker='o',nmin=2,mhmin=-3,density=False,out=None) : """ Make histograms of VSCATTER for different bins of Teff H], given min NVISITS, and min [M/H] """ if fig == None : fig,ax=plots.multi(4,6,hspace=0.001,wspace=0.4,figsize=(12,8)) else : fig,ax=fig tbins=[3000,3500,4000,4500,5500,8000,30000] hbins=[8,11,12,13,15] try: snr = a['SNREV'] except: snr=a['SNR'] j=np.where(snr > 300) [0] snr[j] = 300 for i in range(len(tbins)-1) : ax[i,0].text(0.9,0.9,'{:d}<=RV_TEFF<{:d}'.format(tbins[i],tbins[i+1]),ha='right',transform=ax[i,0].transAxes,fontsize=8) for j in range(len(hbins)-1) : ax[0,j].set_title('{:d}<=H<{:d}'.format(hbins[j],hbins[j+1])) gd = np.where((a['RV_TEFF']>=tbins[i]) & (a['RV_TEFF']<tbins[i+1]) & (a['H']>=hbins[j]) & (a['H']<hbins[j+1]) & (a['NVISITS']>nmin) & (a['RV_FEH']>mhmin) & (a['VSCATTER'] > 0)) [0] print(tbins[i],tbins[i+1],hbins[j],hbins[j+1],nmin,len(gd)) try : #plots.plotc(ax[i,2],snr[gd],a['VSCATTER'][gd],a['RV_FEH'][gd],marker=marker,xr=[0,310],yr=[0,1],xt='S/N',yt='VSCATTER') ax[i,j].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),ls=ls,histtype='step',color=colors[j],normed=density) ax[i,j].set_xlabel('VSCATTER (km/s)') ax[i,j].plot([0.1,0.1],ax[i,j].get_ylim()) #ax[i,1].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),histtype='step',cumulative=True,normed=True,ls=ls,color=colors[j]) #ax[i,1].set_xlabel('VSCATTER') except : pass if out is not None : fig.savefig(out+'.png') plt.close() fig.suptitle('NVISITS>{:d} [M/H]>{:6.2f}'.format(nmin,mhmin)) return fig,ax
b302883263ef79682e697d4c82b0fc352eb597ec
302
def parse_unique_count_for_column(column_df, column):
    """
    Returns column-specific distribution details, mapping the column name to its
    unique-value counts. Sample output:
    ```
    "<column>": {
        "<unique_value>": 30
    }
    ```
    """
    return {column: get_unique_counts_of_column(column_df)}
275375c012d8ffc2bd8f209bf57e1c1aa1d183f6
303
def transaction(): """ Get database transaction object :return: _TransactionContext object usage: with transaction(): # transactions operation pass >>> def update_profile(t_id, name, rollback): ... u = dict(id=t_id, name=name, email='%s@test.org' % name, password=name, last_modified=time.time()) ... insert('testuser', **u) ... update('update testuser set password=%s where id=%s', name.upper(), t_id) ... if rollback: ... raise StandardError('will cause rollback...') >>> with transaction(): ... update_profile(900301, 'Python', False) >>> select_one('select * from testuser where id=%s', 900301).name u'Python' >>> with transaction(): ... update_profile(900302, 'Ruby', True) Traceback (most recent call last): ... StandardError: will cause rollback... >>> select('select * from testuser where id=%s', 900302) [] """ return _TransactionContext()
d0b941dd9c2ce3e07079280edc74324a28d60509
304
from typing import Union from typing import Dict from typing import Any import typing def _BoundedIntRange( description: str = "", description_tooltip: str = None, layout: Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]] = {}, max: int = 100, min: int = 0, style: Union[Dict[str, Any], Element[ipywidgets.widgets.widget_description.DescriptionStyle]] = {}, value: tuple = (0, 1), on_description: typing.Callable[[str], Any] = None, on_description_tooltip: typing.Callable[[str], Any] = None, on_layout: typing.Callable[[Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]]], Any] = None, on_max: typing.Callable[[int], Any] = None, on_min: typing.Callable[[int], Any] = None, on_style: typing.Callable[[Union[Dict[str, Any], Element[ipywidgets.widgets.widget_description.DescriptionStyle]]], Any] = None, on_value: typing.Callable[[tuple], Any] = None, ) -> Element[ipywidgets.widgets.widget_int._BoundedIntRange]: """ :param description: Description of the control. :param description_tooltip: Tooltip for the description (defaults to description). :param max: Max value :param min: Min value :param style: Styling customizations :param value: Tuple of (lower, upper) bounds """ kwargs: Dict[Any, Any] = without_default(_BoundedIntRange, locals()) if isinstance(kwargs.get("layout"), dict): kwargs["layout"] = Layout(**kwargs["layout"]) if isinstance(kwargs.get("style"), dict): kwargs["style"] = DescriptionStyle(**kwargs["style"]) widget_cls = ipywidgets.widgets.widget_int._BoundedIntRange comp = react.core.ComponentWidget(widget=widget_cls) return Element(comp, **kwargs)
0f54f750da5000df2952298b0f6bec987a7b02b4
305
def get_benchmark_snapshot(benchmark_df, threshold=_MIN_FRACTION_OF_ALIVE_TRIALS_AT_SNAPSHOT): """Finds the latest time where |threshold| fraction of the trials were still running. In most cases, this is the end of the experiment. However, if less than |threshold| fraction of the trials reached the end of the experiment, then we will use an earlier "snapshot" time for comparing results. Returns a data frame that only contains the measurements of the picked snapshot time. """ # Allow overriding threshold with environment variable as well. threshold = environment.get('BENCHMARK_SAMPLE_NUM_THRESHOLD', threshold) num_trials = benchmark_df.trial_id.nunique() trials_running_at_time = benchmark_df.time.value_counts() criteria = trials_running_at_time >= threshold * num_trials ok_times = trials_running_at_time[criteria] latest_ok_time = ok_times.index.max() benchmark_snapshot_df = benchmark_df[benchmark_df.time == latest_ok_time] return benchmark_snapshot_df
6e7f887f3f720612013dfe06b5decbf2e092a2e2
307
import tqdm import timeit import warnings def get_results_seq_len(given_true_eig, hidden_dim, input_dim, min_seq_len, max_seq_len, num_sampled_seq_len, num_repeat, input_mean, input_stddev, output_noise_stddev, init_state_mean=0.0, init_state_stddev=0.0, generate_diagonalizable_only=False, random_seed=0): """Get results for varying sequence lengths. Args: given_true_eig: Ground truth of eigenvalues. If None, generate random eigenvalues from uniform [-1,1] in each repeat of experiment. hidden_dim: Assumed hidden dim. If 0, use true hidden dim. input_dim: The input dim. min_seq_len: Min seq len in experiments. max_seq_len: Max seq len in experiments. num_sampled_seq_len: Number of sampled seq len values in between min and max seq len. num_repeat: Number of repeated experiments for each seq_len. input_mean: Scalar or 1D array of length hidden state dim. input_stddev: Scalar of 1D array of length hidden state dim. output_noise_stddev: Scalar. init_state_mean: Scalar or 1D array of length hidden state dim. init_state_stddev: Scalar of 1D array of length hidden state dim. generate_diagonalizable_only: Whether to only use diagonalizable LDSs in simulations. random_seed: Random seed, integer. Returns: A pandas DataFrame with columns `method`, `seq_len`, `t_secs`, `failed_ratio`, and `l2_r_error`. The same method and seq_len will appear in num_repeat many rows. """ np.random.seed(random_seed) progress_bar = tqdm.tqdm(total=num_repeat * num_sampled_seq_len) gen = lds.SequenceGenerator( input_mean=input_mean, input_stddev=input_stddev, output_noise_stddev=output_noise_stddev, init_state_mean=init_state_mean, init_state_stddev=init_state_stddev) # seq_len_vals = np.linspace(min_seq_len, max_seq_len, num_sampled_seq_len) # seq_len_vals = [int(round(x)) for x in seq_len_vals] min_inv_sqrt_seq_len = 1. / np.sqrt(max_seq_len) max_inv_sqrt_seq_len = 1. / np.sqrt(min_seq_len) inv_sqrt_seq_len_vals = np.linspace(min_inv_sqrt_seq_len, max_inv_sqrt_seq_len, num_sampled_seq_len) seq_len_vals = [int(round(1. 
/ (x * x))) for x in inv_sqrt_seq_len_vals] learning_fns = create_learning_fns(hidden_dim) metric_dict = { k: [] for k in [ 'method', 'seq_len', 't_secs', 'l2_a_error', 'l2_r_error', 'failed_convg' ] } for _ in xrange(num_repeat): if given_true_eig is not None: ground_truth = lds.generate_linear_dynamical_system( hidden_dim, input_dim, eigvalues=given_true_eig) else: ground_truth = lds.generate_linear_dynamical_system( hidden_dim, input_dim, diagonalizable=generate_diagonalizable_only) true_eig = ground_truth.get_spectrum() for seq_len in seq_len_vals: seq = gen.generate_seq(ground_truth, seq_len=seq_len) for k, fn in learning_fns.iteritems(): start_t = timeit.default_timer() with warnings.catch_warnings(record=True) as caught: warnings.filterwarnings( 'always', category=sm_exceptions.ConvergenceWarning) if FLAGS.hide_inputs: eig_pred = fn(seq.outputs, None) else: eig_pred = fn(seq.outputs, seq.inputs) t_elapsed = timeit.default_timer() - start_t metric_dict['seq_len'].append(seq_len) metric_dict['method'].append(k) metric_dict['t_secs'].append(t_elapsed) metric_dict['l2_a_error'].append(np.linalg.norm(true_eig - eig_pred)) metric_dict['l2_r_error'].append( np.linalg.norm(true_eig - eig_pred) / np.linalg.norm(true_eig)) metric_dict['failed_convg'].append(False) for w in caught: if w.category in [ RuntimeWarning, sm_exceptions.ConvergenceWarning, sm_exceptions.HessianInversionWarning ]: metric_dict['failed_convg'][-1] = True else: warnings.warn(w.message, w.category) progress_bar.update(1) progress_bar.close() return pd.DataFrame(data=metric_dict)
6d7439b4b9c5bca6010eaffc4c924ef8f09fbb4f
310
import functools def episode_to_timestep_batch( episode: rlds.BatchedStep, return_horizon: int = 0, drop_return_horizon: bool = False, flatten_observations: bool = False, calculate_episode_return: bool = False) -> tf.data.Dataset: """Converts an episode into multi-timestep batches. Args: episode: Batched steps as provided directly by RLDS. return_horizon: int describing the horizon to which we should accumulate the return. drop_return_horizon: bool whether we should drop the last `return_horizon` steps to avoid mis-calculated returns near the end of the episode. flatten_observations: bool whether we should flatten dict-based observations into a single 1-d vector. calculate_episode_return: Whether to calculate episode return. Can be an expensive operation on datasets with many episodes. Returns: rl_dataset.DatasetType of 3-batched transitions, with scalar rewards expanded to 1D rewards This means that for every step, the corresponding elements will be a batch of size 3, with the first batched element corresponding to *_t-1, the second to *_t and the third to *_t+1, e.g. you can access the previous observation as: ``` o_tm1 = el[types.OBSERVATION][0] ``` Two additional keys can be added: 'R_t' which corresponds to the undiscounted return for horizon `return_horizon` from time t (always present), and 'R_total' which corresponds to the total return of the associated episode (if `calculate_episode_return` is True). Rewards are converted to be (at least) one-dimensional, prior to batching (to avoid ()-shaped elements). In this example, 0-valued observations correspond to o_{t-1}, 1-valued observations correspond to o_t, and 2-valued observations correspond to s_{t+1}. This same structure is true for all keys, except 'R_t' and 'R_total' which are both scalars. ``` ipdb> el[types.OBSERVATION] <tf.Tensor: shape=(3, 11), dtype=float32, numpy= array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], [2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.]], dtype=float32)> ``` """ steps = episode[rlds.STEPS] if drop_return_horizon: episode_length = steps.cardinality() steps = steps.take(episode_length - return_horizon) # Calculate n-step return: rewards = steps.map(lambda step: step[rlds.REWARD]) batched_rewards = rlds.transformations.batch( rewards, size=return_horizon, shift=1, stride=1, drop_remainder=True) returns = batched_rewards.map(tf.math.reduce_sum) output = tf.data.Dataset.zip((steps, returns)).map(_append_n_step_return) # Calculate total episode return for potential filtering, use total # of steps # to calculate return. if calculate_episode_return: dtype = jnp.float64 if jax.config.jax_enable_x64 else jnp.float32 # Need to redefine this here to avoid a tf.data crash. rewards = steps.map(lambda step: step[rlds.REWARD]) episode_return = rewards.reduce(dtype(0), lambda x, y: x + y) output = output.map( functools.partial( _append_episode_return, episode_return=episode_return)) output = output.map(_expand_scalars) if flatten_observations: output = output.map(_flatten_observations) output = rlds.transformations.batch( output, size=3, shift=1, drop_remainder=True) return output
105c528772257b990897d35e8f8b82663e26e57c
311
def parse(authz_file, modules): """Parse a Subversion authorization file. Return a dict of modules, each containing a dict of paths, each containing a dict mapping users to permissions. Only modules contained in `modules` are retained. """ parser = UnicodeConfigParser(ignorecase_option=False) parser.read(authz_file) groups = {} aliases = {} sections = {} for section in parser.sections(): if section == 'groups': for name, value in parser.items(section): groups.setdefault(name, set()).update(to_list(value)) elif section == 'aliases': for name, value in parser.items(section): aliases[name] = value.strip() else: for name, value in parser.items(section): parts = section.split(':', 1) module, path = parts[0] if len(parts) > 1 else '', parts[-1] if module in modules: sections.setdefault((module, path), []) \ .append((name, value)) def resolve(subject, done): if subject.startswith('@'): done.add(subject) for members in groups[subject[1:]] - done: for each in resolve(members, done): yield each elif subject.startswith('&'): yield aliases[subject[1:]] else: yield subject authz = {} for (module, path), items in sections.iteritems(): section = authz.setdefault(module, {}).setdefault(path, {}) for subject, perms in items: readable = 'r' in perms # Ordering isn't significant; any entry could grant permission section.update((user, readable) for user in resolve(subject, set()) if not section.get(user)) return authz
9102d77ed5db05c582a0ecbc3eb26a63fd579ce6
312
def send_expiry_note(invite, request, user_name):
    """
    Send a notification email to the issuer of an invitation when a user
    attempts to accept an expired invitation.
    :param invite: ProjectInvite object
    :param request: HTTP request
    :param user_name: User name of invited user
    :return: Number of emails sent (int)
    """
    subject = (
        SUBJECT_PREFIX
        + ' '
        + SUBJECT_EXPIRY.format(
            user_name=user_name, project=invite.project.title
        )
    )
    message = get_email_header(
        MESSAGE_HEADER.format(
            recipient=invite.issuer.get_full_name(), site_title=SITE_TITLE
        )
    )
    message += MESSAGE_EXPIRY_BODY.format(
        role=invite.role.name,
        project=invite.project.title,
        user_name=user_name,
        user_email=invite.email,
        date_expire=localtime(invite.date_expire).strftime('%Y-%m-%d %H:%M'),
        site_title=SITE_TITLE,
        project_label=get_display_name(invite.project.type),
    )
    if not settings.PROJECTROLES_EMAIL_SENDER_REPLY:
        message += NO_REPLY_NOTE
    message += get_email_footer()
    return send_mail(subject, message, [invite.issuer.email], request)
62602f670724630ff5aeb99ab28b7dfb18fc5233
313
def level_is_between(level, min_level_value, max_level_value): """Returns True if level is between the specified min or max, inclusive.""" level_value = get_level_value(level) if level_value is None: # unknown level value return False return level_value >= min_level_value and level_value <= max_level_value
b0bc1c4ea749d51af147bc1237aac5a5d1e5ba1b
314
def voronoiMatrix(sz=512,percent=0.1,num_classes=27): """ Create voronoi polygons. Parameters ---------- sz : int row and column size of the space in which the circle is placed percent : float Percent of the space to place down centers of the voronoi polygons. Smaller percent makes the polygons larger num_classes : int Number of classes to assign to each of the voronoi polygons Returns ------- X : 2D array Array containing all voronoi polygons """ X = np.zeros((sz,sz)) #fill in percentage of the space locs = np.random.rand(sz,sz)<=percent vals = np.random.randint(0,num_classes,size=(sz,sz)) X[locs]=vals[locs] #get all the indices of the matrix cc,rr = np.meshgrid(np.arange(0,sz),np.arange(0,sz)) f = np.zeros((sz**2,2)) f[:,0]=rr.ravel() #feature1 f[:,1]=cc.ravel() #feature2 t = X.ravel() #target train_ind = locs.ravel() f_train = f[train_ind] t_train = t[train_ind] clf = neighbors.KNeighborsClassifier(n_neighbors=1) clf.fit(f_train, t_train) preds = clf.predict(f) locs = f.astype(int) X[locs[:,0],locs[:,1]] = preds return X
c4dfb74ae5b9e26494cb8642eb96dca565d6497d
315
from typing import Counter def genVBOF2(virus_record, model, model_name=None): """New version of the genVBOF function by Hadrien. Builds a Virus Biomass Objective Function (basically a virus biomass production reaction, from aminoacids and nucleotides) from a genbank file. Params: - virus_record: genbank record of a virus (output from Bio.SeqIO.parse) - model: a cobra metabolic model (cobra.core.model.Model) Returns: - virus biomass objective function (cobra.core.reaction.Reaction) """ met_dict = load_metabolite_id_dict(model, model_name=model_name) # VIRUS IDENTIFICATION taxonomy = " ".join([taxon.lower() for taxon in virus_record.annotations["taxonomy"]]) if "betacoronavirus" not in taxonomy: raise NotImplementedError('Virus family is not supported: Unable to create VBOF. Consult _README') short_name, full_name = get_virus_names(virus_record) # AMINOACID COUNT all_cds = {feature for feature in virus_record.features if feature.type == "CDS"} # Check that our own virus_composition dict contain exactly the # proteins defined in the genbank file, no more, no less. protein_names_in_gb_file = {cds.qualifiers["product"][0] for cds in all_cds} protein_names_in_our_data = {protein_name for protein_name in virus_composition[short_name]["proteins"]} assert protein_names_in_gb_file == protein_names_in_our_data virus_aa_composition = Counter() # protein name -> number of atp involved in its peptide bonds formations # (accounting for the number of copies of protein) peptide_bond_formation = dict() for cds in all_cds: protein_name = cds.qualifiers["product"][0] aa_sequence = cds.qualifiers["translation"][0] aa_count = Counter(aa_sequence) copies_per_virus = virus_composition[short_name]["proteins"][protein_name] virus_aa_composition += multiply_counter(aa_count, copies_per_virus) peptide_bond_formation[protein_name] = (len(aa_sequence) * k_atp_protein - k_atp_protein) * copies_per_virus # [3] Precursor frequency # Genome [Nucleotides] Cg = virus_composition[short_name]["Cg"] # number of genome copies per virus virus_nucl_count = Counter(str(virus_record.seq)) countA = virus_nucl_count["A"] countC = virus_nucl_count["C"] countG = virus_nucl_count["G"] countU = virus_nucl_count["T"] # Base 'T' is pseudo for base 'U' antiA = countU antiC = countG antiG = countC antiU = countA # Count summation totNTPS = (Cg * (countA + countC + countG + countU + antiA + antiC + antiG + antiU)) totAA = sum(count for count in virus_aa_composition.values()) # [4] VBOF Calculations # Nucleotides # mol.ntps/mol.virus V_a = (Cg*(countA + antiA)) V_c = (Cg*(countC + antiC)) V_g = (Cg*(countG + antiG)) V_u = (Cg*(countU + antiU)) # g.ntps/mol.virus G_a = V_a * ntpsDict["atp"] G_c = V_c * ntpsDict["ctp"] G_g = V_g * ntpsDict["gtp"] G_u = V_u * ntpsDict["ttp"] # Amino Acids # g.a/mol.virus G_aa = {aa: count * aaDict[aa] for aa, count in virus_aa_composition.items()} # Total genomic and proteomic molar mass M_v = (G_a + G_c + G_g + G_u) + sum(G_aa.values()) # Stoichiometric coefficients # Nucleotides [mmol.ntps/g.virus] (for the genome) S_atp = 1000 * (V_a/M_v) S_ctp = 1000 * (V_c/M_v) S_gtp = 1000 * (V_g/M_v) S_utp = 1000 * (V_u/M_v) # Amino acids [mmol.aa/g.virus] S_aa = {aa: 1000 * V_aa / M_v for aa, V_aa in virus_aa_composition.items()} # Energy requirements # Genome: Phosphodiester bond formation products [Pyrophosphate] # SARS Cov 2 is a single stranded RNA virus: it has to first do an # intermediary reverse copy of itself and then replicate itself from # that intermediary strand. 
genTemp = (((countA + countC + countG + countU) * k_ppi) - k_ppi) genRep = (((antiA + antiC + antiG + antiU) * k_ppi) - k_ppi) genTot = genTemp + genRep V_ppi = genTot S_ppi = 1000 * (V_ppi / M_v) # Proteome: Peptide bond formation [ATP + H2O] # Note: ATP used in this process is denoated as ATPe/Ae [e = energy version] V_Ae = sum(peptide_bond_formation.values()) S_Ae = 1000 * (V_Ae / M_v) # [5] VBOF Reaction formatting and output # Left-hand terms: Nucleotides # Note: ATP term is a summation of genome and energy requirements S_ATP = (S_atp + S_Ae) * -1 S_CTP = S_ctp * -1 S_GTP = S_gtp * -1 S_UTP = S_utp * -1 # Left-hand terms: Amino Acids S_AAf = {aa: -coef for aa, coef in S_aa.items()} # Left-hand terms: Energy Requirements S_H2O = S_Ae * -1 # Right-hand terms: Energy Requirements S_ADP = S_Ae S_Pi = S_Ae S_H = S_Ae S_PPi = S_ppi reaction_name = short_name + '_prodrxn_VN' virus_reaction = Reaction(reaction_name) virus_reaction.name = full_name + ' production reaction' virus_reaction.subsystem = 'Virus Production' virus_reaction.lower_bound = 0 virus_reaction.upper_bound = 1000 virus_reaction.add_metabolites(({ met_dict['atp']: S_ATP, met_dict['ctp']: S_CTP, met_dict['gtp']: S_GTP, met_dict['utp']: S_UTP, met_dict['A']: S_AAf['A'], met_dict['R']: S_AAf['R'], met_dict['N']: S_AAf['N'], met_dict['D']: S_AAf['D'], met_dict['C']: S_AAf['C'], met_dict['Q']: S_AAf['Q'], met_dict['E']: S_AAf['E'], met_dict['G']: S_AAf['G'], met_dict['H']: S_AAf['H'], met_dict['I']: S_AAf['I'], met_dict['L']: S_AAf['L'], met_dict['K']: S_AAf['K'], met_dict['M']: S_AAf['M'], met_dict['F']: S_AAf['F'], met_dict['P']: S_AAf['P'], met_dict['S']: S_AAf['S'], met_dict['T']: S_AAf['T'], met_dict['W']: S_AAf['W'], met_dict['Y']: S_AAf['Y'], met_dict['V']: S_AAf['V'], met_dict['h2o']: S_H2O, met_dict['adp']: S_ADP, met_dict['Pi']: S_Pi, met_dict['h']: S_H, met_dict['PPi']: S_PPi})) return virus_reaction
98f0aafae9efa65d18b65e23eb9d7c0457641bb0
316
import time def get_current_ms_time() -> int: """ :return: the current time in milliseconds """ return int(time.time() * 1000)
3c037bffb486ebae3ffcfba5fe431bd9b69b3bda
318
def get_service_info(): # noqa: E501 """Get information about Workflow Execution Service. May include information related (but not limited to) the workflow descriptor formats, versions supported, the WES API versions supported, and information about general service availability. # noqa: E501 :rtype: ServiceInfo """ return adapter.get_service_info()
693d7c47a235dc96f9c44d993fba25607994f2e3
319
def str_of_tuple(d, str_format):
    """Convert tuple to str. It's just str_format.format(*d).
    Why even write such a function?
    (1) To have a consistent interface for key conversions
    (2) We want a KeyValidationError to occur here

    Args:
        d: tuple of params for str_format
        str_format: Auto fields format string. If you have manual fields, consider auto_field_format_str
            to convert.

    Returns:
        parametrized string

    >>> str_of_tuple(('hello', 'world'), "Well, {} dear {}!")
    'Well, hello dear world!'
    """
    try:
        return str_format.format(*d)
    except Exception as e:
        raise KeyValidationError(e)
b5612efb3b189754cb278f40c7f471284dfc1daa
320
def _intersect(bboxes1, bboxes2): """ bboxes: t x n x 4 """ assert bboxes1.shape[0] == bboxes2.shape[0] t = bboxes1.shape[0] inters = np.zeros((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32) _min = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32) _max = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32) w = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32) h = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32) for i in range(t): np.maximum.outer(bboxes1[i, :, 0], bboxes2[i, :, 0], out=_min) np.minimum.outer(bboxes1[i, :, 2], bboxes2[i, :, 2], out=_max) np.subtract(_max + 1, _min, out=w) w.clip(min=0, out=w) np.maximum.outer(bboxes1[i, :, 1], bboxes2[i, :, 1], out=_min) np.minimum.outer(bboxes1[i, :, 3], bboxes2[i, :, 3], out=_max) np.subtract(_max + 1, _min, out=h) h.clip(min=0, out=h) np.multiply(w, h, out=w) inters += w return inters
91056250d3adf829d1815a016a75423f93adb6c1
321
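A minimal sketch of _intersect on a single frame (t = 1) with one box per set; the overlap of the two 10x10 boxes below is a 5x5 patch, so the summed intersection is 25.

import numpy as np

bboxes1 = np.array([[[0, 0, 9, 9]]], dtype=np.float32)     # shape t x n x 4, here 1 x 1 x 4
bboxes2 = np.array([[[5, 5, 14, 14]]], dtype=np.float32)
print(_intersect(bboxes1, bboxes2))  # [[25.]]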
def convert_x_to_bbox(x, score=None):
    """
    Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
    [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
    """
    w = np.sqrt(np.abs(x[2] * x[3]))
    if w <= 0:
        w = 1
    h = x[2] / w
    if score is None:
        return np.array([x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2.], np.float32)
    else:
        return np.array([x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2., score]).reshape((1, 5))
f49a6b7c306087d92e99acb5bd679c59308f81b3
322
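A short sketch of the centre-form conversion used in SORT-style trackers: x = [cx, cy, s, r] with s the box area and r the aspect ratio.

# A 20x20 box centred at (50, 50): area s = 400, aspect ratio r = 1.
x = [50.0, 50.0, 400.0, 1.0]
print(convert_x_to_bbox(x))              # [40. 40. 60. 60.]
print(convert_x_to_bbox(x, score=0.9))   # [[40.  40.  60.  60.   0.9]]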
from typing import Tuple def decimal_to_boolean_list(num: int, padding: int = 0) -> Tuple[bool, ...]: """ Convert a decimal number into a tuple of booleans, representing its binary value. """ # Convert the decimal into binary binary = bin(num).replace('0b', '').zfill(padding) # Return a tuple of booleans, one for each element of the binary number (it's either '0' or '1' so we can convert # directly to boolean) return tuple(char == '1' for char in binary)
c13831214faece847960089f781cc1c6442205ec
323
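A quick sketch of the decimal-to-boolean conversion above, with and without padding.

print(decimal_to_boolean_list(5))             # (True, False, True)
print(decimal_to_boolean_list(5, padding=8))  # (False, False, False, False, False, True, False, True)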
def get_credentials(fn, url, username, allowed): """Call fn and return the credentials object""" url_str = maybe_string(url) username_str = maybe_string(username) creds = fn(url_str, username_str, allowed) credential_type = getattr(creds, 'credential_type', None) credential_tuple = getattr(creds, 'credential_tuple', None) if not credential_type or not credential_tuple: raise TypeError("credential does not implement interface") cred_type = credential_type if not (allowed & cred_type): raise TypeError("invalid credential type") ccred = ffi.new('git_cred **') if cred_type == C.GIT_CREDTYPE_USERPASS_PLAINTEXT: name, passwd = credential_tuple err = C.git_cred_userpass_plaintext_new(ccred, to_bytes(name), to_bytes(passwd)) elif cred_type == C.GIT_CREDTYPE_SSH_KEY: name, pubkey, privkey, passphrase = credential_tuple if pubkey is None and privkey is None: err = C.git_cred_ssh_key_from_agent(ccred, to_bytes(name)) else: err = C.git_cred_ssh_key_new(ccred, to_bytes(name), to_bytes(pubkey), to_bytes(privkey), to_bytes(passphrase)) else: raise TypeError("unsupported credential type") check_error(err) return ccred
931d01d2c8ea44e1f8522f5dedb14a66367f3d4f
324
def tpack(text, width=100): """Pack a list of words into lines, so long as each line (including intervening spaces) is no longer than _width_""" lines = [text[0]] for word in text[1:]: if len(lines[-1]) + 1 + len(word) <= width: lines[-1] += (' ' + word) else: lines += [word] return lines
e1b1b54a528c8dc2142a750156d3db1f754b4268
325
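A small sketch of greedy word packing into 12-character lines with tpack.

words = "this is a simple test of word packing".split()
for line in tpack(words, width=12):
    print(repr(line))
# 'this is a'
# 'simple test'
# 'of word'
# 'packing'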
import numpy as np
import torch
def _log_evidence_func(arr):
    """Returns an estimate of the log evidence from a set of log importance weights in arr.

    arr has shape TxN where T is the number of trials and N is the number of samples
    for estimation.

    Args:
        arr (torch.FloatTensor of shape TxN): log importance weights

    Returns:
        A tensor of shape (T,) representing the estimates for each set of samples.
    """
    T, N = arr.shape
    log_evidence = torch.logsumexp(arr, dim=1) - np.log(N)
    return log_evidence
6fd0f7a3e6ad677300a1c2d342082417d6c1a2c8
326
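A tiny numerical check for _log_evidence_func: if all N importance weights in a row equal 1 (log weight 0), logsumexp gives log N and the estimate comes out at 0.

import torch

log_weights = torch.zeros(2, 100)        # 2 trials, 100 samples each, all weights equal to 1
print(_log_evidence_func(log_weights))   # approximately tensor([0., 0.]), up to float32 rounding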
import struct def parse_bgp_attr(atype, aval_buf): """Given a type and value buffer, parses a BGP attribute and returns the value parsed""" if atype == BGP_ATYPE_ORIGIN: attr = 'ORIGIN' if len(aval_buf) != 1: return None, None, -1 aval = struct.unpack('B', aval_buf)[0] aval = BGP_ORIGIN_TYPES[aval] return attr, aval, 1 elif atype == BGP_ATYPE_ASPATH: attr = 'ASPATH' segtype, seglen = struct.unpack('BB', aval_buf[:2]) ases = [] segproc = 2 for i in range(seglen): as_, = struct.unpack('>I', aval_buf[segproc:segproc+4]) segproc += 4 ases.append(as_) return attr, ases, len(aval_buf) elif atype == BGP_ATYPE_NEXTHOP: attr = 'NEXTHOP' aval = inet_ntoa(aval_buf) return attr, aval, 4 else: return None, None, len(aval_buf)
337ee8d0178759afead4ef1c55653639ed901fac
328
def getUsage():
    """ Get usage information about running APBS via Python

        Returns (usage)
            usage: Text about running APBS via Python
    """
    usage = "\n\n\
----------------------------------------------------------------------\n\
This driver program calculates electrostatic potentials, energies,\n\
and forces using multigrid methods.\n\
It is invoked as:\n\n\
    python main.py apbs.in\n\
----------------------------------------------------------------------\n\n"

    return usage
c21950b52106400cb20dd9d30a5cf742e98f9da9
330
def run_length_div_decode(x, n, divisor): """Decodes a run length encoded array and scales/converts integer values to float Parameters ---------- x : encoded array of integers (value, repeat pairs) n : number of element in decoded array """ y = np.empty(n, dtype=np.float32) start = 0 for i in range(0, x.shape[0] - 1, 2): end = x[i + 1] + start y[start:end] = x[i] / divisor start = end return y
434edfb44d1225277526233989ece2c91be14b0c
331
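A short sketch of decoding a (value, run-length) pair stream with run_length_div_decode and scaling by the divisor.

import numpy as np

encoded = np.array([10, 3, 25, 2])   # value 10 repeated 3 times, then value 25 repeated 2 times
decoded = run_length_div_decode(encoded, n=5, divisor=10.0)
print(decoded)  # [1.  1.  1.  2.5 2.5]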
def modelFnBuilder(config): """Returns 'model_fn' closure for Estimator.""" def model_fn(features, labels, mode, params): print('*** Features ***') for name in sorted(features.keys()): tf.logging.info(' name = {}, shape = {}'.format(name, features[name].shape)) is_training = (mode == tf.estimator.ModeKeys.TRAIN) # get the data input_texts = features['input_texts'] input_texts_length = features['input_texts_length'] input_chars = features['input_chars'] input_chars_length = features['input_chars_length'] output_tags = labels['output_tags'] if is_training else None # build the model model = MultiTaskIntentModel(config, cg.BATCH_SIZE, is_training, input_texts=input_texts, input_texts_length=input_texts_length, input_chars=input_chars, input_chars_length=input_chars_length, output_tags=output_tags) # predict if mode == tf.estimator.ModeKeys.PREDICT: intent_logits = model.getResults('intent_logits') intent_probs = tf.nn.softmax(intent_logits, axis=-1) intent_labels = tf.math.argmax(intent_probs, axis=-1) tag_logits = model.getResults('tag_logits') viterbi_sequence, viterbi_score = model.decode(logit=tag_logits, sequence_lengths=input_texts_length) predictions = {'intent_labels': intent_labels, 'viterbi_sequence': viterbi_sequence, 'viterbi_score': viterbi_score} output_spec = tf.estimator.EstimatorSpec(mode, predictions) elif mode == tf.estimator.ModeKeys.TRAIN: gold_intent_labels = labels['output_indents'] intent_logits = model.getResults('intent_logits') # max_time = tf.shape(gold_intent_labels)[1] # target_weights = tf.sequence_mask(input_texts_length, max_time, dtype=intent_logits.dtype) batch_size = tf.cast(cg.BATCH_SIZE, dtype=tf.float32) intent_loss = tf.reduce_sum( tf.nn.sparse_softmax_cross_entropy_with_logits( labels=gold_intent_labels, logits=intent_logits)) / batch_size tag_log_likelihood = model.getResults('log_likelihood') tag_loss = tf.reduce_mean(-tag_log_likelihood) loss = intent_loss + tag_loss tvars = tf.trainable_variables() l2_loss = 1e-2 * (tf.reduce_mean([tf.nn.l2_loss(v) for v in tvars])) loss += l2_loss lr = tf.train.polynomial_decay( cg.LEARNING_RATE, tf.train.get_or_create_global_step(), cg.TRAIN_STEPS) lr = tf.maximum(tf.constant(cg.LEARNING_RATE_LIMIT), lr) # create optimizer and update optimizer = tf.train.AdamOptimizer(learning_rate=lr) gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True) clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0) train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step()) logging_hook = tf.train.LoggingTensorHook({'step': tf.train.get_global_step(), 'loss': loss, 'l2_loss': l2_loss, 'lr': lr, 'intent_loss': intent_loss, 'tag_loss': tag_loss}, every_n_iter=1) output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op, training_hooks=[logging_hook]) else: raise NotImplementedError return output_spec return model_fn
ea39f0ed099ec9455667ec79df0f91fdda425783
333
from notifications.utils import notify_people, unotify_people def accreds_validate(request, pk): """Validate an accred""" accreds = [get_object_or_404(Accreditation, pk=pk_, end_date=None) for pk_ in filter(lambda x: x, pk.split(','))] multi_obj = len(accreds) > 1 for accred in accreds: if not accred.rights_can('VALIDATE', request.user): raise Http404 if request.method == 'POST': for accred in accreds: accred.need_validation = False accred.save() accred.user.clear_rights_cache() AccreditationLog(accreditation=accred, who=request.user, type='validated').save() dest_users = accred.unit.users_with_access('ACCREDITATION', no_parent=True) notify_people(request, 'Accreds.Validated', 'accreds_validated', accred, dest_users) unotify_people('Accreds.ToValidate', accred) if multi_obj: messages.success(request, _(u'Accréditations validées !')) else: messages.success(request, _(u'Accréditation validée !')) return redirect('units-views-accreds_list') return render(request, 'units/accreds/validate.html', {'accreds': accreds, 'multi_obj': multi_obj})
9f539b4dacfc8bc4e824af9bff2acc4aad552c6c
334