content: string, 35 to 762k characters
sha1: string, 40 characters
id: int64, 0 to 3.66M
def _get_trigger_func(name, trigger_name): """ Given a valid vulnerability name, get the trigger function corresponding to the vulnerability name and trigger name. If the trigger function isn't found, raise NotFound. """ try: return get_trigger(name, trigger_name) except AttributeError: raise NotFound()
a93207cd7319aa10bfe853fe21bbb04a8c22f3c2
105
def _get_rotated_bounding_box(size, quaternion): """Calculates the bounding box of a rotated 3D box. Args: size: An array of length 3 specifying the half-lengths of a box. quaternion: A unit quaternion specifying the box's orientation. Returns: An array of length 3 specifying the half-lengths of the bounding box of the rotated box. """ corners = ((size[0], size[1], size[2]), (size[0], size[1], -size[2]), (size[0], -size[1], size[2]), (-size[0], size[1], size[2])) rotated_corners = tuple( transformations.quat_rotate(quaternion, corner) for corner in corners) return np.amax(np.abs(rotated_corners), axis=0)
7c70ea23051dacdf2447a252488456c5b4d23901
106
def Cos( a: float = 1., b: float = 1., c: float = 0.) -> InternalLayer: """Affine transform of `Cos` nonlinearity, i.e. `a cos(b*x + c)`. Args: a: output scale. b: input scale. c: input phase shift. Returns: `(init_fn, apply_fn, kernel_fn)`. """ return Sin(a=a, b=b, c=c + np.pi / 2)
7327c68bf5a182cf90fa1d0b0280128c99f323b3
107
def key_make_model(chip): """ Given a chip, return make and model string. Make and model are extracted from chip.misc using the keys "make" and "model". If they are missing it returns None for that value. If misc missing or not a dictionary, (None, None) is returned. Args: chip: A chip named tuple Returns: string: "make_model" from the chip. The string "None" may be returned for one of the positions (or both) if it is missing in the chip. """ output = [None, None] # Ensure we have a misc dictionary if hasattr(chip, "misc"): misc = chip.misc if hasattr(misc, "get"): output[0] = misc.get("make", None) output[1] = misc.get("model", None) return tuple_to_string(output)
8729d95537700be1a468411aa1b51b094a4bb34f
108
from typing import Sequence def alternating_epsilons_actor_core( policy_network: EpsilonPolicy, epsilons: Sequence[float], ) -> actor_core_lib.ActorCore[EpsilonActorState, None]: """Returns actor components for alternating epsilon exploration. Args: policy_network: A feedforward action selecting function. epsilons: epsilons to alternate per-episode for epsilon-greedy exploration. Returns: A feedforward policy. """ epsilons = jnp.array(epsilons) def apply_and_sample(params: networks_lib.Params, observation: networks_lib.Observation, state: EpsilonActorState): random_key, key = jax.random.split(state.rng) actions = policy_network(params, key, observation, state.epsilon) return (actions.astype(jnp.int32), EpsilonActorState(rng=random_key, epsilon=state.epsilon)) def policy_init(random_key: networks_lib.PRNGKey): random_key, key = jax.random.split(random_key) epsilon = jax.random.choice(key, epsilons) return EpsilonActorState(rng=random_key, epsilon=epsilon) return actor_core_lib.ActorCore( init=policy_init, select_action=apply_and_sample, get_extras=lambda _: None)
9a247d5e98cef7e653a074cb7007f5823ec686c7
109
from typing import List from typing import Optional from typing import Dict def get_keywords( current_user: models.User = Depends(deps.get_current_active_user), controller_client: ControllerClient = Depends(deps.get_controller_client), labels: List = Depends(deps.get_personal_labels), q: Optional[str] = Query(None, description="query keywords"), offset: int = Query(0), limit: Optional[int] = Query(None), ) -> Dict: """ Get keywords and aliases """ filter_f = partial(filter_keyword, q) if q else None items = list(labels_to_keywords(labels, filter_f)) if settings.REVERSE_KEYWORDS_OUTPUT: items.reverse() res = {"total": len(items), "items": paginate(items, offset, limit)} return {"result": res}
c835c55464e16debdfcb7c79f63cd7501a9a6e76
110
def _compile_unit(i): """Append gas to unit and update CO2e for pint/iam-unit compatibility""" if " equivalent" in i["unit"]: return i["unit"].replace("CO2 equivalent", "CO2e") if i["unit"] in ["kt", "t"]: return " ".join([i["unit"], i["gas"]]) else: return i["unit"]
0692167e95159d08b306a241baf4eadefdc29b35
111
def get_fdfs_url(file):
    """
    Upload a file or image to FastDFS.
    :param file: file or image object, either binary data or a local file
    :return: URL of the file or image in FastDFS
    """
    # Create a FastDFS client connection
    fdfs_client = Fdfs_client(settings.FASTDFS_CONF_PATH)
    """
    client.upload_by_filename(<file name>), client.upload_by_buffer(<file bytes data>)
    """
    # Upload the file or image to FastDFS
    if isinstance(file, InMemoryUploadedFile):
        result = fdfs_client.upload_by_buffer(file.read())
    else:
        result = fdfs_client.upload_by_filename(file)
    """
    result = {
        'Group name': 'group1',  # FastDFS server Storage group name
        'Remote file_id': 'group1/M00/00/00/wKgThF0LMsmATQGSAAExf6lt6Ck10.jpeg',  # stored location (index), usable for download
        'Status': 'Upload successed.',  # upload result feedback
        'Local file name': '/home/python/Desktop/upload_Images/02.jpeg',  # real path of the uploaded file
        'Uploaded size': '76.00KB',  # file size
        'Storage IP': '192.168.19.132'}  # IP of the FastDFS server Storage
    """
    # Check whether the upload succeeded; result is a dict
    if result['Status'] != 'Upload successed.':
        return Response(status=403)
    # Get the path of the uploaded file or image
    file_url = result['Remote file_id']
    return file_url
ba23e4f416b4b418706b3253cb26ff63b8e62fc6
112
from typing import Any
from typing import List


def get_all_edge_detects(clip: vs.VideoNode, **kwargs: Any) -> List[vs.VideoNode]:
    """Allows you to get all masks inheriting from EdgeDetect.

    Args:
        clip (vs.VideoNode): Source clip.
        kwargs: Arguments passed to EdgeDetect().get_mask

    Returns:
        List[vs.VideoNode]: List of masks.

    Example:
        from vardefunc.mask import get_all_edge_detects

        clip.set_output(0)
        for i, mask in enumerate(get_all_edge_detects(get_y(clip)), start=1):
            mask.set_output(i)
    """
    masks = [
        edge_detect().get_mask(clip, **kwargs).text.Text(edge_detect.__name__)  # type: ignore
        for edge_detect in EdgeDetect.__subclasses__()
    ]
    return masks
7324568af5e6ff27f8f7d14b27a25e19666903a4
113
from datetime import datetime

import pytz


def get_ustz(localdt, timezone):
    """
    Returns the common US timezone name associated with a local datetime and
    an IANA timezone.

    There are two common timezone conventions. One is the Olson/IANA and the
    other is the Microsoft convention. For example, the closest IANA timezone
    for Boston, MA is America/New_York. More commonly, this is known as the
    Eastern time zone. The goal of this function is to return the common name
    for a timezone in the contiguous US.

    Note that Arizona has its own IANA timezone and does not observe daylight
    savings. So depending on the time of year, the offset for Arizona will
    correspond to either Pacific or Mountain time.

    Parameters
    ----------
    localdt : datetime
        The local datetime instance.
    timezone : str
        The IANA timezone associated with `localdt`. This should be a
        timezone for the contiguous US.

    Returns
    -------
    tz : str
        The common name for the timezone. This will be one of Pacific,
        Mountain, Central, or Eastern.
    """
    # Use noon to guarantee that we have the same day in each timezone.
    # This is desirable in the sense that we don't want someone's tweet jumping
    # from Eastern to Central, for example, at the end of daylight savings time.
    localdt = datetime(localdt.year, localdt.month, localdt.day, 12)
    timezone = pytz.timezone(timezone)
    dt = timezone.localize(localdt)
    for tz, tz_ref in TIMEZONES:
        dt_new = dt.astimezone(tz_ref)
        if dt_new.utcoffset() == dt.utcoffset():
            return tz
c08d7407a8025522baf559becb649bde7aad3fa0
114
def plot_rolling_sharpe(returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6, **kwargs):
    """
    Computes the rolling Sharpe ratio time series.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    rolling_window : int, optional
        The days window over which to compute the Sharpe ratio.
    **kwargs, optional
        Accepted for compatibility with the plotting API; currently unused.

    Returns
    -------
    rolling_sharpe_ts : pd.Series
        The rolling Sharpe ratio versus date.
    """
    rolling_sharpe_ts = timeseries.rolling_sharpe(
        returns, rolling_window)

    return rolling_sharpe_ts
e9a3ebcbfc46c403c82cf2b65502e30f3526ee15
115
def ResolveNamespace(namespace): """Validate app namespace, providing a default. If the argument is None, namespace_manager.get_namespace() is substituted. Args: namespace: The namespace argument value to be validated. Returns: The value of namespace, or the substituted default. Always a non-empty string or None. Raises: BadArgumentError if the value is not a string. """ if namespace is None: namespace = namespace_manager.get_namespace() else: namespace_manager.validate_namespace( namespace, datastore_errors.BadArgumentError) return namespace
c84e30250a7b1eed200de0bd9a59e77df05829e1
116
import math def Calculo_por_etapas(Diccionario): """Calculo de la hornilla por etapas""" Lista_Contenido=[] Lista_columnas=[] #Normalización de la capacidad de la hornilla #Mem_dias=float(Diccionario['¿Cada cuantos días quiere moler? (días)']) #Mem_Temp=Normalizar_Capacidad(float(Diccionario['Capacidad estimada de la hornilla']),Mem_dias) #print(float(Diccionario['Capacidad estimada de la hornilla'])) #print(Mem_Temp) Etapas=int(float(Diccionario['Etapas']))#Mem_Temp[1] #Etapas=12 #Saturador "minimo son dos etapas" if (Etapas>2): Factor_Division=Etapas-2 else: Factor_Division=2 Etapas=2 #Caracteristicas de las celdas de cada columna (Lista_columnas) #Fila 0 concentración de solidos inicial #Fila 1 Concentración de solidos final #Fila 2 Concentración promedio #Fila 3 Masa de jugo de entrada #Fila 4 Calor Especifico P Cte jugo #Fila 5 Densidad del Jugo #Fila 6 Volumen de jugo kg #Fila 7 Volumen de jugo en L #Fila 8 Temperatura de Entrada #Fila 9 Temperatura de Salida #Fila 10 Entalpia de Vaporización #Fila 11 Masa de Agua a Evaporar #Fila 12 Calor Nece Calc por Etapa for i in range(13): for j in range (Etapas): Lista_columnas.append(float(i+j)) Lista_Contenido.append(Lista_columnas) Lista_columnas=[] Lista_Contenido[0][0]=float(Diccionario['CSS del jugo pos-evaporación']) #Concentracion_solidos_inicial (CSS02) Lista_Contenido[1][0]=float(Diccionario['CSS panela']) #Concentracion_solidos_final (CSSF1) Lista_Contenido[0][Etapas-1]=float(Diccionario['CSS del jugo de Caña']) #Concentracion_solidos_inicial (CSS01) Lista_Contenido[1][Etapas-1]=float(Diccionario['CSS del jugo clarificado']) #Concentracion_solidos_final (CSSF1) if(Etapas>2): ite=0 for i in range(Etapas-2,0,-1): Lista_Contenido[0][i]=Lista_Contenido[1][i+1] if(ite==0): Lista_Contenido[1][i]=((Lista_Contenido[0][0]-Lista_Contenido[0][i])/Factor_Division)+Lista_Contenido[0][i] ite=ite+1 else: Lista_Contenido[1][i]=((Lista_Contenido[0][0]-Lista_Contenido[0][Etapas-2])/Factor_Division)+Lista_Contenido[0][i] for i in range(Etapas-1,-1,-1): #Concentración promedio=(Concentracion_solidos_inicial+Concentracion_solidos_final)/2 Lista_Contenido[2][i]=(Lista_Contenido[0][i]+Lista_Contenido[1][i])/2 if(i==Etapas-1): #Masa de jugo de entrada Lista_Contenido[3][i]=float(Diccionario['A clarificación']) else: #Masa de jugo de entrada=(Masa de jugo etapa anterior*CCS inicial etapa anterior)/CCS Final etapa anterior Lista_Contenido[3][i]=Lista_Contenido[3][i+1]*Lista_Contenido[0][i+1]/Lista_Contenido[1][i+1] #Calor_Especifico_P_Cte_jugo=4.18*(1-(0.006*Concetracion_promedio)) Lista_Contenido[4][i]=4.18*(1-(0.006*Lista_Contenido[2][i])) #Densidad_del_Jugo=997.39+(4.46*Concetracion_promedio) Lista_Contenido[5][i]=997.39+(4.46*Lista_Contenido[2][i]) #Volumen_jugo=Masa_jugo_de_entrada/Densidad_del_Jugo Lista_Contenido[6][i]=Lista_Contenido[3][i]/Lista_Contenido[5][i] #Volumen_jugo_L=Volumen_jugo*1000 Lista_Contenido[7][i]=Lista_Contenido[6][i]*1000.0 if(i==Etapas-1): #Temperatura_Entrada=Temperatura ambiente Lista_Contenido[8][i]=float(Diccionario['Temperatura del ambiente']) else: #Temperatura_Entrada=Temperatura_ebullición_agua+0.2209*math.exp(0.0557*Concentracion_solidos_inicial) Lista_Contenido[8][i]=Lista_Contenido[9][i+1] #Temperatura_Salida=G37+0.2209*math.exp(0.0557*Concentracion_solidos_final) Lista_Contenido[9][i]=float(Diccionario['Temperatura de ebullición del agua'])+0.2209*math.exp(0.0557*Lista_Contenido[1][i]) #Entalpia_Vaporizacion=(2492.9-(2.0523*Temperatura_Entrada))-(0.0030752*(Temperatura_Entrada**2)) 
Lista_Contenido[10][i]=(2492.9-(2.0523*Lista_Contenido[8][i]))-(0.0030752*(Lista_Contenido[8][i]**2)) #Masa_Agua_Evaporar=Masa_jugo_de_entrada-(Masa_jugo_de_entrada*Concentracion_solidos_inicial/Concentracion_solidos_final) Lista_Contenido[11][i]=Lista_Contenido[3][i]-(Lista_Contenido[3][i]*Lista_Contenido[0][i]/Lista_Contenido[1][i]) #Calor_por_Etapa=(Masa_jugo_de_entrada*Calor_Especifico_P_Cte_jugo*(Temperatura_Salida-Temperatura_Entrada)+Masa_Agua_Evaporar*Entalpia_Vaporizacion)/3600 Lista_Contenido[12][i]=(Lista_Contenido[3][i]*Lista_Contenido[4][i]*(Lista_Contenido[9][i]-Lista_Contenido[8][i])+Lista_Contenido[11][i]*Lista_Contenido[10][i])/3600.0 #Fijar decimales en 3 for j in range (13): for i in range (Etapas): Lista_Contenido[j][i]=round(Lista_Contenido[j][i],3) #Cambiar la salida o posicion de la paila de punteo a la paila 3 o 4 Lista_contenido_2=[] L_aux=[] for i in Lista_Contenido: inio=3 if (Etapas!=7): L_aux.append(i[2]) L_aux.append(i[1]) L_aux.append(i[0]) inio=3 else: L_aux.append(i[3]) L_aux.append(i[2]) L_aux.append(i[1]) L_aux.append(i[0]) inio=4 for t in range(inio,len(i)): L_aux.append(i[t]) Lista_contenido_2.append(L_aux) L_aux=[] Lista_Contenido=Lista_contenido_2 Etiquetas=[ 'Concentracion de Solidos Inicial [ºBrix]', 'Concentracion de Solidos Final [ºBrix]', 'Concentracion de Solidos Promedio [ºBrix]', 'Masa de Jugo Entrada [Kg]', 'Calor Especifico P Cte jugo [kJ/Kg °C]', 'Densidad del Jugo [kg/m3]', 'Volumen de jugo [m^3/kg]', 'Volumen de jugo [L]', 'Temperatura de Entrada [ºC]', 'Temperatura de Salida [ºC]', 'Entalpia de Vaporización [kJ/kg]', 'Masa de Agua a Evaporar [kg]', 'Calor Nece Calc por Etapa [kW]' ] Dict_aux=dict(zip(Etiquetas,Lista_Contenido)) Dict_aux_2=dict(zip(['Etapas'],[Etapas])) Dict_aux.update(Dict_aux_2) return Dict_aux
c3b531e1b3fbb3491a9d7a5521c216e5ce5c5b38
117
def venus_equ_proportional_minute(x, y, e, R): """ Venus equation proportional minute :param x: true argument (av) in degree :param y: mean center (cm) in degree :param e: eccentricity :param R: radius of the epicycle :return: minutes proportional in degree """ return utils.minuta_proportionalia(x, R, e, y)
5e1dfc8bdd00cbced144d8d4fdf3c62bfe12346b
118
def e2_cond(p, m, sigma, alpha, mu):
    """
    This function depends on the gamma function.
    Conditional mean of the square of the normal distribution.
    See the article for more information.

    Parameters
    ----------
    p : float
        Proportion of persistent species.
    m : float
        Mean of the persistent species.
    sigma : float
        Root square mean of the persistent species.
    alpha : float
        Parameter of the model - Interaction strength.
    mu : float
        Parameter of the model - Interaction drift.

    Returns
    -------
    float
        Conditional mean associated to the system.
    """
    # The value delta is similar in the article.
    delta = alpha/(sigma*np.sqrt(p))*(1+mu*p*m)
    p_1 = np.exp(-delta**2/2)
    p_2 = 1-stats.norm.cdf(-delta)
    return (1/np.sqrt(2*np.pi))*-delta*p_1/p_2+1
a39dfe45e94dbb33c99ffa1c21b7cd0eb2032fd9
119
from collections import defaultdict def concline_generator(matches, idxs, df, metadata, add_meta, category, fname, preserve_case=False): """ Get all conclines :param matches: a list of formatted matches :param idxs: their (sent, word) idx """ conc_res = [] # potential speedup: turn idxs into dict mdict = defaultdict(list) # if remaking idxs here, don't need to do it earlier idxs = list(matches.index) for mid, (s, i) in zip(matches, idxs): #for s, i in matches: mdict[s].append((i, mid)) # shorten df to just relevant sents to save lookup time df = df.loc[list(mdict.keys())] # don't look up the same sentence multiple times for s, tup in sorted(mdict.items()): sent = df.loc[s] if not preserve_case: sent = sent.str.lower() meta = metadata[s] sname = meta.get('speaker', 'none') for i, mid in tup: if not preserve_case: mid = mid.lower() ix = '%d,%d' % (s, i) start = ' '.join(sent.loc[:i-1].values) end = ' '.join(sent.loc[i+1:].values) lin = [ix, category, fname, sname, start, mid, end] if add_meta: for k, v in sorted(meta.items()): if k in ['speaker', 'parse', 'sent_id']: continue if isinstance(add_meta, list): if k in add_meta: lin.append(v) elif add_meta is True: lin.append(v) conc_res.append(lin) return conc_res
b0f9cc9039f78996b38ed87f5faf3b725226a7dd
120
from tqdm import tqdm


def match_detections(predicted_data, gt_data, min_iou):
    """Carry out matching between detected and ground truth bboxes.

    :param predicted_data: List of predicted bboxes
    :param gt_data: List of ground truth bboxes
    :param min_iou: Min IoU value to match bboxes
    :return: List of matches
    """
    all_matches = {}

    total_gt_bbox_num = 0
    matched_gt_bbox_num = 0

    frame_ids = gt_data.keys()
    for frame_id in tqdm(frame_ids, desc='Matching detections'):
        if frame_id not in predicted_data.keys():
            all_matches[frame_id] = []
            continue

        gt_bboxes = gt_data[frame_id]
        predicted_bboxes = predicted_data[frame_id]
        total_gt_bbox_num += len(gt_bboxes)

        similarity_matrix = calculate_similarity_matrix(gt_bboxes, predicted_bboxes)

        matches = []
        for _ in range(len(gt_bboxes)):
            best_match_pos = np.unravel_index(similarity_matrix.argmax(),
                                              similarity_matrix.shape)
            best_match_value = similarity_matrix[best_match_pos]

            if best_match_value <= min_iou:
                break

            gt_id = best_match_pos[0]
            predicted_id = best_match_pos[1]

            similarity_matrix[gt_id, :] = 0.0
            similarity_matrix[:, predicted_id] = 0.0

            matches.append((gt_id, predicted_id))
            matched_gt_bbox_num += 1

        all_matches[frame_id] = matches

    print('Matched gt bbox: {} / {} ({:.2f}%)'
          .format(matched_gt_bbox_num, total_gt_bbox_num,
                  100. * float(matched_gt_bbox_num) / float(max(1, total_gt_bbox_num))))

    return all_matches
1030ddd34d174cca343f0c888502f191d6cf9af4
121
def map_string(affix_string: str, punctuation: str, whitespace_only: bool = False) -> str: """Turn affix string into type char representation. Types are 'w' for non-whitespace char, and 's' for whitespace char. :param affix_string: a string :type: str :param punctuation: the set of characters to treat as punctuation :type punctuation: str :param whitespace_only: whether to treat only whitespace as word boundary or also include (some) punctuation :type whitespace_only: bool :return: the type char representation :rtype: str """ if whitespace_only: return "".join(["s" if char == " " else "w" for char in affix_string]) else: return "".join(["s" if char == " " or char in punctuation else "w" for char in affix_string])
6258f9e57a9081a1c791ec7c22f855079a99cdfb
122
import yaml def config_loads(cfg_text, from_cfg=None, whitelist_keys=None): """Same as config_load but load from a string """ try: cfg = AttrDict(yaml.load(cfg_text)) except TypeError: # empty string cfg = AttrDict() if from_cfg: if not whitelist_keys: whitelist_keys = [] _validate_config(cfg, from_cfg, whitelist_keys) return from_cfg + cfg return cfg
02a5aa3713590fb6f1fcd0665e55698b857f6f1c
123
def align_column ( table , index , align = 'left') :
    """Align the certain column of the table
    >>> aligned = align_column ( table , 1 , 'left' )
    """
    nrows = [ list ( row ) for row in table ]
    lmax  = 0

    for row in nrows :
        if index < len ( row ) :
            item = decolorize ( row [ index ] )
            lmax = max ( lmax , len ( item ) )

    if not lmax : return table

    aleft  = align.lower() in left
    aright = not aleft and align.lower() in right

    new_table = []
    for row in nrows :
        if index < len ( row ) :
            item   = decolorize ( row [ index ] )
            nspace = lmax - len ( item )
            if   aleft  :
                item = row [ index ] + nspace * ' '
            elif aright :
                item = nspace * ' ' + row [ index ]
            else :
                sl   = nspace // 2
                sr   = nspace - sl
                item = sl * ' ' + row [ index ] + sr * ' '
            row [ index ] = item
        new_table.append ( row )

    return [ tuple ( row ) for row in new_table ]
3042d885b902ad188a919e1aea3d285a889b7935
124
def make_form(domain, parent, data, existing=None): """simulate a POST payload from the location create/edit page""" location = existing or Location(domain=domain, parent=parent) def make_payload(k, v): if hasattr(k, '__iter__'): prefix, propname = k prefix = 'props_%s' % prefix else: prefix, propname = 'main', k return ('%s-%s' % (prefix, propname), v) payload = dict(make_payload(k, v) for k, v in data.iteritems()) return LocationForm(location, payload)
2db1d21e808e8c94ca7dbfae85fff29974b63c52
125
import torch def abs_densitye_seed(model, inputs, args, tokenizer, **kwargs): """Maximum density sampling by calculating information density for example when passed through [model]""" # print('getting embedding_a') X_a = load_and_embed_examples(args, model, tokenizer, evaluate=True, text = 'text_a') # print('getting embedding_b') X_b = load_and_embed_examples(args, model, tokenizer, evaluate=True, text = 'text_b') X = np.absolute(X_a - X_b) similarity_mtx = 1 / (1 + pairwise_distances(X, X, metric='euclidean')) scores = torch.tensor(similarity_mtx.mean(axis=1)) return scores
098bf5cb6afdc23f6e22a40c90d2a93165be4c8a
127
def gains2utvec(g): """Converts a vector into an outer product matrix and vectorizes its upper triangle to obtain a vector in same format as the CHIME visibility matrix. Parameters ---------- g : 1d array gain vector Returns ------- 1d array with vectorized form of upper triangle for the outer product of g """ n = len(g) G = np.dot(g.reshape(n, 1), g.conj().reshape(1, n)) return mat2utvec(G)
3782bd1c4215b97e6a398700ef8e7f7bf65b0416
129
from typing import List from typing import Dict def get_user_surveys(user: User) -> List[Dict]: """ Returns a list of all surveys created by specific user with survey secret. """ return list(map(Survey.get_api_brief_result_with_secrets, db.get_all_surveys(user)))
76bc202bfc770814467f8ff7dc35a829c8bde9f0
130
def combine_mpgs(objs, cls=None): """ Combine multiple multipart geometries into a single multipart geometry of geometry collection. """ # Generate new list of individual geometries new = [] for obj in objs: if isinstance(obj, shapely.geometry.base.BaseMultipartGeometry): new.extend(list(obj)) elif isinstance(obj, shapely.geometry.base.BaseGeometry): new.extend([obj]) else: raise TypeError("Invalid geometry type") # Convert list to geometry collection or provided class if cls is None: new = shapely.geometry.collection.GeometryCollection(new) else: new = cls(new) return new
de3050005152c9da3072e76e24c0087931473d61
131
def get_polygon_point_dist(poly, pt): """Returns the distance between a polygon and point. Parameters ---------- poly : libpysal.cg.Polygon A polygon to compute distance from. pt : libpysal.cg.Point a point to compute distance from Returns ------- dist : float The distance between ``poly`` and ``point``. Examples -------- >>> poly = Polygon([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((0, 1))]) >>> pt = Point((2, 0.5)) >>> get_polygon_point_dist(poly, pt) 1.0 >>> pt2 = Point((0.5, 0.5)) >>> get_polygon_point_dist(poly, pt2) 0.0 """ if get_polygon_point_intersect(poly, pt) is not None: dist = 0.0 else: part_prox = [] for vertices in poly._vertices: vx_range = range(-1, len(vertices) - 1) seg = lambda i: LineSegment(vertices[i], vertices[i + 1]) _min_dist = min([get_segment_point_dist(seg(i), pt)[0] for i in vx_range]) part_prox.append(_min_dist) dist = min(part_prox) return dist
a3a6feff77440bd9d35029f8976564774e4f4cc1
132
def score_bearing( wanted: LocationReferencePoint, actual: PointOnLine, is_last_lrp: bool, bear_dist: float ) -> float: """Scores the difference between expected and actual bearing angle. A difference of 0° will result in a 1.0 score, while 180° will cause a score of 0.0.""" bear = compute_bearing(wanted, actual, is_last_lrp, bear_dist) return score_angle_sector_differences(wanted.bear, bear)
3027edd5fd2055ade160e20b1d2b01c25aa32a30
133
import json

# node_link_graph comes from networkx
from networkx.readwrite import json_graph


def load_graph (graph_path):
    """ load a graph from JSON """
    with open(graph_path) as f:
        data = json.load(f)
        graph = json_graph.node_link_graph(data, directed=True)

    return graph
7f012360861410803edbd628d8ba685e1a9ee936
134
def assign_bond_states_to_dataframe(df: pd.DataFrame) -> pd.DataFrame: """ Takes a ``PandasPDB`` atom dataframe and assigns bond states to each atom based on: Atomic Structures of all the Twenty Essential Amino Acids and a Tripeptide, with Bond Lengths as Sums of Atomic Covalent Radii Heyrovska, 2008 First, maps atoms to their standard bond states (:const:`~graphein.protein.resi_atoms.DEFAULT_BOND_STATE`). Second, maps non-standard bonds states (:const:`~graphein.protein.resi_atoms.RESIDUE_ATOM_BOND_STATE`). Fills NaNs with standard bond states. :param df: Pandas PDB dataframe :type df: pd.DataFrame :return: Dataframe with added ``atom_bond_state`` column :rtype: pd.DataFrame """ # Map atoms to their standard bond states naive_bond_states = pd.Series(df["atom_name"].map(DEFAULT_BOND_STATE)) # Create series of bond states for the non-standard states ss = ( pd.DataFrame(RESIDUE_ATOM_BOND_STATE) .unstack() .rename_axis(("residue_name", "atom_name")) .rename("atom_bond_state") ) # Map non-standard states to the dataframe based on the residue and atom name df = df.join(ss, on=["residue_name", "atom_name"]) # Fill the NaNs with the standard states df = df.fillna(value={"atom_bond_state": naive_bond_states}) return df
6c8b204a6d4ca30b1fac46dc08b74ba47d7089be
135
def lastero(f, B=None): """ Last erosion. y = lastero(f, B=None) `lastero` creates the image y by computing the last erosion by the structuring element B of the image f . The objects found in y are the objects of the erosion by nB that can not be reconstructed from the erosion by (n+1)B , where n is a generic non negative integer. The image y is a proper subset of the morphological skeleton by B of f . Parameters ---------- f : Binary image. B : Structuring Element (default: 3x3 elementary cross). Returns ------- y : Binary image. """ assert isbinary(f),'pymorph.lastero: can only process binary images' if B is None: B = secross() dt = dist(f,B) return regmax(dt,B)
094cd5f93959d82487fb9b4518d751763fa79901
136
def Sparse2Raster(arr, x0, y0, epsg, px, py, filename="", save_nodata_as=-9999): """ Sparse2Rastersave_nodata_as """ BS = 256 geotransform = (x0, px, 0.0, y0, 0.0, -(abs(py))) srs = osr.SpatialReference() srs.ImportFromEPSG(int("%s" % (epsg))) projection = srs.ExportToWkt() if issparse(arr): m, n = arr.shape if m > 0 and n > 0: dtype = str(arr.dtype) if dtype in ["uint8"]: fmt = gdal.GDT_Byte elif dtype in ["uint16"]: fmt = gdal.GDT_UInt16 elif dtype in ["uint32"]: fmt = gdal.GDT_UInt32 elif dtype in ["float32"]: fmt = gdal.GDT_Float32 elif dtype in ["float64"]: fmt = gdal.GDT_Float64 else: fmt = gdal.GDT_Float64 CO = ["BIGTIFF=YES", "TILED=YES", "BLOCKXSIZE=256", "BLOCKYSIZE=256", 'COMPRESS=LZW'] driver = gdal.GetDriverByName("GTiff") dataset = driver.Create(filename, n, m, 1, fmt, CO) if (geotransform != None): dataset.SetGeoTransform(geotransform) if (projection != None): dataset.SetProjection(projection) band = dataset.GetRasterBand(1) band.SetNoDataValue(save_nodata_as) for i in range(0, m, BS): for j in range(0, n, BS): BY = min(m - i, BS) BX = min(n - j, BS) a = arr[i:i + BY, j:j + BX].todense() if save_nodata_as==0 and (np.isnan(a)).all(): #do nothing pass else: band.WriteArray(a, j, i) dataset = None return filename return None
23e23c0c1ea59f37fbf52a97b8d8e70933c1cd55
137
def modularity(modules, G, L):
    """
    Calculate modularity.

    modules = list of nx.Graph objects
    G = graph
    L = num of links
    """
    N_m = len(modules)

    M = 0.0
    for s in range(N_m):
        l_s = 0.0
        d_s = 0
        for i in modules[s]:
            l_s += float(modules[s].degree(i))
            d_s += float(G.degree(i))
        M += (l_s / L) - (d_s / (2.0 * L))**2
    return M
fc818a1f8cda14c04f90c94b699853465da11797
138
def nonan_compstat_tstat_scan(dist, aInd, bInd, returnMaxInds = False): """ For local sieve analysis, compare A and B group for each site using a max t-statistic over a parameter space filteredDist: [ptid x sites x params] ndarray Returns tstat array [sites] aInd, bInd: Boolean row index for the two groups """ a = dist[aInd] b = dist[bInd] aN = aInd.sum() bN = bInd.sum() tstat = tstatistic(a, b, axis = 0, equal_var = False) """se = np.sqrt((aN-1)*np.var(a,axis=0)/((aN+bN) - 2) + (bN-1)*np.var(b,axis=0)/((aN+bN) - 2)) tstat = (np.mean(a,axis=0) - np.mean(b,axis=0)) / se""" """Even in the nonan cases, the tstat can be nan if there is no variation in either group (divide by zero)""" sitesNani = np.all(np.isnan(tstat), axis=1) """For sites with all nans across params, set all to 0. this makes maxi = 0""" tstat[sitesNani,:] = 0 """Zeros are better than returning nan because if this perm produces a nan result then it is not as extreme as observed (which is probably also nan)""" maxi = np.nanargmax(np.abs(tstat), axis=1) inds = np.ravel_multi_index((np.arange(maxi.shape[0]), maxi), tstat.shape) if not returnMaxInds: return tstat.flat[inds] else: return tstat.flat[inds], maxi
eae9c1c045e4ebda7372d8c23fbb447cd2c7a4cf
139
import re def extract_digits_from_end_of_string(input_string): """ Gets digits at the end of a string :param input_string: str :return: int """ result = re.search(r'(\d+)$', input_string) if result is not None: return int(result.group(0))
aae771a051a228c53c36062437de65ae4aa15d44
141
import torch def move_bdim_to_front(x, result_ndim=None): """ Returns a tensor with a batch dimension at the front. If a batch dimension already exists, move it. Otherwise, create a new batch dimension at the front. If `result_ndim` is not None, ensure that the resulting tensor has rank equal to `result_ndim`. """ x_dim = len(x.shape) x_bdim = x.bdim if x_bdim is None: x = torch.unsqueeze(x, 0) else: x = torch.movedim(x, x_bdim, 0) if result_ndim is None: return x diff = result_ndim - x_dim - (x_bdim is None) for _ in range(diff): x = torch.unsqueeze(x, 1) return x
313a1837b6c3b451cebacaa7815f2631dfa387e5
142
def paginate(**options): """ Automatically force request pagination for endpoints that shouldn't return all items in the database directly. If this decorator is used, ``limit`` and ``offset`` request arguments are automatically included in the request. The burden is then on developers to do something with those ``limit`` and ``offset`` arguments. An example request header set by this decorator is as follows: .. code-block:: text Link: <https://localhost/items?limit=50&offset=50>; rel="next", <https://localhost/items?limit=50&offset=500>; rel="last" Args: limit (int): Number of entries to limit a query by. total (int, callable): Number or callable for determining the total number of records that can be returned for the request. This is used in determining the pagination header. """ if 'total' not in options: raise AssertionError( '`@paginate` decorator requires `total=` parameter ' 'for determining total number of records to paginate. ' 'See the documentation for more details.') def decorator(func): @wraps(func) def inner(*args, **kwargs): # only paginate on get requests if request.method != 'GET': return func(*args, **kwargs) # format parameters limit = request.args.get('limit', options.get('limit')) offset = int(request.args.get('offset', options.get('offset', 0))) total = options['total']() if callable(options['total']) else options['total'] url = options.get('url', request.base_url) # config request parameters request.args = request.args.copy() request.args.setdefault('limit', limit) request.args.setdefault('offset', offset) # if no need to paginate, return without setting headers if limit is None: return func(*args, **kwargs) limit = int(limit) # add next page link headers = {} next_page = '<{}?limit={}&offset={}>'.format(url, limit, offset + limit) headers['Link'] = '{}; rel="next"'.format(next_page) # add last page link and header if options['total'] is not None: total = options['total']() if callable(options['total']) else options['total'] last_page = '<{}?limit={}&offset={}>'.format(url, limit, offset + limit) headers['Link'] += ', {}; rel="last"'.format(last_page) headers['X-Total-Count'] = str(total) # call the function and create response response = func(*args, **kwargs) # if a specific response has already been crafted, use it if isinstance(response, Response): return response # normalize response data if not isinstance(response, tuple): response = [response] response = list(response) if hasattr(response[0], 'json'): content_length = len(response[0].json) else: content_length = len(response[0]) if len(response) == 1: response.append(200) if len(response) == 2: response.append({}) # if the response data is equal to the pagination, it's # truncated and needs updated headers/status if content_length == limit: response[1] = 206 response[2].update(headers) return tuple(response) return inner return decorator
f2d7f38007c235507dd5f2eaed737292679440b9
143
import datasets def fetch_basc_vascular_atlas(n_scales='scale007', target_affine=np.diag((5, 5, 5))): """ Fetch the BASC brain atlas given its resolution. Parameters ---------- hrf_atlas: str, BASC dataset name possible values are: 'scale007', 'scale012', 'scale036', 'scale064', 'scale122', 'scale197', 'scale325', 'scale444' target_affine : np.array, (default=np.diag((5, 5, 5))), affine matrix for the produced Nifti images Return ------ mask_full_brain : Nifti Image, full mask brain atlas_rois : Nifti Image, ROIs atlas """ if n_scales not in valid_scales: raise ValueError(f"n_scales should be in {valid_scales}, " f"got '{n_scales}'") basc_dataset = datasets.fetch_atlas_basc_multiscale_2015(version='sym') atlas_rois_fname = basc_dataset[n_scales] atlas_to_return = image.load_img(atlas_rois_fname) atlas_to_return = image.resample_img(atlas_to_return, target_affine, interpolation='nearest') brain_mask = image_nilearn.binarize_img(atlas_to_return, threshold=0) return brain_mask, atlas_to_return
86e6ded04118f5a1a3dc76f5f5a29d241e5071dc
144
def _get_time_slices( window_start, window, projection, # Defer calling until called by test code resampling_scale, lag = 1, ): """Extracts the time slice features. Args: window_start: Start of the time window over which to extract data. window: Length of the window (in days). projection: projection to reproject all data into. resampling_scale: length scale to resample data to. lag: Number of days before the fire to extract the features. Returns: A list of the extracted EE images. """ image_collections, time_sampling = _get_all_image_collections() window_end = window_start.advance(window, 'day') drought = image_collections['drought'].filterDate( window_start.advance(-lag - time_sampling['drought'], 'day'), window_start.advance( -lag, 'day')).median().reproject(projection).resample('bicubic') vegetation = image_collections['vegetation'].filterDate( window_start.advance(-lag - time_sampling['vegetation'], 'day'), window_start.advance( -lag, 'day')).median().reproject(projection).resample('bicubic') weather = image_collections['weather'].filterDate( window_start.advance(-lag - time_sampling['weather'], 'day'), window_start.advance(-lag, 'day')).median().reproject( projection.atScale(resampling_scale)).resample('bicubic') fire = image_collections['fire'].filterDate(window_start, window_end).map( ee_utils.remove_mask).max() detection = fire.clamp(6, 7).subtract(6).rename('detection') return [drought, vegetation, weather, fire, detection]
8350cc7fcd61b5aa53863ad00463d0eb3cc9d89e
145
def hash_parameters(keys, minimize=True, to_int=None): """ Calculates the parameters for a perfect hash. The result is returned as a HashInfo tuple which has the following fields: t The "table parameter". This is the minimum side length of the table used to create the hash. In practice, t**2 is the maximum size of the output hash. slots The original inputs mapped to a vector. This is the hash function. r The displacement vector. This is the displacement of the given row in the result vector. To find a given value, use ``x + r[y]``. offset The amount by which to offset all values (once converted to ints) to_int A function that converts the input to an int (if given). Keyword parameters: ``minimize`` Whether or not offset all integer keys internally by the minimum value. This typically results in smaller output. ``to_int`` A callable that converts the input keys to ints. If not specified, all keys should be given as ints. >>> hash_parameters([1, 5, 7], minimize=False) HashInfo(t=3, slots=(1, 5, 7), r=(-1, -1, 1), offset=0, to_int=None) >>> hash_parameters([1, 5, 7]) HashInfo(t=3, slots=(1, 5, 7), r=(0, 0, 2), offset=-1, to_int=None) >>> l = (0, 3, 4, 7 ,10, 13, 15, 18, 19, 21, 22, 24, 26, 29, 30, 34) >>> phash = hash_parameters(l) >>> phash.slots (18, 19, 0, 21, 22, 3, 4, 24, 7, 26, 30, 10, 29, 13, 34, 15) For some values, the displacement vector will be rather empty: >>> hash_parameters('Andrea', to_int=ord).r (1, None, None, None, 0, -3, 4, None) """ # If to_int is not assigned, simply use the identity function. if to_int is None: to_int = __identity key_to_original = {to_int(original): original for original in keys} # Create a set of all items to be hashed. items = list(key_to_original.keys()) if minimize: offset = 0 - min(items) items = frozenset(x + offset for x in items) else: offset = 0 # 1. Start with a square array (not stored) that is t units on each side. # Choose a t such that t * t >= max(S) t = choose_best_t(items) assert t * t > max(items) and t * t >= len(items) # 2. Place each key K in the square at location (x,y), where # x = K mod t, y = K / t. row_queue = place_items_in_square(items, t) # 3. Arrange rows so that they'll fit into one row and generate a # displacement vector. final_row, displacement_vector = arrange_rows(row_queue, t) # Translate the internal keys to their original items. slots = tuple(key_to_original[item - offset] if item is not None else None for item in final_row) # Return the parameters return HashInfo( t=t, slots=slots, r=displacement_vector, offset=offset, to_int=to_int if to_int is not __identity else None )
899657596669de4852936737efbfecd9f7b4734a
146
import base64 import binascii def Base64EncodeHash(digest_value): """Returns the base64-encoded version of the input hex digest value.""" return base64.encodestring(binascii.unhexlify(digest_value)).rstrip('\n')
d1fa662c6bacbde84413edb8272b445bed26de90
147
def init_node(node_name, publish_topic): """ Init the node. Parameters ---------- node_name Name assigned to the node publish_topic Name of the publisher topic """ rospy.init_node(node_name, anonymous=True) publisher = rospy.Publisher(publish_topic, Int16MultiArray, queue_size=10) return publisher
47371f1617937842991db80dea3f2bc15fee4b43
148
import astropy.io.fits as pf def fits_checkkeyword(fitsfile, keyword, ext=0, silent=False): """ Check the keyword value of a FITS extension. Parameters ---------- fitsfile : str Path to the FITS file. keyword : str The keyword to check. ext : int or str Extension index (int) or key (str). Returns ------- Header key value If both the specified extension and keyword exist. ``None`` If a ``KeyError`` exception would have been raised and ``silent=True`` is set. Raises ------ KeyError If either the specified extension or the keyword cannot be found, and ``silent=False``, a KeyError exception will be raised. OSError If the specified file cannot be found, astropy.io.fits will raise OSError. """ fh = pf.open(fitsfile) try: return fh[ext].header[keyword] except KeyError as e: if silent: return None else: print('The specified extension or keyword is not found.') raise e
7c60d410bcfed6c6fdfece6f7bfec173b6cbbd9a
149
def arr_ds(time=True, var='tmp'):
    """
    Read in a saved dataset containing lat, lon, and values

    :param time: (boolean) - whether to return dataset with time
    :param var: (str) - variable type (only tmp/rh currently)
    :return ds: (xr.dataset) - dataset
    """
    if time:
        if var == 'tmp':
            path = pre.join_cwd('data/air.sig995.1948.nc')
        if var == 'rh':
            path = pre.join_cwd('data/rhum.sig995.1948.nc')
    else:
        path = pre.join_cwd('data/slp.nc')

    return xr.open_dataset(path)
0f8afcf09eae925247a0b174ac2247713ef63377
150
from django_toolkit.datetime_util import quarter as datetime_quarter from datetime import datetime def quarter(d): """ Return start/stop datetime for the quarter as defined by dt. """ first_date, last_date = datetime_quarter(datetime(d.year, d.month, d.day)) return first_date.date(), last_date.date()
9c5510e4b2c131715a1bde8233fd50b7241b1d39
151
def _fp(yhat, ytrue): """ Class wise false positive count. :param yhat: :param ytrue: :return: """ yhat_true = np.asarray(yhat == np.max(yhat, axis=1, keepdims=True), dtype="float32") return np.sum(yhat_true * (1. - ytrue), axis=0)
3a9ac128ac3a845183d219e029f93452bb94c3b7
152
import random def out_flag(): """Either -o or --outfile""" return '-o' if random.randint(0, 1) else '--outfile'
129e7a493618ca7457fab271a396023807fd2f38
154
def guess_from_peak(y, x, negative=False): """Estimate starting values from 1D peak data and return (height,center,sigma). Parameters ---------- y : array-like y data x : array-like x data negative : bool, optional determines if peak height is positive or negative, by default False Returns ------- (height, center, sigma) : (float, float, float) Estimates of 1 gaussian line parameters. """ sort_increasing = np.argsort(x) x = x[sort_increasing] y = y[sort_increasing] # find the max/min values of x and y, and the x value at max(y) maxy, miny = max(y), min(y) maxx, minx = max(x), min(x) height = maxy - miny # set a backup sigma, and center in case using the halfmax calculation doesn't work. # The backup sigma = 1/6 the full x range and the backup center is the # location of the maximum sig = (maxx - minx) / 6.0 cen = x[np.argmax(y)] # the explicit conversion to a NumPy array is to make sure that the # indexing on line 65 also works if the data is supplied as pandas.Series # find the x positions where y is above (ymax+ymin)/2 x_halfmax = np.array(x[y > (maxy + miny) / 2.0]) if negative: height = -(maxy - miny) # backup center for if negative. cen = x[np.argmin(y)] x_halfmax = x[y < (maxy + miny) / 2.0] # calculate sigma and center based on where y is above half-max: if len(x_halfmax) > 2: sig = (x_halfmax[-1] - x_halfmax[0]) / 2.0 cen = x_halfmax.mean() return height, cen, sig
b78f42ba0fed1a1a696000d223c42bd9972409f4
155
def get_default_extension(): """ return the default view extension """ return rawData.Visualization
0539866dee782b7cb605c2c54e1896375b31cd95
156
def getNewPluginManager() -> pluginManager.ArmiPluginManager: """ Return a new plugin manager with all of the hookspecs pre-registered. """ pm = pluginManager.ArmiPluginManager("armi") pm.add_hookspecs(ArmiPlugin) return pm
dac7694587528d3d7213294eb7f9fccbc1dca7b2
157
import io
import re


def get_utterances_from_stm(stm_file):
    """
    Return list of entries containing phrase and its start/end timings

    :param stm_file:
    :return:
    """
    res = []
    with io.open(stm_file, "r", encoding='utf-8') as f:
        for stm_line in f:
            if re.match("^;;", stm_line) is None:
                tokens = stm_line.split()
                start_time = float(tokens[3])
                end_time = float(tokens[4])
                filename = tokens[0]
                if tokens[2] != "inter_segment_gap":
                    transcript = " ".join(t for t in tokens[6:]).strip().encode("utf-8", "ignore").decode("utf-8", "ignore")
                    # keep only transcriptions that are non-empty and not marked ignore_time_segment_in_scoring
                    if transcript != "ignore_time_segment_in_scoring" and transcript.strip() != "":
                        res.append({"start_time": start_time, "end_time": end_time,
                                    "filename": filename, "transcript": transcript})

    return res
e8c7329ec04824570071994b6d6b05609f68b7a4
158
def lookup_quo_marks(lang='en-US', map_files=MAP_FILES, encoding='utf-8'): """Looks up quotation marks for a language. Arguments: ``lang`` (``str``): An RFC 5646-ish language code (e.g., "en-US", "pt-BR", "de", "es"). Defines the language the quotation marks of which to look up. Default: 'en-US'. ``maps`` (sequence of ``str`` instances): A List of possible locations of mappsings of RFC 5646-like language codes to lists of quotation marks. Default: ``MAP_FILES`` (module constant). ``encoding`` (``str``): The encoding of those files. Defaults to 'utf-8'. If ``lang`` contains a country code, but no quotation marks have been defined for that country, the country code is discarded and the quotation marks for the language simpliciter are looked up. For example, 'de-DE' will find 'de'. If ``lang`` does not contain a country code or if that code has been discarded and no quotation marks have been defined for that language simpliciter, but quotation marks have been defined for variants of that language as they are spoken in a particular country, the quotation marks of the variant that has been defined first are used. For example, 'en' will find 'en-US'. Returns (``QuoMarks``): The quotation marks of that language. Raises: ``QuoMarkUnknownLanguageError``: If no quotation marks have been defined for ``lang``. All exceptions ``load_quotation_maps`` and ``QuoMarks.__init__`` raise. """ map_ = load_maps(map_files, encoding=encoding) for i in range(3): try: return QuoMarks(*map_[lang]) except KeyError: if i == 0: lang = lang.split('-')[0] elif i == 1: for j in map_: if not isinstance(j, basestring): # pylint: disable=E0602 continue if j.startswith(lang): lang = j break else: break raise QuoMarkUnknownLangError(lang=lang)
e97d245faae256184809b61da1646c607042db3c
159
def replace_unwanted_xml_attrs(body): """ Method to return transformed string after removing all the unwanted characters from given xml body :param body: :return: """ return body.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
6f7dde06590bc8b8ad8477e7cee284ae38568b42
161
def protocol_version_to_kmip_version(value): """ Convert a ProtocolVersion struct to its KMIPVersion enumeration equivalent. Args: value (ProtocolVersion): A ProtocolVersion struct to be converted into a KMIPVersion enumeration. Returns: KMIPVersion: The enumeration equivalent of the struct. If the struct cannot be converted to a valid enumeration, None is returned. """ if not isinstance(value, ProtocolVersion): return None if value.major == 1: if value.minor == 0: return enums.KMIPVersion.KMIP_1_0 elif value.minor == 1: return enums.KMIPVersion.KMIP_1_1 elif value.minor == 2: return enums.KMIPVersion.KMIP_1_2 elif value.minor == 3: return enums.KMIPVersion.KMIP_1_3 elif value.minor == 4: return enums.KMIPVersion.KMIP_1_4 else: return None elif value.major == 2: if value.minor == 0: return enums.KMIPVersion.KMIP_2_0 else: return None else: return None
6180f1eed3411e5257a989fdbb2fda48d4c59277
162
def valid_template(template): """Is this a template that returns a valid URL?""" if template.name.lower() == "google books" and ( template.has("plainurl") or template.has("plain-url") ): return True if template.name.lower() == "billboardurlbyname": return True return False
51191d6b60af23265dc6cb4ff87c520e80bac59f
164
def build_get_complex_item_null_request( **kwargs # type: Any ): # type: (...) -> HttpRequest """Get array of complex type with null item [{'integer': 1 'string': '2'}, null, {'integer': 5, 'string': '6'}]. See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow. :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow. :rtype: ~azure.core.rest.HttpRequest Example: .. code-block:: python # response body for status code(s): 200 response.json() == [ { "integer": 0, # Optional. "string": "str" # Optional. } ] """ accept = "application/json" # Construct URL url = '/array/complex/itemnull' # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, headers=header_parameters, **kwargs )
96866e7b35c925459d594698759d67979e6d0d6f
165
def get_choice(): """ Gets and returns choice for mode to use when running minimax """ choice = input( "Please enter a number (1 - 4)\n 1. Both players use minimax correctly at every turn\n 2. The starting player (X) is an expert and the opponent (0) only has a 50% chance to use minimax\n\t at each turn\n 3. The starting player (X) only has a 50% chance to use minimax at each turn and the opponent (0)\n\t is an expert.\n 4. Both players only have a 50% chance to use minimax at each turn.\n" ) while (choice != '1' and choice != '2' and choice != '3' and choice != '4'): choice = input("Not a choice. Go agane: (1 - 4)\n") return choice
d79278acc9bc0a36480c1067b81e64c5512dd586
166
def _arange_ndarray(arr, shape, axis, reverse=False): """ Create an ndarray of `shape` with increments along specified `axis` Parameters ---------- arr : ndarray Input array of arbitrary shape. shape : tuple of ints Shape of desired array. Should be equivalent to `arr.shape` except `shape[axis]` which may have any positive value. axis : int Axis to increment along. reverse : bool If False, increment in a positive fashion from 1 to `shape[axis]`, inclusive. If True, the bounds are the same but the order reversed. Returns ------- padarr : ndarray Output array sized to pad `arr` along `axis`, with linear range from 1 to `shape[axis]` along specified `axis`. Notes ----- The range is deliberately 1-indexed for this specific use case. Think of this algorithm as broadcasting `np.arange` to a single `axis` of an arbitrarily shaped ndarray. """ initshape = tuple(1 if i != axis else shape[axis] for (i, x) in enumerate(arr.shape)) if not reverse: padarr = np.arange(1, shape[axis] + 1) else: padarr = np.arange(shape[axis], 0, -1) padarr = padarr.reshape(initshape) for i, dim in enumerate(shape): if padarr.shape[i] != dim: padarr = padarr.repeat(dim, axis=i) return padarr
f869bcd0d51ec7dc7858570337019e00948eaee9
167
from typing import Any from typing import Type def _map_nonlinearities( element: Any, nonlinearity_mapping: Type[NonlinearityMapping] = NonlinearityMapping ) -> Any: """Checks whether a string input specifies a PyTorch layer. The method checks if the input is a string. If the input is a string, it is preprocessed and then mapped to a corresponding PyTorch activation layer. If the input is not a string it is returned unchanged. Parameters ---------- element : Any Arbitrary input to this function. Returns ------- Any Returns either a callable activation or normalization layer or the input element. """ nonlinearities = nonlinearity_mapping() return _map_call_dict(nonlinearities, element)
0ea5972471f1c766bb5303a60eee4e50df14c9d5
170
def _get_streamflow(product, feature_id, s_date, s_time, e_date, lag): """Downloads streamflow time series for a given river. Downloads streamflow time series for a given river feature using the HydroShare archive and Web service. Units are in cubic feet per second as returned by HydroShare. For the API description, see https://apps.hydroshare.org/apps/nwm-data-explorer/api/ Args: product: String indicating model product. Valid values are: analysis_assim, short_range, medium_range, long_range feature_id: String identifier of the river feature. s_date: (String or Date) Valid date for the model simulation. s_time: (String) Two digit simulation hour, e.g., '06'. e_date: (String or Date) End date of data to retrieve. Valid for analysis_assim only. lag: (String) Lag argument for URI. This is an escaped comma delimited list of long_range forecast simulation hours, e.g., 00z%2C06z%2C12z%2C18z. Returns: A list of dicts representing time series. Each series includes name, datetimes, and values. For example: {'name': 'Member 1 t00z', 'dates': ['2016-06-02 01:00:00+00:00', '2016-06-02 02:...'] 'values': [257.2516, 1295.7293]} Raises: HTTPError: An error occurred accessing data from HydroShare. ValueError: Service request returned no data, likely due to invalid input arguments. """ if 'long_range' in product: product = 'long_range' s_date = date_parser.parse(str(s_date)).strftime('%Y-%m-%d') if e_date: e_date = date_parser.parse(str(e_date)).strftime('%Y-%m-%d') uri_template = ( HS_API_URI + 'get-netcdf-data?config={0}&geom=channel_rt&' 'variable=streamflow&COMID={1}&' 'startDate={2}&time={3}&endDate={4}&lag={5}') uri = uri_template.format(product, feature_id, s_date, s_time, e_date, lag) response = urlopen(uri) json_data = _get_netcdf_data_response_to_json(uri, response) series_list = _unpack_series(json_data, product) return series_list
4485b29f3a34862cd674243314296010e10d0847
172
def get_profanity(text: str, duplicates=False) -> list: """Gets all profane words and returns them in a list""" text: str = text.lower() additional: list = [] profane: list = [word for word in PROFANE_WORD_LIST if word in text] if duplicates: for word in profane: c: int = text.count(word) if c > 1: x: list = [word for _ in range(c - 1)] additional.extend(list(x)) profane.extend(additional) return profane
332b8ac355e974d0f750ad003a907e22b4f1b552
174
def build_stats(train_result, eval_result, time_callback): """Normalizes and returns dictionary of stats. Args: train_result: The final loss at training time. eval_result: Output of the eval step. Assumes first value is eval_loss and second value is accuracy_top_1. time_callback: Time tracking callback instance. Returns: Dictionary of normalized results. """ stats = {} if eval_result: stats['eval_loss'] = eval_result[0] stats['eval_acc'] = eval_result[1] stats['train_loss'] = train_result[0] stats['train_acc'] = train_result[1] if time_callback: timestamp_log = time_callback.timestamp_log stats['step_timestamp_log'] = timestamp_log stats['train_finish_time'] = time_callback.train_finish_time if len(timestamp_log) > 1: stats['avg_exp_per_second'] = ( time_callback.batch_size * time_callback.log_steps * (len(time_callback.timestamp_log) - 1) / (timestamp_log[-1].timestamp - timestamp_log[0].timestamp)) return stats
959c4ac9b1ed9aabb41a329dba6e06384d4492a7
175
def multiplicative(v1, v2, alpha=1, beta=1): """ Weighted elementwise multiplication. """ compword = str(v1.row2word[0]) + " " + str(v2.row2word[0]) comp = (alpha * v1) * (beta * v2) comp.row2word = [compword] return comp
9305291f4e0a43a47d578962f205797ead5fcf04
176
def is_information(status_code, **options): """ gets a value indicating that given status code is a information code. if returns True if the provided status code is from `InformationResponseCodeEnum` values. :param int status_code: status code to be checked. :keyword bool strict_status: specifies that it should only consider the status code as information if it is from `InformationResponseCodeEnum` values. otherwise all codes from `INFORMATION_CODE_MIN` to `INFORMATION_CODE_MAX` will be considered as information. defaults to True if not provided. :rtype: bool """ return get_component(ResponseStatusPackage.COMPONENT_NAME).is_information(status_code, **options)
45bce315e582a93a76f34d3a027a4dd041655aba
177
from typing import Any from typing import Optional import time def create_application_registration( onefuzz_instance_name: str, name: str, approle: OnefuzzAppRole, subscription_id: str ) -> Any: """Create an application registration""" app = get_application( display_name=onefuzz_instance_name, subscription_id=subscription_id ) if not app: raise Exception("onefuzz app registration not found") resource_access = [ {"id": role["id"], "type": "Scope"} for role in app["appRoles"] if role["value"] == approle.value ] params = { "isDeviceOnlyAuthSupported": True, "displayName": name, "publicClient": { "redirectUris": ["https://%s.azurewebsites.net" % onefuzz_instance_name] }, "isFallbackPublicClient": True, "requiredResourceAccess": ( [ { "resourceAccess": resource_access, "resourceAppId": app["appId"], } ] if len(resource_access) > 0 else [] ), } registered_app = query_microsoft_graph( method="POST", resource="applications", body=params, subscription=subscription_id, ) logger.info("creating service principal") service_principal_params = { "accountEnabled": True, "appRoleAssignmentRequired": False, "servicePrincipalType": "Application", "appId": registered_app["appId"], } def try_sp_create() -> None: error: Optional[Exception] = None for _ in range(10): try: query_microsoft_graph( method="POST", resource="servicePrincipals", body=service_principal_params, subscription=subscription_id, ) return except GraphQueryError as err: # work around timing issue when creating service principal # https://github.com/Azure/azure-cli/issues/14767 if ( "service principal being created must in the local tenant" not in str(err) ): raise err logger.warning( "creating service principal failed with an error that occurs " "due to AAD race conditions" ) time.sleep(60) if error is None: raise Exception("service principal creation failed") else: raise error try_sp_create() registered_app_id = registered_app["appId"] app_id = app["appId"] def try_authorize_application(data: Any) -> None: authorize_application( UUID(registered_app_id), UUID(app_id), subscription_id=subscription_id, ) retry(try_authorize_application, "authorize application") def try_assign_instance_role(data: Any) -> None: assign_instance_app_role(onefuzz_instance_name, name, subscription_id, approle) retry(try_assign_instance_role, "assingn role") return registered_app
9f7a6f52e4e07437eec655c2d51bb5cdc9659a21
178
import librosa


def from_mel(
    mel_,
    sr=16000,
    n_fft=2048,
    n_iter=32,
    win_length=1000,
    hop_length=100,
):
    """
    Change melspectrogram into waveform using Librosa.

    Parameters
    ----------
    mel_: np.array
        mel spectrogram to invert.

    Returns
    --------
    result: np.array
    """
    return librosa.feature.inverse.mel_to_audio(
        mel_,
        sr=sr,
        n_fft=n_fft,
        hop_length=hop_length,
        win_length=win_length,
        window='hann',
        center=True,
        pad_mode='reflect',
        power=1.0,
        n_iter=n_iter,
    )
939791db3df6a5099b548abc14033a8463c39ed6
180
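A round-trip sketch for from_mel above, assuming librosa and numpy are installed; the audio is a synthetic test tone, and the melspectrogram parameters are chosen to match the function's defaults (including power=1.0):

import numpy as np
import librosa

sr = 16000
y = 0.5 * np.sin(2 * np.pi * 440.0 * np.arange(sr) / sr)  # 1 s, 440 Hz test tone
mel = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=2048,
                                     hop_length=100, win_length=1000, power=1.0)
y_rec = from_mel(mel, sr=sr)  # Griffin-Lim inversion, so only an approximation
print(y.shape, y_rec.shape)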
def expand(arg): """ sp.expand currently has no matrix support """ if isinstance(arg, sp.Matrix): return arg.applyfunc(sp.expand) else: return sp.expand(arg)
d71fa3f2747cb3cdaa4c5b844037f1a1c4fa7480
181
def ElementTreeToDataset(element_tree, namespaces, csv_path, load_all_data): """Convert an ElementTree tree model into a DataSet object. Args: element_tree: ElementTree.ElementTree object containing complete data from DSPL XML file namespaces: A list of (namespace_id, namespace_url) tuples csv_path: Directory where CSV files associated with dataset can be found load_all_data: Boolean indicating whether all CSV data should be loaded Returns: dspl_model.DataSet object """ dspl_dataset = dspl_model.DataSet() # Fill in basic info dspl_dataset.namespace = element_tree.getroot().get( _DSPL_SCHEMA_PREFIX + 'targetNamespace', default='') for namespace_id, namespace_url in namespaces: if namespace_id: dspl_dataset.AddImport( dspl_model.Import(namespace_id=namespace_id, namespace_url=namespace_url)) info_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'info') if info_element is not None: dspl_dataset.name = _GetValue( info_element.find(_DSPL_SCHEMA_PREFIX + 'name')) dspl_dataset.description = ( _GetValue(info_element.find(_DSPL_SCHEMA_PREFIX + 'description'))) dspl_dataset.url = ( _GetValue(info_element.find(_DSPL_SCHEMA_PREFIX + 'url'))) provider_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'provider') if provider_element is not None: dspl_dataset.provider_name = _GetValue( provider_element.find(_DSPL_SCHEMA_PREFIX + 'name')) dspl_dataset.provider_url = ( _GetValue(provider_element.find(_DSPL_SCHEMA_PREFIX + 'url'))) # Get topics topics_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'topics') if topics_element is not None: topic_elements = topics_element.findall(_DSPL_SCHEMA_PREFIX + 'topic') for topic_element in topic_elements: dspl_dataset.AddTopic(ElementToTopic(topic_element)) # Get concepts concepts_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'concepts') if concepts_element is not None: concept_elements = concepts_element.findall(_DSPL_SCHEMA_PREFIX + 'concept') for concept_element in concept_elements: dspl_dataset.AddConcept(ElementToConcept(concept_element)) # Get slices slices_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'slices') if slices_element is not None: slice_elements = slices_element.findall(_DSPL_SCHEMA_PREFIX + 'slice') for slice_element in slice_elements: dspl_dataset.AddSlice(ElementToSlice(slice_element, dspl_dataset)) # Get tables tables_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'tables') if tables_element is not None: table_elements = tables_element.findall(_DSPL_SCHEMA_PREFIX + 'table') for table_element in table_elements: dspl_dataset.AddTable( ElementToTable(table_element, csv_path, load_all_data)) return dspl_dataset
20c5d2ad06971a994a47e5d8317c3a81c0895a06
182
def load_config(path): """ load the config of LSTMLM """ if path.rfind('.ckpt') != -1: path_name = path[0: path.rfind('.ckpt')] else: path_name = path with open(path_name + '.config', 'rt') as f: name = f.readline().split()[0] config = wb.Config.load(f) return config
595e6c73f45d94f7d691a64e88cd81d67d4ad1aa
183
def box_plot_stats( ## arguments / inputs x, ## input array of values coef = 1.5 ## positive real number ## (determines how far the whiskers extend from the iqr) ): """ calculates box plot five-number summary: the lower whisker extreme, the lower ‘hinge’ (observed value), the median, the upper ‘hinge’, and upper whisker extreme (observed value) returns a results dictionary containing 2 items: "stats" and "xtrms" 1) the "stats" item contains the box plot five-number summary as an array 2) the "xtrms" item contains values which lie beyond the box plot extremes functions much the same as R's 'boxplot.stats()' function for which this Python implementation was predicated ref: The R Project for Statistical Computing. (2019). Box Plot Statistics. http://finzi.psych.upenn.edu/R/library/grDevices/html/boxplot.stats.html. Tukey, J. W. (1977). Exploratory Data Analysis. Section 2C. McGill, R., Tukey, J.W. and Larsen, W.A. (1978). Variations of Box Plots. The American Statistician, 32:12-16. http://dx.doi.org/10.2307/2683468. Velleman, P.F. and Hoaglin, D.C. (1981). Applications, Basics and Computing of Exploratory Data Analysis. Duxbury Press. Emerson, J.D. and Strenio, J. (1983). Boxplots and Batch Comparison. Chapter 3 of Understanding Robust and Exploratory Data Analysis, eds. D.C. Hoaglin, F. Mosteller and J.W. Tukey. Wiley. Chambers, J.M., Cleveland, W.S., Kleiner, B. and Tukey, P.A. (1983). Graphical Methods for Data Analysis. Wadsworth & Brooks/Cole. """ ## quality check for coef if coef <= 0: raise ValueError("cannot proceed: coef must be greater than zero") ## convert input to numpy array x = np.array(x) ## determine median, lower ‘hinge’, upper ‘hinge’ median = np.quantile(a = x, q = 0.50, interpolation = "midpoint") first_quart = np.quantile(a = x, q = 0.25, interpolation = "midpoint") third_quart = np.quantile(a = x, q = 0.75, interpolation = "midpoint") ## calculate inter quartile range intr_quart_rng = third_quart - first_quart ## calculate extreme of the lower whisker (observed, not interpolated) lower = first_quart - (coef * intr_quart_rng) lower_whisk = np.compress(x >= lower, x) lower_whisk_obs = np.min(lower_whisk) ## calculate extreme of the upper whisker (observed, not interpolated) upper = third_quart + (coef * intr_quart_rng) upper_whisk = np.compress(x <= upper, x) upper_whisk_obs = np.max(upper_whisk) ## store box plot results dictionary boxplot_stats = {} boxplot_stats["stats"] = np.array([lower_whisk_obs, first_quart, median, third_quart, upper_whisk_obs]) ## store observations beyond the box plot extremes boxplot_stats["xtrms"] = np.array(x[(x < lower_whisk_obs) | (x > upper_whisk_obs)]) ## return dictionary return boxplot_stats
4bc56d85103f6ba9c2267685e5d64c51ab5e1101
184
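A small usage sketch for box_plot_stats above (random data, so the exact numbers will vary from run to run):

import numpy as np

data = np.concatenate([np.random.normal(0, 1, 200), [8.0, -9.0]])  # add two obvious outliers
res = box_plot_stats(data)
print(res["stats"])  # [lower whisker, lower hinge, median, upper hinge, upper whisker]
print(res["xtrms"])  # values beyond the whiskers, e.g. 8.0 and -9.0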
def expand_multinomial(expr, deep=True): """ Wrapper around expand that only uses the multinomial hint. See the expand docstring for more information. Examples ======== >>> from sympy import symbols, expand_multinomial, exp >>> x, y = symbols('x y', positive=True) >>> expand_multinomial((x + exp(x + 1))**2) x**2 + 2*x*exp(x + 1) + exp(2*x + 2) """ return sympify(expr).expand(deep=deep, mul=False, power_exp=False, power_base=False, basic=False, multinomial=True, log=False)
7069035c449e9969907dbb5854b30da9701c194a
185
import pickle def unpickle_tokens(filepath): """Unpickle the tokens into memory.""" try: with open(filepath+'_tokens.pickle', 'rb') as f: tokens = pickle.load(f) except FileNotFoundError: tokens = tokenize_and_tag(filepath) pickle_tokens(tokens, filepath) return tokens
5c68a4f4ba05983577e7de65ad0f47e61528dc38
186
def make_cov(df, columns=["parallax", "pmra", "pmdec"]): """Generate covariance matrix from Gaia data columns : list list of columns to calculate covariance. Must be a subset of 'ra', 'dec' 'parallax', 'pmra', 'pmdec'. Returns ------- numpy.array (N, number of columns) array of covariance matrices """ gaia_order = ["ra", "dec", "parallax", "pmra", "pmdec"] N = len(np.atleast_1d(df[columns[0] + "_error"])) # N could be 1 n = len(columns) C = np.zeros([N, n, n]) for i, j in zip(*np.triu_indices(n)): if i == j: C[:, [i], [j]] = np.atleast_1d( df[f"{columns[i]}_error"] * df[f"{columns[j]}_error"] )[:, None] else: corr_name = ( "_".join( sorted([columns[i], columns[j]], key=lambda x: gaia_order.index(x)) ) + "_corr" ) C[:, [i, j], [j, i]] = np.atleast_1d( df[f"{columns[i]}_error"] * df[f"{columns[j]}_error"] * df[corr_name] )[:, None] return C.squeeze()
c246151b68b744b1f1c1fa03276d098d4683409f
187
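A usage sketch for make_cov above; the column values are toy numbers rather than real Gaia rows, but the error/correlation column names follow the Gaia archive convention the function expects:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "parallax_error": [0.10, 0.20],
    "pmra_error": [0.05, 0.07],
    "pmdec_error": [0.06, 0.08],
    "parallax_pmra_corr": [0.2, -0.1],
    "parallax_pmdec_corr": [0.1, 0.0],
    "pmra_pmdec_corr": [-0.3, 0.4],
})
C = make_cov(df)  # shape (2, 3, 3): one symmetric covariance matrix per source
print(C.shape, np.allclose(C, np.swapaxes(C, 1, 2)))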
from functools import reduce def kinetics(request, section='', subsection=''): """ The RMG database homepage. """ # Make sure section has an allowed value if section not in ['libraries', 'families', '']: raise Http404 # Load the kinetics database, if necessary database.load('kinetics', section) # Determine which subsection we wish to view db = None try: db = database.get_kinetics_database(section, subsection) except ValueError: pass if db is not None: # A subsection was specified, so render a table of the entries in # that part of the database is_group_database = False # Sort entries by index if db.top is not None and len(db.top) > 0: # If there is a tree in this database, only consider the entries # that are in the tree entries0 = getDatabaseTreeAsList(db, db.top) tree = '<ul class="kineticsTree">\n{0}\n</ul>\n'.format(getKineticsTreeHTML(db, section, subsection, db.top)) else: # If there is not a tree, consider all entries entries0 = list(db.entries.values()) if any(isinstance(item, list) for item in entries0): # if the entries are lists entries0 = reduce(lambda x, y: x+y, entries0) # Sort the entries by index and label entries0.sort(key=lambda entry: (entry.index, entry.label)) tree = '' entries = [] for entry0 in entries0: if isinstance(entry0.data, str): data_format = 'Link' else: data_format = entry0.data.__class__.__name__ entry = { 'index': entry0.index, 'label': entry0.label, 'dataFormat': data_format, } if isinstance(db, KineticsGroups): is_group_database = True entry['structure'] = getStructureInfo(entry0.item) entry['parent'] = entry0.parent entry['children'] = entry0.children elif 'rules' in subsection: if isinstance(entry0.item, list): # if the reactants are not group objects, then this rate rule came from # the averaging step, and we don't want to show all of the averaged nodes # in the web view. We only want to show nodes with direct values or # training rates that became rate rules. continue else: entry['reactants'] = ' + '.join([getStructureInfo(reactant) for reactant in entry0.item.reactants]) entry['products'] = ' + '.join([getStructureInfo(reactant) for reactant in entry0.item.products]) entry['arrow'] = '&hArr;' if entry0.item.reversible else '&rarr;' else: entry['reactants'] = ' + '.join([getStructureInfo(reactant) for reactant in entry0.item.reactants]) entry['products'] = ' + '.join([getStructureInfo(reactant) for reactant in entry0.item.products]) entry['arrow'] = '&hArr;' if entry0.item.reversible else '&rarr;' entries.append(entry) return render(request, 'kineticsTable.html', {'section': section, 'subsection': subsection, 'databaseName': db.name, 'databaseDesc': db.long_desc, 'entries': entries, 'tree': tree, 'isGroupDatabase': is_group_database}) else: # No subsection was specified, so render an outline of the kinetics # database components kinetics_libraries = [(label, library) for label, library in database.kinetics.libraries.items() if subsection in label] kinetics_libraries.sort() # If this is a subsection, but not the main kinetics page, # we don't need to iterate through the entire database, as this takes a long time to load. 
try: families_to_process = [database.kinetics.families[subsection]] except KeyError: # if main kinetics page, or some other error families_to_process = database.kinetics.families.values() for family in families_to_process: for i in range(0, len(family.depositories)): if 'untrained' in family.depositories[i].name: family.depositories.pop(i) family.depositories.append(getUntrainedReactions(family)) kinetics_families = [(label, family) for label, family in database.kinetics.families.items() if subsection in label] kinetics_families.sort() return render(request, 'kinetics.html', {'section': section, 'subsection': subsection, 'kineticsLibraries': kinetics_libraries, 'kineticsFamilies': kinetics_families})
5a6d53282ff462912e9a9ba899e71aadaa0c7392
188
import xml.etree.ElementTree


def cot_to_cot(craft: dict, known_craft: dict = {}) -> str:
    """
    Given an input CoT XML Event with an ICAO Hex as the UID, will transform the
    Event's name, callsign & CoT Event Type based on known craft input database
    (CSV file).
    """
    return xml.etree.ElementTree.tostring(cot_to_cot_xml(craft, known_craft))
54fd916e9c0d32aec57c38ac40c2aee97491314d
189
def update_export(module, export, filesystem, system):
    """ Update the client list of an existing export if it has changed """
    assert export
    changed = False

    name = module.params['name']
    client_list = module.params['client_list']

    if client_list:
        if set(map(transform, unmunchify(export.get_permissions()))) \
                != set(map(transform, client_list)):
            if not module.check_mode:
                export.update_permissions(client_list)
            changed = True

    return changed
d7f122c63fc892c05215d2ed8c5c9e5d227209ca
190
import typing def create_kdf(kdf_type: str) -> typing.Type[KDF]: """Returns the class corresponding to the given key derivation function type name. Args: kdf_type The name of the OpenSSH private key key derivation function type. Returns: The subclass of :py:class:`KDF` corresponding to the key derivation function type name. Raises: KeyError: There is no subclass of :py:class:`KDF` corresponding to the given key derivation function type name. """ return _KDF_MAPPING[kdf_type]
453b417534c87c71e73ed7b39b33ec3ba8d4e9af
191
def promote(lhs, rhs, promote_option=True): """Promote two scalar dshapes to a possibly larger, but compatible type. Examples -------- >>> from datashape import int32, int64, Option >>> x = Option(int32) >>> y = int64 >>> promote(x, y) Option(ty=ctype("int64")) >>> promote(int64, int64) ctype("int64") Don't promote to option types. >>> promote(x, y, promote_option=False) ctype("int64") Notes ---- This uses ``numpy.result_type`` for type promotion logic. See the numpy documentation at http://docs.scipy.org/doc/numpy/reference/generated/numpy.result_type.html """ if lhs == rhs: return lhs else: left, right = getattr(lhs, 'ty', lhs), getattr(rhs, 'ty', rhs) dtype = datashape.CType.from_numpy_dtype( np.result_type( datashape.to_numpy_dtype(left), datashape.to_numpy_dtype(right), ), ) if promote_option: dtype = optionify(lhs, rhs, dtype) return dtype
8b197d631ad71bdbb7a4d4fcf1f6513aa4f5a41b
192
def calc_amp_pop(eigenvecs, wave_func, nstates): """Calculates amplitudes and population from wave function, eigenvectors""" pop = np.zeros(nstates) amp = np.zeros((nstates), dtype=np.complex128) for j in range(nstates): amp[j] = np.dot(eigenvecs[:, j], wave_func) pop[j] = np.real(bra_ket(amp[j], amp[j])) return amp, pop
7f092a9634bfff0e7e04582965667c9b7ecb0aaa
194
from typing import Hashable import typing def reflect(cls, *args, **kwargs): """ Construct a funsor, populate ``._ast_values``, and cons hash. This is the only interpretation allowed to construct funsors. """ if len(args) > len(cls._ast_fields): # handle varargs new_args = tuple(args[:len(cls._ast_fields) - 1]) + (args[len(cls._ast_fields) - 1 - len(args):],) assert len(new_args) == len(cls._ast_fields) _, args = args, new_args # JAX DeviceArray has .__hash__ method but raise the unhashable error there. cache_key = tuple(id(arg) if type(arg).__name__ == "DeviceArray" or not isinstance(arg, Hashable) else arg for arg in args) if cache_key in cls._cons_cache: return cls._cons_cache[cache_key] arg_types = tuple(typing.Tuple[tuple(map(type, arg))] if (type(arg) is tuple and all(isinstance(a, Funsor) for a in arg)) else typing.Tuple if (type(arg) is tuple and not arg) else type(arg) for arg in args) cls_specific = (cls.__origin__ if cls.__args__ else cls)[arg_types] result = super(FunsorMeta, cls_specific).__call__(*args) result._ast_values = args # alpha-convert eagerly upon binding any variable result = _alpha_mangle(result) cls._cons_cache[cache_key] = result return result
2b898de824f86460c8f7abbb4c0a9375e90ae1aa
195
import torch def make_strictly_feasible(x, lb, ub, rstep=1e-10): """Shift a point to the interior of a feasible region. Each element of the returned vector is at least at a relative distance `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used. """ x_new = x.clone() active = find_active_constraints(x, lb, ub, rstep) lower_mask = torch.eq(active, -1) upper_mask = torch.eq(active, 1) if rstep == 0: torch.nextafter(lb[lower_mask], ub[lower_mask], out=x_new[lower_mask]) torch.nextafter(ub[upper_mask], lb[upper_mask], out=x_new[upper_mask]) else: x_new[lower_mask] = lb[lower_mask].add(lb[lower_mask].abs().clamp(1,None), alpha=rstep) x_new[upper_mask] = ub[upper_mask].sub(ub[upper_mask].abs().clamp(1,None), alpha=rstep) tight_bounds = (x_new < lb) | (x_new > ub) x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds]) return x_new
cb38cf093b5459c3e32a1a02e4767dee6dae6637
197
def add_emails(request): """ Args: request: Http Request (ignored in this function) Returns: Add operation status wrapped on response's object """ error_messages = [] success_messages = [] status = HTTP_200_OK success, message = queries.add_emails(request.data) if success: success_messages.append(message) else: error_messages.append(message) status = HTTP_403_FORBIDDEN return create_response(error_messages=error_messages, success_messages=success_messages, status=status)
c4b8de45a5a233dd0e43febda4f973ecf64745d4
198
import math


def tanD(angle):
    """
    angle is the measure of an angle in degrees
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Returns the tangent of angle.
    """
    return math.tan(math.radians(angle))
641e564fefcdf6d1b804507b672e0e6476144b48
199
def offset_zero_by_one(feature): """Sets the start coordinate to 1 if it is actually 0. Required for the flanking to work properly in those cases. """ if feature.start == 0: feature.start += 1 return feature
3c8fb9754bde7b7efaa5d092e8239aeb099e26a4
200
def smilesToMolecule(smiles):
    """
    Convert a SMILES string to a CDK Molecule object.

    Returns: the Molecule object
    """
    mol = None
    try:
        smilesParser = cdk.smiles.SmilesParser(silentChemObjectBuilder)
        mol = smilesParser.parseSmiles(smiles)
    except cdk.exception.InvalidSmilesException as e:
        System.err.println('An error occurred while parsing the SMILES')
        e.printStackTrace()

    return mol
9a50a21c77a5306de47b39d2290e3e6c04184acc
201
from typing import OrderedDict def build_pathmatcher(name, defaultServiceUrl): """ This builds and returns a full pathMatcher entry, for appending to an existing URL map. Parameters: name: The name of the pathMatcher. defaultServiceUrl: Denotes the URL requests should go to if none of the path patterns match. """ matcher = OrderedDict() matcher['defaultService'] = defaultServiceUrl matcher['name'] = name return matcher
e21a79d51b41bd393a8fa2e254c6db7cf61bd441
202
def gaussian1D_smoothing(input_array, sigma, window_size): """ Function to smooth input array using 1D gaussian smoothing Args: input_array (numpy.array): input array of values sigma (float): sigma value for gaussian smoothing window_size (int): window size for gaussian smoothing Returns: numpy.array: smoothed output array """ # compute truncate value (#standard_deviations) truncate = (((window_size - 1)/2)-0.5)/sigma return gaussian_filter1d(input_array, sigma=sigma, truncate=truncate)
1e7185e358c3dba77c584e072537c7c3b5d9ca4c
203
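A sanity-check sketch for gaussian1D_smoothing above, assuming scipy's gaussian_filter1d is the one in scope inside that function (as its body implies):

import numpy as np
from scipy.ndimage import gaussian_filter1d

x = np.random.rand(100)
sigma, window_size = 2.0, 11
smoothed = gaussian1D_smoothing(x, sigma, window_size)
# truncate = (((11 - 1) / 2) - 0.5) / 2.0 = 2.25, so this is the equivalent direct call
assert np.allclose(smoothed, gaussian_filter1d(x, sigma=sigma, truncate=2.25))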
import re def add_whitespace(c_fn): """ Add two spaces between all tokens of a C function """ tok = re.compile(r'[a-zA-Z0-9_]+|\*|\(|\)|\,|\[|\]') return ' ' + ' '.join(tok.findall(c_fn)) + ' '
57d59a5956c3914fa01587b6262e7d4348d77446
204
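A quick illustration of add_whitespace above on a made-up C prototype:

print(add_whitespace("int foo(char *bar[], size_t n)"))
# ' int foo ( char * bar [ ] , size_t n ) '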
def readFlow(fn): """ Read .flo file in Middlebury format""" with open(fn, 'rb') as f: magic = np.fromfile(f, np.float32, count=1) if 202021.25 != magic: print('Magic number incorrect. Invalid .flo file') return None else: w = np.fromfile(f, np.int32, count=1) h = np.fromfile(f, np.int32, count=1) #print('Reading %d x %d flo file\n' % (w, h)) data = np.fromfile(f, np.float32, count=2*int(w)*int(h)) # Reshape data into 3D array (columns, rows, bands) # The reshape here is for visualization, the original code is (w,h,2) x=np.resize(data, (int(h), int(w), 2)) return x
d9e5ab6f661d904755c0457827e3cfed87752f95
205
def plot_umap_list(adata, title, color_groups): """ Plots UMAPS based with different coloring groups :param adata: Adata Object containing a latent space embedding :param title: Figure title :param color_groups: Column name in adata.obs used for coloring the UMAP :return: """ try: if adata.X.shape[1] == 2: adata.obsm['X_umap'] = adata.X else: sc.pp.neighbors(adata, use_rep='X') sc.tl.umap(adata) figures = [] for group in color_groups: fig = sc.pl.umap(adata, color=group, title=title+'_'+group, return_fig=True) fig.tight_layout() figures.append(fig) return figures except ValueError as e: print(e) return []
a5fc70fb507b575a4b8ab2b0a57bb01f55e390ff
206
def MatrixCrossProduct(Mat1, Mat2): """ Returns the cross products of Mat1 and Mat2. :param: - Mat1 & Mat2 - Required : 5D matrix with shape (3,1,nz,ny,nx). :return: - Mat3 : 5D matrix with shape (3,1,nz,ny,nx). """ Mat3 = np.zeros_like(Mat1) Mat3[0] = Mat1[1]*Mat2[2]-Mat1[2]*Mat2[1] Mat3[1] = Mat1[2]*Mat2[0]-Mat1[0]*Mat2[2] Mat3[2] = Mat1[0]*Mat2[1]-Mat1[1]*Mat2[0] return Mat3
5789209c1fbd8bfacff3e48e844aa0454f94958d
208
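A sanity-check sketch for MatrixCrossProduct above, comparing it against numpy's built-in cross product taken along the leading axis:

import numpy as np

nz, ny, nx = 4, 5, 6
A = np.random.rand(3, 1, nz, ny, nx)
B = np.random.rand(3, 1, nz, ny, nx)
C = MatrixCrossProduct(A, B)
assert C.shape == (3, 1, nz, ny, nx)
assert np.allclose(C, np.cross(A, B, axis=0))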
import collections import json def partition_preds_by_scrape_type(verify_predictions, evidence_predictions, val_examples): """Partition predictions by which scrape_type they come from. The validation fold contains four sets of evidence: drqa, lucene, ukp_pred, and ukp_wiki. The intention is in this function to partition these into four sets so that they can each be scored separately to measure the difference between them on models that are trained on one of these (train_scrape). Args: verify_predictions: Claim verification predictions to partition, a 3-dim tensor of probabilities (one for each class) evidence_predictions: Evidence predictions to partition, a scalar probability of matching val_examples: Validation examples, typically all of FeverMetricsCallback._validation_flat Returns: Predictions and examples partitioned by scrape type """ partitioned_verify = collections.defaultdict(list) partitioned_match = collections.defaultdict(list) partitioned_example = collections.defaultdict(list) for verify_probs, match_prob, example in zip(verify_predictions, evidence_predictions, val_examples): struct, _ = example metadata = json.loads(unwrap_tensor(struct['metadata'])) scrape_type = metadata['scrape_type'] partitioned_verify[scrape_type].append(verify_probs) partitioned_match[scrape_type].append(match_prob) partitioned_example[scrape_type].append(example) return partitioned_verify, partitioned_match, partitioned_example
137fdfb4bf1f837c087f597eedd4ce4229b33a02
209
def apply_delay_turbulence(signal, delay, fs): """Apply phase delay due to turbulence. :param signal: Signal :param delay: Delay :param fs: Sample frequency """ k_r = np.arange(0, len(signal), 1) # Create vector of indices k = k_r - delay * fs # Create vector of warped indices kf = np.floor(k).astype(int) # Floor the warped indices. Convert to integers so we can use them as indices. dk = kf - k ko = np.copy(kf) kf[ko<0] = 0 kf[ko+1>=len(ko)] = 0 R = ( (1.0 + dk) * signal[kf] + (-dk) * signal[kf+1] ) * (ko >= 0) * (ko+1 < len(k)) #+ 0.0 * (kf<0) return R
f5801b3888867b05c890e4dba8f64d0cd273f610
210
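A small usage sketch for apply_delay_turbulence above, using a constant 0.25 s delay on a short test signal (in practice the delay would come from a turbulence model):

import numpy as np

fs = 10.0
t = np.arange(0, 1, 1 / fs)
signal = np.sin(2 * np.pi * 1.0 * t)
delayed = apply_delay_turbulence(signal, delay=0.25, fs=fs)
# delay * fs = 2.5 samples: each output sample is a linear blend of the two input
# samples around the warped index, and samples warped outside the signal are zeroed.
print(delayed)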
def binaryContext(): """Return the registered context for the binary functions. Return Value: Ctor() for the binary function context """ return bin_func_class
93ed6627d90e4dfb493b8b851c35b59d56fd558f
211
from pathlib import Path def validate_vm_file(file_name: Path, nx: int, ny: int, nz: int): """ Validates that a velocity model file has the correct size, and no 0 values in a sample of the layers :param file_name: A Path object representing the file to test :param nx, ny, nz: The size of the VM in grid spaces (nx*ny*nz) :return: A possibly empty list of issues with the VM file """ errors = [] vm_size = nx * ny * nz size = file_name.stat().st_size if size != vm_size * SIZE_FLOAT: errors.append( f"VM filesize for {file_name} expected: {vm_size * SIZE_FLOAT} found: {size}" ) with VelocityModelFile(nx, ny, nz, file_name, writable=False, memmap=True) as vmf: min_v = vmf.get_values().min() if min_v <= 0.0: errors.append(f"File {file_name} has minimum value of {min_v}") return errors
0f0cd5a1bb13038ca0455770f4c240973775b891
212
def compute_xlabel_confusion_matrix(y_true, y_pred, labels_train=None, labels_test=None,
                                    normalize=True, sample_weight=None):
    """Computes confusion matrix when the labels used to train the classifier are
       different than those of the test set.

    Args:
      y_true: Ground truth.
      y_pred: Estimated labels.
      labels_train: List of labels used to train the classifier. This may be used to
                    reorder or select a subset of labels. If none is given, those that
                    appear at least once in y_pred are used in sorted order.
      labels_test: List of labels of the test set. This may be used to reorder or
                   select a subset of labels. If none is given, those that appear
                   at least once in y_true are used in sorted order.
      sample_weight: Sample weights.

    Returns:
      Confusion matrix (num_classes_test x num_classes_train)
    """
    y_true = list2ndarray(y_true)
    y_pred = list2ndarray(y_pred)

    if labels_train is None:
        labels_train = np.unique(y_pred)
    else:
        labels_train = list2ndarray(labels_train)

    if labels_test is None:
        labels_test = np.unique(y_true)
    else:
        labels_test = list2ndarray(labels_test)

    assert y_true.dtype == y_pred.dtype, 'y_true and y_pred labels do not have the same type'
    assert labels_train.dtype == labels_test.dtype, 'Train and test labels do not have the same type'
    assert labels_train.dtype == y_pred.dtype, 'Labels, y_true and y_pred do not have the same type'

    num_classes_test = len(labels_test)

    if issubclass(y_true.dtype.type, np.integer):
        y_pred += num_classes_test
    elif issubclass(y_true.dtype.type, np.dtype('U').type) or issubclass(
            y_true.dtype.type, np.dtype('S').type):
        y_true = np.asarray(['TEST_' + s for s in y_true])
        y_pred = np.asarray(['TRAIN_' + s for s in y_pred])
    else:
        raise Exception()

    if issubclass(labels_train.dtype.type, np.integer):
        labels_train += num_classes_test
    elif issubclass(labels_train.dtype.type, np.dtype('U').type) or issubclass(
            labels_train.dtype.type, np.dtype('S').type):
        labels_test = np.asarray(['TEST_' + s for s in labels_test])
        labels_train = np.asarray(['TRAIN_' + s for s in labels_train])
    else:
        raise Exception()

    labels = np.concatenate((labels_test, labels_train))
    C = confusion_matrix(y_true, y_pred, labels, sample_weight)
    C = C[:num_classes_test, num_classes_test:]

    if normalize:
        C = C/np.sum(C, axis=1, keepdims=True)

    return C
10f8e8767b98979d0d07dcb6ccfdddfaa8b78c1c
214
def generate_synthetic_data(n=50):
    """Create two sets of points from bivariate normal distributions.

    n is the number of training points generated per class.
    """
    # ss.norm(mean, std).rvs((rows, cols)) draws random variates; concatenating
    # along axis=0 stacks the two n x 2 samples, so `points` has shape (2n, 2).
    points = np.concatenate((ss.norm(0, 1).rvs((n, 2)), ss.norm(1, 1).rvs((n, 2))), axis=0)
    # Class labels (targets): n zeros for the first set of points, n ones for the second.
    outcomes = np.concatenate((np.repeat(0, n), np.repeat(1, n)), axis=0)
    return (points, outcomes)
e63bc114a1b69dc841f439486fc0b455698a4529
215
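A usage sketch for generate_synthetic_data above, assuming numpy as np and scipy.stats as ss are imported in the same module (as the function body implies):

points, outcomes = generate_synthetic_data(n=50)
print(points.shape)           # (100, 2): 50 points per class
print(np.bincount(outcomes))  # [50 50]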
def mask_array(array, idx, n_behind, n_ahead):
    """Return a window of `array` around `idx`.

    Args:
        array: input sequence (anything sliceable, e.g. a numpy array).
        idx: index the window is built around.
        n_behind: number of elements to include before `idx`.
        n_ahead: number of elements to include after `idx`.

    Returns:
        A copy of the slice array[idx - n_behind : idx + n_ahead + 1],
        clipped to the bounds of `array`.
    """
    first = max(0, idx - n_behind)
    last = min(idx + n_ahead + 1, len(array))
    array_masked = array[first:last].copy()

    return array_masked
04781f75bd1b0cae5b690759b5da475f59a43fe8
216
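A quick illustration of mask_array above on a small integer array:

import numpy as np

a = np.arange(10)
print(mask_array(a, idx=5, n_behind=2, n_ahead=1))  # [3 4 5 6]
print(mask_array(a, idx=1, n_behind=3, n_ahead=2))  # [0 1 2 3] (window clipped at the start)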
from typing import Set def get_nfs_acl(path: str, user: str) -> str: """ Retrieve the complete list of access control permissions assigned to a file or directory. """ raw = command(["/usr/bin/nfs4_getfacl", path], output=True).stdout.decode("utf-8") allowed: Set[str] = set() denied: Set[str] = set() for line in raw.splitlines(): if line.startswith("#"): continue type_, _, principal, perms = line.split(":") if principal != user: continue if type_ == "A": allowed.update(perms) elif type_ == "D": denied.update(perms) return "".join(sorted(allowed - denied))
bca401e9da9ddcb9419359024268362082c3f64b
217
from hdbscan import HDBSCAN def run_hdbscan(X_df, X_tsne, output_dir, transparent): """Cluster using density estimation Parameters ---------- X_df: DataFrame X_tsne: array-like, [n_samples, 2] output_dir: str, path transparent: bool Returns ------- clusterer: HDBSCAN object assignments: numpy array of shape [n_samples,] """ clusterer = HDBSCAN( core_dist_n_jobs=-1, cluster_selection_method="eom", # 'leaf', approx_min_span_tree=False, min_cluster_size=100, min_samples=1, leaf_size=100, gen_min_span_tree=True, # alpha=10., memory=Memory(cachedir=None, verbose=0), ) assignments = clusterer.fit_predict(X_df) centroid_labels, counts = np.unique(assignments, return_counts=True) n_clusters = len(centroid_labels) assignments[assignments == -1] = n_clusters - 1 logger.info("[HDBSCAN] Found {} clusters".format(n_clusters)) logger.info("[HDBSCAN] Cluster assignments:\n{}".format(counts)) logger.info( "[HDBSCAN] Cluster persistence:\n{}".format(clusterer.cluster_persistence_) ) return assignments, clusterer.exemplars_, n_clusters, clusterer
5b5b89f792cbf5acc3ab3681e0ac8d9ea6ce1705
219
def check_min_sample_periods(X, time_column, min_sample_periods): """ Check if all periods contained in a dataframe for a certain time_column contain at least min_sample_periods examples. """ return (X[time_column].value_counts() >= min_sample_periods).prod()
074c196a169d65582dbb32cc57c86c82ce4cb9c9
220
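A small usage sketch for check_min_sample_periods above with a toy DataFrame:

import pandas as pd

X = pd.DataFrame({"month": ["2021-01", "2021-01", "2021-02", "2021-02", "2021-02"]})
print(check_min_sample_periods(X, "month", min_sample_periods=2))  # 1: every period has >= 2 rows
print(check_min_sample_periods(X, "month", min_sample_periods=3))  # 0: "2021-01" has only 2 rows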