content (string, lengths 35 – 762k) · sha1 (string, length 40) · id (int64, 0 – 3.66M)
def svn_repos_get_logs2(*args):
    """
    svn_repos_get_logs2(svn_repos_t repos, apr_array_header_t paths, svn_revnum_t start,
        svn_revnum_t end, svn_boolean_t discover_changed_paths,
        svn_boolean_t strict_node_history, svn_repos_authz_func_t authz_read_func,
        svn_log_message_receiver_t receiver, apr_pool_t pool) -> svn_error_t
    """
    return apply(_repos.svn_repos_get_logs2, args)
8580b4df10d6a6ebecbea960e14cd2ca79720beb
3,659,544
import numpy as np


def error_function(theta, X, y):
    """Error function J definition (mean squared error cost)."""
    m = X.shape[0]  # number of training samples
    diff = np.dot(X, theta) - y
    return (1. / (2 * m)) * np.dot(np.transpose(diff), diff)
0762651fe9f33107a4e198e70319f77a5ae50e0b
3,659,545
def register(operation_name):
    """
    Registers the decorated class as an Operation with the supplied operation name

    :param operation_name: The identifying name for the Operation
    """
    def wrapper(clazz):
        if operation_name not in OPERATIONS:
            OPERATIONS[operation_name] = clazz
        return clazz
    return wrapper
036612a64d987ede2546bd5dbc9d848e6ed6c48b
3,659,546
def log2_grad(orig, grad):
    """Returns [grad * 1 / (log(2) * x)]"""
    x = orig.args[0]
    ones = ones_like(x)
    two = const(2.0, dtype=x.checked_type.dtype)
    return [grad * ones / (log(two) * x)]
c17d7eeee43e64e0eeb7feb86f357374cc2516e4
3,659,549
import posixpath

import numpy as np
import requests


def _stream_annotation(file_name, pn_dir):
    """
    Stream an entire remote annotation file from Physionet.

    Parameters
    ----------
    file_name : str
        The name of the annotation file to be read.
    pn_dir : str
        The PhysioNet directory where the annotation file is located.

    Returns
    -------
    ann_data : ndarray
        The resulting data stream in numpy array format.

    """
    # Full url of annotation file
    url = posixpath.join(config.db_index_url, pn_dir, file_name)

    # Get the content
    response = requests.get(url)
    # Raise HTTPError if invalid url
    response.raise_for_status()

    # Convert to numpy array
    ann_data = np.fromstring(response.content, dtype=np.dtype('<u1'))

    return ann_data
8a1168562b87f27cc035d21b7f91cb23a611d0b4
3,659,550
def get_scenarios():
    """
    Return a list of scenarios and the values of the parameters in each of them

    :return:
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess

    scenarios = get_scenarios_in_state(isess.state)

    return build_json_response(scenarios, 200)
2812cab14347f03e208d5c3b6399ffa954dfe843
3,659,551
def download() -> str:
    """
    Returns a download of the active files.

    :return: the zip file that needs to be downloaded.
    """
    file_manager = utility.load_file_manager()
    response = make_response(file_manager.zip_active_files(
        "scrubbed_documents.zip"))

    # Disable download caching
    response.headers["Cache-Control"] = \
        "max-age=0, no-cache, no-store, must-revalidate"
    response.headers["Expires"] = 0
    response.headers["Pragma"] = "no-cache"

    return response
a507c6fd2281226a0db4f4e9e84d2c4f0e6e9562
3,659,552
import pickle


def load_from_pickle_file(filepath):
    """Loads a pickle file into a python variable"""
    with open(filepath, "rb") as f:
        python_obj = pickle.load(f)
    return python_obj
8ad8b947e762590d1be8d6b3ca4b519293692f09
3,659,554
from aiida.orm import Dict
from aiida_quantumespresso.utils.resources import get_default_options


def generate_inputs_pw(fixture_code, generate_structure, generate_kpoints_mesh, generate_upf_data):
    """Generate default inputs for a `PwCalculation`."""

    def _generate_inputs_pw():
        """Generate default inputs for a `PwCalculation`."""
        inputs = {
            'code': fixture_code('quantumespresso.pw'),
            'structure': generate_structure(),
            'kpoints': generate_kpoints_mesh(2),
            'parameters': Dict(dict={
                'CONTROL': {
                    'calculation': 'scf'
                },
                'SYSTEM': {
                    'ecutrho': 240.0,
                    'ecutwfc': 30.0
                }
            }),
            'pseudos': {
                'Si': generate_upf_data('Si')
            },
            'metadata': {
                'options': get_default_options()
            }
        }
        return inputs

    return _generate_inputs_pw
fbe0a332a011b1909380275b1f70444b2dfb5d17
3,659,555
def connected_components(edge_index, num_nodes=None):
    """Find the connected components of a given graph.

    Args:
        edge_index (LongTensor): Edge coordinate matrix.
        num_nodes (int, optional): Number of nodes. Defaults to None.

    Returns:
        LongTensor: Vector assigning each node to its component index.
    """
    if num_nodes is None:
        num_nodes = edge_index.max().item() + 1
    device = edge_index.device
    row, col = edge_index.cpu()
    out = cc_cpu.connected_components(row, col, num_nodes)
    return out.to(device)
cb0b620fbd5577375b2ac79d62194537e61a1204
3,659,556
from tqdm import tqdm


def assembly2graph(path=DATA_PATH):
    """Convert assemblies (assembly.json) to graph format.

    Return a list of NetworkX graphs along with the input files.
    """
    graphs = []
    input_files = get_input_files(path)
    for input_file in tqdm(input_files, desc="Generating Graphs"):
        ag = AssemblyGraph(input_file)
        graph = ag.get_graph_networkx()
        graphs.append(graph)
    return graphs, input_files
46f2ed913c5047d68ee523f1a23382c2036fc1d7
3,659,557
def _get_default_backing(backing):
    """
    _get_default_backing(backing)

    Returns the preferred backing store
    - if user provides a valid Backing object, use it
    - if there is a default_backing object instantiated, use it
    - if the user provided a configuration dict, use it to create a new
      default_backing object
    - otherwise, create a default_backing object using our defaults.
    """
    # Probably they didn't mean to do this...
    global default_backing, default_backing_config

    if isinstance(backing, Backing):
        return backing

    if default_backing:
        return default_backing
    elif type(backing) is dict:
        default_backing = Backing(**backing)
    else:
        # create a new default backing
        default_backing = Backing(**default_backing_config)
    return default_backing
60f878ee730bea33b93b88e88dfe29885f6fac85
3,659,558
def slice(request, response, start, end=None):
    """Send a byte range of the response body

    :param start: The starting offset. Follows python semantics including
                  negative numbers.

    :param end: The ending offset, again with python semantics and None
                (spelled "null" in a query string) to indicate the end of
                the file.
    """
    content = resolve_content(response)
    response.content = content[start:end]
    return response
450e43afb988736dee991bcf284d5b92e11aec74
3,659,559
import warnings

import numpy as np


def get_ps(sdfits, scan, ifnum=0, intnum=None, plnum=0, fdnum=0,
           method='vector', avgf_min=256):
    """
    Parameters
    ----------
    sdfits :
    scan : int
        Scan number.
    plnum : int
        Polarization number.
    method : {'vector', 'gbtidl', 'classic'}, optional
        Method used to compute the source temperature.
        If set to ``'vector'`` it will use Eq. (16) of Winkel et al. (2012).
        If set to ``'gbtidl'`` or ``'classic'`` it will use the same method
        as GBTIDL.
        The default is ``'vector'``.

    Returns
    -------
    tsou :
        Calibrated source temperature.
    """
    ps_scan = sdfits.get_scans(scan, ifnum=ifnum, intnum=intnum, plnum=plnum)
    rows = ps_scan.table
    obsmode = rows["OBSMODE"]
    last_on = rows["LASTON"]
    last_off = rows["LASTOFF"]
    procnum = rows["PROCSEQN"]
    source = np.unique(rows['OBJECT'])[0]
    tcal = np.average(rows['TCAL'], axis=0)

    procname, swstate, swtchsig = obsmode[0].split(':')

    if procname not in ["OffOn", "OnOff"]:
        warnings.warn(f"Selected scan is not OnOff or OffOn, it is: {procname}. "
                      f"Cannot get Tcal from this scan.")
        return None

    scan_on, scan_off = utils.get_ps_scan_pair(scan, procnum, procname)

    sou_on = sdfits.get_scans(scan_on, sig="T", cal="T", ifnum=ifnum, intnum=intnum, plnum=plnum)
    sou_off = sdfits.get_scans(scan_on, sig="T", cal="F", ifnum=ifnum, intnum=intnum, plnum=plnum)
    off_on = sdfits.get_scans(scan_off, sig="T", cal="T", ifnum=ifnum, intnum=intnum, plnum=plnum)
    off_off = sdfits.get_scans(scan_off, sig="T", cal="F", ifnum=ifnum, intnum=intnum, plnum=plnum)

    if method == 'vector':

        sou_on.average()
        sou_off.average()
        off_on.average()
        off_off.average()

        off_freq = off_off.freq
        sou_freq = sou_on.freq

        nchan = off_on.data.shape[0]
        facs = utils.factors(nchan)
        avgf = np.min(facs[facs >= avgf_min])
        kappa_off = get_kappa(off_on.data, off_off.data, avgf=avgf)
        kappa_freq = off_freq.reshape(nchan//avgf, avgf).mean(axis=1)

        # Interpolate back to high frequency resolution.
        pt = np.argsort(kappa_freq)
        pi = np.argsort(sou_freq)
        kappa_interp = np.interp(sou_freq.to('Hz').value[pi],
                                 kappa_freq.to('Hz').value[pt],
                                 kappa_off)

        # Compute the source temperature (Eq. (16) in Winkel et al. 2012).
        tsou_on = (kappa_interp + 1.)*tcal*(sou_on.data - off_on.data)/off_on.data
        tsou_off = kappa_interp*tcal*(sou_off.data - off_off.data)/off_off.data

        # Average.
        tsou = 0.5*(tsou_on + tsou_off)

    elif method == 'gbtidl':

        # Eqs. (1) and (2) from Braatz (2009, GBTIDL calibration guide)
        # https://www.gb.nrao.edu/GBT/DA/gbtidl/gbtidl_calibration.pdf
        tsys = gbtidl_tsys(off_on.data, off_off.data, tcal)
        sig = 0.5*(sou_on.data + sou_off.data)
        ref = 0.5*(off_on.data + off_off.data)
        ta = gbtidl_sigref2ta(sig, ref, tsys)
        tint_sou = 0.5*(sou_on.table["EXPOSURE"] + sou_off.table["EXPOSURE"])
        tint_off = 0.5*(off_on.table["EXPOSURE"] + off_off.table["EXPOSURE"])
        tint = 0.5*(tint_sou + tint_off)
        dnu = np.mean(sou_on.table["CDELT1"])
        tsou = np.average(ta, axis=0, weights=dnu*tint*np.power(tsys, -2.))

    elif method == 'classic':

        tsys = classic_tsys(off_on.data, off_off.data, tcal)
        ta_on = (sou_on.data - off_on.data)/off_on.data*(tsys[:,np.newaxis] + tcal)
        ta_off = (sou_off.data - off_off.data)/off_off.data*(tsys[:,np.newaxis])
        tint_sou = 0.5*(sou_on.table["EXPOSURE"] + sou_off.table["EXPOSURE"])
        tint_off = 0.5*(off_on.table["EXPOSURE"] + off_off.table["EXPOSURE"])
        tint = 0.5*(tint_sou + tint_off)
        dnu = np.mean(sou_on.table["CDELT1"])
        ta_on = np.average(ta_on, axis=0, weights=dnu*tint_sou*np.power(tsys, -2.))
        ta_off = np.average(ta_off, axis=0, weights=dnu*tint_off*np.power(tsys, -2.))
        tsou = 0.5*(ta_on + ta_off)

    return tsou
8226f4e38ab7fce3c1c7f66c88b275e7c7798d86
3,659,560
import click


def choiceprompt(variable: Variable) -> Binding:
    """Prompt to choose from several values for the given name."""
    if not variable.choices:
        raise ValueError("variable with empty choices")

    choices = {str(number): value for number, value in enumerate(variable.choices, 1)}
    lines = [
        f"Select {variable.name}:",
        *[f"{number} - {value}" for number, value in choices.items()],
        "Choose from {}".format(", ".join(choices.keys())),
    ]

    choice = click.prompt(
        "\n".join(lines),
        type=click.Choice(list(choices)),
        default="1",
        show_choices=False,
    )

    return bind(variable, choices[choice])
78700032f93abaca2227d653d5f199e6dbf3b4ba
3,659,561
import base64
import hashlib


def rehash(file_path):
    """Return (hash, size) for a file with path file_path.

    The hash and size are used by pip to verify the integrity of the
    contents of a wheel.
    """
    with open(file_path, 'rb') as file:
        contents = file.read()
        hash = base64.urlsafe_b64encode(hashlib.sha256(contents).digest()).decode('latin1').rstrip('=')
        size = len(contents)
        return hash, size
167449640e8cbf17d36e7221df3490a12381dd8e
3,659,563
import requests


def _magpie_update_services_conflict(conflict_services, services_dict, request_cookies):
    # type: (List[Str], ServicesSettings, AnyCookiesType) -> Dict[Str, int]
    """
    Resolve conflicting services by name during registration by updating them only if
    pointing to different URL.
    """
    magpie_url = get_magpie_url()
    statuses = dict()
    for svc_name in conflict_services:
        statuses[svc_name] = 409
        svc_url_new = services_dict[svc_name]["url"]
        svc_url_db = "{magpie}/services/{svc}".format(magpie=magpie_url, svc=svc_name)
        svc_resp = requests.get(svc_url_db, cookies=request_cookies)
        svc_info = get_json(svc_resp).get(svc_name)
        svc_url_old = svc_info["service_url"]
        if svc_url_old != svc_url_new:
            svc_info["service_url"] = svc_url_new
            res_svc_put = requests.patch(svc_url_db, data=svc_info, cookies=request_cookies)
            statuses[svc_name] = res_svc_put.status_code
            print_log("[{url_old}] => [{url_new}] Service URL update ({svc}): {resp}"
                      .format(svc=svc_name, url_old=svc_url_old, url_new=svc_url_new,
                              resp=res_svc_put.status_code), logger=LOGGER)
    return statuses
71f72680aebd1fd781c5cbd9e77eeba06a64062e
3,659,564
def rtc_runner(rtc):
    """Resolved tool contract runner."""
    return run_main(polish_chunks_pickle_file=rtc.task.input_files[0],
                    sentinel_file=rtc.task.input_files[1],
                    subreads_file=rtc.task.input_files[2],
                    output_json_file=rtc.task.output_files[0],
                    max_nchunks=rtc.task.max_nchunks)
c9d5a1c23e5b88c6d7592dde473dc41123616a77
3,659,565
from typing import Any
import json


def dict_to_json_str(o: Any) -> str:
    """ Converts a python object into json. """
    json_str = json.dumps(o, cls=EnhancedJSONEncoder, sort_keys=True)
    return json_str
8377cee2e25d5daeefd7a349ede02f7134e052b2
3,659,566
def paramid_to_paramname(paramid):
    """Turn a parameter id number into a parameter name"""
    try:
        return param_info[paramid]['n']
    except KeyError:
        return "UNKNOWN_%s" % str(hex(paramid))
c5e47c6754448a20d79c33b6b501039b1463108e
3,659,567
import networkx as nx


def max_dist_comp(G, cc0, cc1):
    """
    Maximum distance between components

    Parameters
    ----------
    G : nx.Graph
        Graph
    cc0 : list
        Component 0
    cc1 : list
        Component 1

    Returns
    -------
    threshold : float
        Maximum distance
    """
    # Assertions
    assert isinstance(G, nx.Graph), "G is not a NetworkX graph"
    # Calculation
    threshold = 0
    for n0 in cc0:
        for n1 in cc1:
            distance = metrics.distance_between_nodes(G, n0, n1)
            if distance > threshold:
                threshold = distance
    return threshold
5df633ee746d537462b84674dc5adadd6f6f7e53
3,659,568
def convert_examples_to_features(
    examples,
    label_list,
    max_seq_length,
    tokenizer,
    cls_token_at_end=False,
    cls_token="[CLS]",
    cls_token_segment_id=1,
    sep_token="[SEP]",
    sep_token_extra=False,
    pad_on_left=False,
    pad_token=0,
    pad_token_segment_id=0,
    pad_token_label_id=-100,
    sequence_a_segment_id=0,
    mask_padding_with_zero=True,
):
    """ Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` define the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d", ex_index, len(examples))

        tokens = []
        label_ids = []
        for word, label in zip(example.words, example.labels):
            word_tokens = tokenizer.tokenize(word)
            tokens.extend(word_tokens)
            # Use the real label id for the first token of the word, and padding ids for the remaining tokens
            label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

        # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
        special_tokens_count = 3 if sep_token_extra else 2
        if len(tokens) > max_seq_length - special_tokens_count:
            tokens = tokens[: (max_seq_length - special_tokens_count)]
            label_ids = label_ids[: (max_seq_length - special_tokens_count)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids:   0   0  0    0    0     0      0   0    1  1  1  1  1   1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids:   0   0   0   0  0     0   0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens += [sep_token]
        label_ids += [pad_token_label_id]
        if sep_token_extra:
            # roberta uses an extra separator b/w pairs of sentences
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
        segment_ids = [sequence_a_segment_id] * len(tokens)

        if cls_token_at_end:
            tokens += [cls_token]
            label_ids += [pad_token_label_id]
            segment_ids += [cls_token_segment_id]
        else:
            tokens = [cls_token] + tokens
            label_ids = [pad_token_label_id] + label_ids
            segment_ids = [cls_token_segment_id] + segment_ids

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
            label_ids = ([pad_token_label_id] * padding_length) + label_ids
        else:
            input_ids += [pad_token] * padding_length
            input_mask += [0 if mask_padding_with_zero else 1] * padding_length
            segment_ids += [pad_token_segment_id] * padding_length
            label_ids += [pad_token_label_id] * padding_length

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s", example.guid)
            logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
            logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

        features.append(
            InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids)
        )
    return features
02e4a731c818e0833152aa8e44d6a49e523ef1fb
3,659,569
import numpy as np


def exp(var):
    """
    Returns variable representing exp applied to the input variable var
    """
    result = Var(np.exp(var.val))
    result.parents[var] = var.children[result] = np.exp(var.val)
    return result
d2811bbb240da33ce158a8bff5739ace2275da0e
3,659,570
from functools import wraps
from typing import Any, Callable, List


def invalidate_cache(
    key: str = None,
    keys: List = [],
    obj: Any = None,
    obj_attr: str = None,
    namespace: str = None,
):
    """Invalidates a specific cache key"""
    if not namespace:
        namespace = HTTPCache.namespace

    if key:
        keys = [key]

    def wrapper(func: Callable):
        @wraps(func)
        async def inner(*args, **kwargs):
            try:
                # extracts the `id` attribute from the `obj_attr` parameter passed to the `@cache` method
                _obj = kwargs.get(f"{obj}", None)
                _keys = await HTTPKeys.generate_keys(
                    keys=keys, config=HTTPCache, obj=_obj, obj_attr=obj_attr
                )
                _cache = HTTPCacheBackend(
                    redis=HTTPCache.redis_client, namespace=namespace
                )
                await _cache.invalidate_all(keys=_keys)
                _computed_response = await func(*args, **kwargs)
                return _computed_response
            except Exception as e:
                log_error(msg=f"Cache Error: {e}", e=e, method="cache")
                return await func(*args, **kwargs)

        return inner

    return wrapper
1b200db81db9d1134bbbbb8a09d3b57f5c6623dc
3,659,571
import numpy as np


def read_dataset(filename):
    """Reads in the TD events contained in the N-MNIST/N-CALTECH101 dataset file specified by 'filename'"""
    # N-MNIST: 34×34 pixels big
    f = open(filename, 'rb')
    raw_data = np.fromfile(f, dtype=np.uint8)
    f.close()
    raw_data = np.uint32(raw_data)

    all_y = raw_data[1::5]
    all_x = raw_data[0::5]
    all_p = (raw_data[2::5] & 128) >> 7  # bit 7
    all_ts = ((raw_data[2::5] & 127) << 16) | (raw_data[3::5] << 8) | (raw_data[4::5])

    # Process time stamp overflow events
    time_increment = 2 ** 13
    overflow_indices = np.where(all_y == 240)[0]
    for overflow_index in overflow_indices:
        all_ts[overflow_index:] += time_increment

    # Everything else is a proper td spike
    td_indices = np.where(all_y != 240)[0]

    events = np.stack([all_x[td_indices], all_y[td_indices],
                       all_ts[td_indices], all_p[td_indices]], axis=1).astype(np.float32)
    # events[:,3] = 2*events[:,3]-1
    return events
74861968e36d3e357ce62f615456aa02eb3bd28b
3,659,572
def prge_annotation():
    """Returns an annotation with protein/gene entities (PRGE) identified.
    """
    annotation = {"ents": [{"text": "p53", "label": "PRGE", "start": 0, "end": 0},
                           {"text": "MK2", "label": "PRGE", "start": 0, "end": 0}],
                  "text": "p53 and MK2",
                  "title": ""}

    return annotation
dda417c1c1a1146482f4a3340741d938714dbf30
3,659,573
import numpy as np
from scipy.spatial import Delaunay


def inner_points_mask(points):
    """Mask array into `points` where ``points[msk]`` are all "inner" points,
    i.e. `points` with one level of edge points removed. For 1D, this is simply
    points[1:-1,:] (assuming ordered points). For ND, we calculate and remove
    the convex hull.

    Parameters
    ----------
    points : nd array (npoints, ndim)

    Returns
    -------
    msk : (npoints, ndim)
        Bool array.
    """
    msk = np.ones((points.shape[0],), dtype=bool)
    if points.shape[1] == 1:
        assert (np.diff(points[:,0]) >= 0.0).all(), ("points not monotonic")
        msk[0] = False
        msk[-1] = False
    else:
        tri = Delaunay(points)
        edge_idx = np.unique(tri.convex_hull)
        msk.put(edge_idx, False)
    return msk
d81788fdbe3f19f67719951b319bcdd5e01d4d60
3,659,574
from typing import List, Tuple, Union

import numpy as np
import torch


def array2list(X_train: Union[np.ndarray, torch.Tensor],
               y_train: Union[np.ndarray, torch.Tensor],
               X_test: Union[np.ndarray, torch.Tensor],
               y_test: Union[np.ndarray, torch.Tensor],
               batch_size: int, memory_alloc: float = 4
               ) -> Union[Tuple[List[np.ndarray]], Tuple[List[torch.Tensor]]]:
    """
    Splits train and test numpy arrays or torch tensors into lists of
    arrays/tensors of a specified size. The remainders are not included.
    """
    all_data = [X_train, y_train, X_test, y_test]
    arrsize = sum([get_array_memsize(x) for x in all_data])
    store_on_cpu = (arrsize / 1e9) > memory_alloc
    X_train = array2list_(X_train, batch_size, store_on_cpu)
    y_train = array2list_(y_train, batch_size, store_on_cpu)
    X_test = array2list_(X_test, batch_size, store_on_cpu)
    y_test = array2list_(y_test, batch_size, store_on_cpu)
    return X_train, y_train, X_test, y_test
8b4703b9296a786d24ae97edf142fc917d9a3b07
3,659,575
import json

import requests


def livecoding_redirect_view(request):
    """ livecoding oauth2: fetch access token after permission dialog """
    code = request.GET.get('code')
    if code is None:
        return HttpResponse("code param is empty/not found")

    try:
        url = "https://www.livecoding.tv/o/token/"
        data = dict(code=code,
                    grant_type='authorization_code',
                    redirect_uri=LIVECODING_REDIRECT_URI,
                    client_id=LIVECODING_KEY,
                    client_secret=LIVECODING_SECRET)
        response = requests.post(url, data=data)
    except requests.exceptions.RequestException as e:
        print(e)
        return HttpResponse("Failed to make POST request to fetch token")

    res = json.loads(response.content)
    print(res)
    access_token = res['access_token']
    print(access_token)

    user = User.objects.get(username='admin')
    print(user)
    a, created = AccessToken.objects.get_or_create(user=user)
    print(a, created)
    a.access_token = access_token
    a.save()
    print(a)

    redirect = request.GET.get('redirect')
    if redirect is None:
        return HttpResponse(response.content)
    else:
        return HttpResponseRedirect(redirect)
acf06a996da8b70d982fa670080b48faeb452f60
3,659,576
from collections import OrderedDict


def sort_dict(value):
    """Sort a dictionary."""
    return OrderedDict((key, value[key]) for key in sorted(value))
93e03b64d44ab79e8841ba3ee7a3546c1e38d6e4
3,659,577
def hyb_stor_capacity_rule(mod, prj, prd):
    """
    Power capacity of a hybrid project's storage component.
    """
    return 0
86ed72e48738df66fca945ff8aaf976f0a7d14e0
3,659,578
import tensorflow as tf


def gilr_layer_cpu(X, hidden_size, nonlin=tf.nn.elu, name='gilr'):
    """
    g_t = sigmoid(Ux_t + b)
    h_t = g_t h_{t-1} + (1-g_t) f(Vx_t + c)
    """
    with vscope(name):
        n_dims = X.get_shape()[-1].value
        act = fc_layer(X, 2 * hidden_size, nonlin=tf.identity)
        gate, impulse = tf.split(act, 2, len(act.shape) - 1)
        gate = tf.sigmoid(gate)
        impulse = nonlin(impulse)
        return s_linear_recurrence_cpu(gate, (1 - gate) * impulse)
fce2d10be0b8ccb5923d1781795d08b562d602bf
3,659,579
import json
import urllib.error
import urllib.request


def activation(formula=None, instrument=None, flux=None, cdratio=0, fastratio=0,
               mass=None, exposure=24, getdata=False):
    """Calculate sample activation using the FRM II activation web services.

    ``formula``: the chemical formula, see below for possible formats

    The *flux* can be specified either by:

    ``instrument``: the instrument name to select flux data

    or:

    ``flux``: The thermal flux (for cold instruments use the equivalent
    thermal flux)

    ``cdratio``: The ratio between full flux and flux with 1mm Cd in the beam,
    0 to deactivate

    ``fastratio``: Thermal/fast neutron ratio, 0 to deactivate

    ``mass``: the sample mass in g

    ``exposure``: exposure time in h, default 24h

    ``getdata``: In addition to printing the result table, return a dict with
    the full results for further processing

    **Formula input format**

    Formula: ``CaCO3``

    Formula with fragments: ``CaCO3+6H2O``

    Formula with parentheses: ``HO ((CH2)2O)6 H``

    Formula with isotope: ``CaCO[18]3+6H2O``

    Counts can be integer or decimal: ``CaCO3+(3HO1.5)2``

    Mass fractions use %wt, with the final portion adding to 100%:
    ``10%wt Fe // 15% Co // Ni``

    Volume fractions use %vol, with the final portion adding to 100%:
    ``10%vol Fe@8.1 // Ni@8.5``

    For volume fractions you have to specify the density using ``@<density>``!

    Mixtures can nest. The following is a 10% salt solution by weight
    mixed 20:80 by volume with D2O:
    ``20%vol (10%wt NaCl@2.16 // H2O@1) // D2O@1``
    """
    if formula is None:
        try:
            # preparation for a future enhanced sample class
            formula = session.experiment.sample.formula
        except (ConfigurationError, AttributeError):
            # ConfigurationError is raised if no experiment is in session
            pass
    if formula is None:
        raise UsageError('Please give a formula')
    if flux:
        instrument = 'Manual'
    if instrument is None:
        try:
            instrument = session.instrument.instrument or None
        except ConfigurationError:
            pass
    if instrument is None:
        raise UsageError('Please specify an instrument or flux')
    if mass is None:
        try:
            mass = session.experiment.sample.mass
        except (ConfigurationError, AttributeError):
            pass
    if mass is None:
        raise UsageError('Please specify the sample mass')

    qs = '?json=1&formula=%(formula)s&instrument=%(instrument)s&mass=%(mass)g' \
        % locals()
    if flux:
        qs += '&fluence=%(flux)f&cdratio=%(cdratio)f&fastratio=%(fastratio)f' \
            % locals()
    qs = ACTIVATIONURL + qs
    try:
        with urllib.request.urlopen(qs) as response:
            data = json.load(response)
    except urllib.error.HTTPError as e:
        session.log.warning('Error opening: %s', qs)
        session.log.warning(e)
        return None
    if data['ecode'] == 'unknown instrument' and flux is None:
        session.log.warning('Instrument %s unknown to calculator, '
                            'specify flux manually', instrument)
        session.log.info('Known instruments')
        printTable(['instrument'], [(d, ) for d in data['instruments']],
                   session.log.info)

    if data['result']['activation']:
        h = data['result']['activation']['headers']
        th = [h['isotope'], h['daughter'], h['reaction'], h['Thalf_str']]
        for ha in h['activities']:
            th.append(ha)
        rows = []
        for r in data['result']['activation']['rows']:
            rd = [r['isotope'], r['daughter'], r['reaction'], r['Thalf_str']]
            for a in r['activities']:
                rd.append('%.3g' % a if a > 1e-6 else '<1e-6')
            rows.append(rd)
        dr = ['', '', '', 'Dose (uSv/h)']
        for d in data['result']['activation']['doses']:
            dr.append('%.3g' % d)
        rows.append(dr)

        printTable(th, rows, session.log.info)
    else:
        session.log.info('No activation')
    if getdata:
        return data
    return
40588f5d5d76625b759f6642205b28aba8b9ceb8
3,659,582
def wrap_parfor_blocks(parfor, entry_label=None):
    """wrap parfor blocks for analysis/optimization like CFG"""
    blocks = parfor.loop_body.copy()  # shallow copy is enough
    if entry_label is None:
        entry_label = min(blocks.keys())
    assert entry_label > 0  # we are using 0 for init block here

    # add dummy jump in init_block for CFG to work
    blocks[0] = parfor.init_block
    blocks[0].body.append(ir.Jump(entry_label, blocks[0].loc))
    for block in blocks.values():
        if len(block.body) == 0 or (not block.body[-1].is_terminator):
            block.body.append(ir.Jump(entry_label, block.loc))
    return blocks
03528c18c9cd1f8d9671d12e0a4fa8668003305b
3,659,583
from collections import Counter
from operator import itemgetter


def frequency_of_occurrence(words, specific_words=None):
    """
    Returns a list of (instance, count) sorted in total order and then from most
    to least common, along with the count/frequency of each of those words as a tuple.
    If a specific_words list is present then the SUM of the frequencies of
    specific_words is returned.
    """
    freq = sorted(sorted(Counter(words).items(), key=itemgetter(0)),
                  key=itemgetter(1), reverse=True)
    if not specific_words:
        return freq
    else:
        frequencies = 0
        for (inst, count) in freq:
            if inst in specific_words:
                frequencies += count

        return float(frequencies)
a98670a89e843774bd1237c0d2e518d2cd8fb242
3,659,584
from typing import Union


def cache_remove_all(
    connection: 'Connection', cache: Union[str, int], binary=False,
    query_id=None,
) -> 'APIResult':
    """
    Removes all entries from cache, notifying listeners and cache writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned as-is
     in response.query_id. When the parameter is omitted, a random value
     is generated,
    :return: API result data object. Contains zero status on success,
     non-zero status and an error description otherwise.
    """
    query_struct = Query(
        OP_CACHE_REMOVE_ALL,
        [
            ('hash_code', Int),
            ('flag', Byte),
        ],
        query_id=query_id,
    )
    return query_struct.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
        },
    )
81e7cdbae9b3a04e205e15275dee2c45caa96d36
3,659,585
from typing import List


def choices_function() -> List[str]:
    """Choices functions are useful when the choice list is dynamically generated
    (e.g. from data in a database)"""
    return ['a', 'dynamic', 'list', 'goes', 'here']
30b4b05435bacc0a42c91a3f0be09a90098a012f
3,659,587
def GetInfraPythonPath(hermetic=True, master_dir=None):
    """Returns (PythonPath): The full working Chrome Infra utility path.

    This path is consistent for master, slave, and tool usage. It includes (in
    this order):
      - Any environment PYTHONPATH overrides.
      - If 'master_dir' is supplied, the master's python path component.
      - The Chrome Infra build path.
      - The system python path.

    Args:
      hermetic (bool): If True, prune any non-system path from the system path.
      master_dir (str): If not None, include a master path component.
    """
    path = PythonPath()
    if master_dir:
        path += GetMasterPythonPath(master_dir)
    path += GetBuildPythonPath()
    path += GetSysPythonPath(hermetic=hermetic)
    return path
a43486c68559e42606a3a55444c998640529ef2b
3,659,588
import uuid


def nodeid():
    """nodeid() -> UUID

    Generate a new node id

    >>> nodeid()
    UUID('...')

    :returns: node id
    :rtype: :class:`uuid.UUID`
    """
    return uuid.uuid4()
88a3ddc335ce2ca07bfc0e2caf8487dc2342e80f
3,659,589
def drop_redundant_cols(movies_df):
    """
    Drop the following redundant columns:
        1. `release_date_wiki` - after dropping the outlier
        2. `revenue` - after using it to fill `box_office` missing values
        3. `budget_kaggle` - after using it to fill `budget_wiki` missing values
        4. `duration` - after using it to fill `runtime` missing values

    Parameters
    ----------
    movies_df : Pandas dataframe
        Joined movie data

    Returns
    -------
    Pandas dataframe
        Movie data with redundant columns dropped
    """
    # Drop record with `release_date` outlier and `release_date_wiki` column
    outlier_index = movies_df.loc[(movies_df['release_date_wiki'] > '2000') &
                                  (movies_df['release_date_kaggle'] < '1960')].index
    movies_df.drop(outlier_index, inplace=True)
    movies_df.drop('release_date_wiki', axis=1, inplace=True)

    # Pairs of redundant columns
    redundant_pairs = [
        ['box_office', 'revenue'],
        ['budget_wiki', 'budget_kaggle'],
        ['runtime', 'duration']
    ]

    # Fill the first column and drop the second column for each pair
    for a, b in redundant_pairs:
        movies_df = filla_dropb(a, b, movies_df)

    return movies_df
f4fb2c98eafc4ec9074cbc659510af30c7155b9c
3,659,590
import pickle
import gzip
import collections
import re
import fnmatch


def get_msids_for_add_msids(opt, logger):
    """
    Parse MSIDs spec file (opt.add_msids) and return corresponding list of MSIDs.

    This implements support for a MSID spec file like::

      # MSIDs that match the name or pattern are included, where * matches
      # anything (0 or more characters) while ? matches exactly one character:
      aopcadm?
      aacccd*

      # MSIDs with the same subsystem and sampling rate as given MSIDs are included.
      # Example: */1wrat gives all acis4eng engineering telemetry.
      */1wrat

      # MSIDs with the same subsystem regardless of sampling rate.
      # Example: **/3tscpos gives all engineering SIM telemetry
      **/3tscpos

    :param opt: options
    :param logger: logger
    :return: msids_out, msids_content (mapping of MSID to content type)
    """
    logger.info(f'Reading available cheta archive MSIDs from {opt.sync_root}')
    with get_readable(opt.sync_root, opt.is_url, sync_files['msid_contents']) as (tmpfile, uri):
        if tmpfile is None:
            # If index_file is not found then get_readable returns None
            logger.info(f'No cheta MSIDs list file found at {uri}')
            return None

        logger.info(f'Reading cheta MSIDs list file {uri}')
        msids_content = pickle.load(gzip.open(tmpfile, 'rb'))

    content_msids = collections.defaultdict(list)
    for msid, content in msids_content.items():
        content_msids[content].append(msid)

    logger.info(f'Reading MSID specs from {opt.add_msids}')
    with open(opt.add_msids) as fh:
        lines = [line.strip() for line in fh.readlines()]
        msid_specs = [line.upper() for line in lines if (line and not line.startswith('#'))]

    logger.info('Assembling list of MSIDs that match MSID specs')

    msids_out = []
    for msid_spec in msid_specs:
        if msid_spec.startswith('**/'):
            msid_spec = msid_spec[3:]
            content = msids_content[msid_spec]
            subsys = re.match(r'([^\d]+)', content).group(1)
            for content, msids in content_msids.items():
                if content.startswith(subsys):
                    logger.info(f'  Found {len(msids)} MSIDs for **/{msid_spec} with '
                                f'content = {content}')
                    msids_out.extend(msids)

        elif msid_spec.startswith('*/'):
            msid_spec = msid_spec[2:]
            content = msids_content[msid_spec]
            msids = content_msids[content]
            logger.info(f'  Found {len(msids)} MSIDs for */{msid_spec} with '
                        f'content = {content}')
            msids_out.extend(msids)

        else:
            msids = [msid for msid in msids_content if fnmatch.fnmatch(msid, msid_spec)]
            if not msids:
                raise ValueError(f'no MSID matching {msid_spec} (remember derived params like PITCH '
                                 'must be written as "dp_<MSID>"')
            logger.info(f'  Found {len(msids)} MSIDs for {msid_spec}')
            msids_out.extend(msids)

    logger.info(f'  Found {len(msids_out)} matching MSIDs total')

    return msids_out, msids_content
b7f2a5b9f1452c8f43684223313716253e27848b
3,659,591
import numpy as np


def gaussian_similarity(stimulus_representation, i, j, w, c, r):
    """
    Function that calculates and returns the gaussian similarity of stimuli i and j
    (equation 4b in [Noso86]_)

    Parameters
    ----------
    stimulus_representation : np.array
        The stimuli are given to this function in the form of a n x N matrix,
        where n is the number of stimuli and N is the number of dimensions of
        each stimuli in the psychological space
    i : int
        Stimulus i
    j : int
        Stimulus j
    w : list
        This is the list of weights corresponding to each dimension of the
        stimulus in the psychological space
    c : int
        This is the scale parameter used in the distance calculation
    r : int
        This is the Minkowski's distance metric. A value of 1 corresponds to
        city-block metric (generally used when the stimuli has separable
        dimensions); a value of 2 corresponds to euclidean distance metric
        (generally used when the stimuli has integral dimensions)

    Returns
    -------
    np.float64
        The Gaussian similarity between the two stimulus
    """

    def distance():
        """
        Calculates the distance between two stimulus (equation 6 in [Noso86]_)

        Returns
        -------
        np.float64
            Distance scaled by the scale parameter 'c'
        """
        sum = 0.0
        N = np.shape(stimulus_representation)[1]
        for idx in range(N):
            sum += (w[idx] * (stimulus_representation[i, idx] - stimulus_representation[j, idx]) ** r)
        sum = sum ** (1 / r)
        return c * sum

    return np.exp(-(distance()) ** 2)
e1436a26d4f028f237e03d40590cb6b7405b3f16
3,659,592
def windShearVector(u, v, top, bottom, unit=None):
    """ calculate the u and v layer difference and return as vector """
    udiff = layerDiff(u, top, bottom, unit)
    vdiff = layerDiff(v, top, bottom, unit)
    return makeVector(udiff, vdiff)
fa9fe1869621c04f00004a8a5c01e78d4faa3221
3,659,593
def withdraw_entry(contest):
    """Withdraws a submitted entry from the contest.

    After this step the submitted entry will be seen as a draft.
    """
    return _update_sketch(contest, code=None, action="withdraw")
fdedfeb61e0fe3b47918b66ca1f9dfd56450e39c
3,659,594
def _conditional_field(if_, condition, colon, comment, eol, indent, body, dedent):
    """Formats an `if` construct."""
    del indent, dedent  # Unused
    # The body of an 'if' should be columnized with the surrounding blocks, so
    # much like an inline 'bits', its body is treated as an inline list of blocks.
    header_row = _Row('if', ['{} {}{} {}'.format(if_, condition, colon, comment)])
    indented_body = _indent_blocks(body)
    assert indented_body, 'Expected body of if condition.'
    return [_Block([header_row] + eol + indented_body[0].prefix,
                   indented_body[0].header,
                   indented_body[0].body)] + indented_body[1:]
09c357659d5f78946d74cddc83f3e4c5c9cad0ed
3,659,595
def check_sum_cases(nation='England'):
    """check total data"""
    ck = LocalLatest()
    fail = False
    data = ck.data.get('data')
    latest = {}
    data = clean_cases(data)  # repair glitches

    # check latest data matches stored data for nation
    for i in data:
        _code = i['areaCode']
        latest[_code] = i
        try:
            _nation = ons_week.nation[_code]
        except Exception as e:
            log.error(e)
            log.error(i['areaName'])
            continue
        if _nation == nation:
            if _code in ons_week.stored_names:
                place = ons_week.stored_names[_code]
                _total = DailyCases.objects.filter(areaname=place).aggregate(
                    Max('totalLabConfirmedCases')).get('totalLabConfirmedCases__max')
                _latest = i['cumCasesByPublishDate']
                if _total != _latest:
                    print(f'Mismatch: {place} Latest total {_latest} != stored {_total}')
                    fail = True
                else:
                    # print(f'{place} up to date')
                    pass
            else:
                place = i['areaName']
                print(f'{place} not counted / not in TR tally')

    sumtotal = 0
    for _code in ons_week.stored_names:
        if ons_week.nation[_code] == nation:
            i = latest.get(_code)
            if i:
                _latest = i['cumCasesByPublishDate']
                _total = DailyCases.objects.filter(areacode=_code).aggregate(
                    Max('totalLabConfirmedCases')).get('totalLabConfirmedCases__max')
                if _latest != _total:
                    print(f'Mismatch: {_code} Latest total {_latest} != stored {_total}')
                else:
                    if _latest:
                        sumtotal += _latest
            else:
                print(f'Missing place {_code} in PHE published cases')
    print(f'Sum total of stored names for {nation} is {sumtotal}')
    return fail
5f30d4a856c21c1397e2f1cdd9a0ee03d026b5a2
3,659,596
from inspect import getmodulename, stack


def get_module_name() -> str:
    """Gets the name of the module that called a function

    Is meant to be used within a function.

    :returns: The name of the module that called your function
    """
    return getmodulename(stack()[2][1])
12541aa8445ebd796657d76d3001523882202ea0
3,659,597
def elements_counter(arr, count=0):
    """Recursively count the number of elements in a list.

    Arguments:
        arr {list} -- the list

    Keyword Arguments:
        count {int} -- number of elements counted so far (default: {0})

    Returns:
        [int] -- the number of elements contained in the list
    """
    if len(arr):
        arr.pop(0)
        count += 1
        return elements_counter(arr, count)
    return count
80809781fd2d6a7a2fa92a4b7d5713771a07f8eb
3,659,599
from typing import Dict


def dataset_is_open_data(dataset: Dict) -> bool:
    """Check if dataset is tagged as open data."""
    is_open_data = dataset.get("isOpenData")
    if is_open_data:
        return is_open_data["value"] == "true"
    return False
fc1591d4a045ba904658bb93577a364145492465
3,659,600
def _remove_suffix_apple(path):
    """
    Strip off .so or .dylib.

    >>> _remove_suffix_apple("libpython.so")
    'libpython'
    >>> _remove_suffix_apple("libpython.dylib")
    'libpython'
    >>> _remove_suffix_apple("libpython3.7")
    'libpython3.7'
    """
    if path.endswith(".dylib"):
        return path[:-len(".dylib")]
    if path.endswith(".so"):
        return path[:-len(".so")]
    return path
c5526b0f3420625c2efeba225187f72c7a51fb4b
3,659,601
def sparsenet201(**kwargs):
    """
    SparseNet-201 model from 'Sparsely Aggregated Convolutional Networks,'
    https://arxiv.org/abs/1801.05895.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sparsenet(num_layers=201, model_name="sparsenet201", **kwargs)
83de415e043876ae90dd3f79c3ca26dd82d8c5df
3,659,602
def alt_text_to_curly_bracket(text):
    """
    Converts the text that appears in the alt attribute of image tags from gatherer
    to a curly-bracket mana notation.
    ex: 'Green'->{G}, 'Blue or Red'->{U/R}
        'Variable Colorless' -> {XC}
        'Colorless' -> {C}
        'N colorless' -> {N}, where N is some number
    """
    def convert_color_to_letter(color):
        if color.lower() not in ('red', 'white', 'blue', 'green', 'black', 'colorless', 'tap', 'energy'):
            # some cards have weird split mana costs where you can pay N colorless
            # or one of a specific color.
            # Since we're ending up here, and what we're given isn't a color, lets assume its N
            return color
        else:
            if color.lower() == 'blue':
                return 'U'
            else:
                return color[0].upper()

    try:
        val = int(text, 10)
    except Exception:
        pass
    else:
        # This is just a number. Easy enough.
        return f"{{{text}}}"

    if ' or ' in text:
        # this is a compound color, not as easy to deal with.
        text = text.replace('or', '')
        text = '/'.join([convert_color_to_letter(x) for x in text.split()])
    else:
        if 'Variable' in text:
            text = 'X'
        else:
            # hopefully all that's left is just simple color symbols.
            text = convert_color_to_letter(text)

    # at this point we've hopefully reduced the text to a mana symbol
    return f"{{{text}}}"
c604b236a8d0baeff244e0e246176a406674c9e2
3,659,603
def massage_primary(repo_primary, src_cache, cdt):
    """
    Massages the result of dictify() into a less cumbersome form.
    In particular:
    1. There are many lists that can only be of length one that
       don't need to be lists at all.
    2. The '_text' entries need to go away.
    3. The real information starts at ['metadata']['package']
    4. We want the top-level key to be the package name and under that,
       an entry for each arch for which the package exists.
    """

    new_dict = dict({})
    for package in repo_primary['metadata']['package']:
        name = package['name'][0]['_text']
        arch = package['arch'][0]['_text']
        if arch == 'src':
            continue
        checksum = package['checksum'][0]['_text']
        source = package['format'][0]['{rpm}sourcerpm'][0]['_text']
        # If you need to check if the sources exist (perhaps you've got the source URL wrong
        # or the distro has forgotten to copy them?):
        # import requests
        # sbase_url = cdt['sbase_url']
        # surl = sbase_url + source
        # print("{} {}".format(requests.head(surl).status_code, surl))
        location = package['location'][0]['href']
        version = package['version'][0]
        summary = package['summary'][0]['_text']
        try:
            description = package['description'][0]['_text']
        except:
            description = "NA"
        if '_text' in package['url'][0]:
            url = package['url'][0]['_text']
        else:
            url = ''
        license = package['format'][0]['{rpm}license'][0]['_text']
        try:
            provides = package['format'][0]['{rpm}provides'][0]['{rpm}entry']
            provides = massage_primary_requires(provides, cdt)
        except:
            provides = []
        try:
            requires = package['format'][0]['{rpm}requires'][0]['{rpm}entry']
            requires = massage_primary_requires(requires, cdt)
        except:
            requires = []
        new_package = dict({'checksum': checksum,
                            'location': location,
                            'home': url,
                            'source': source,
                            'version': version,
                            'summary': yaml_quote_string(summary),
                            'description': description,
                            'license': license,
                            'provides': provides,
                            'requires': requires})
        if name in new_dict:
            if arch in new_dict[name]:
                print("WARNING: Duplicate packages exist for {} for arch {}".format(name, arch))
            new_dict[name][arch] = new_package
        else:
            new_dict[name] = dict({arch: new_package})
    return new_dict
fd57ff925b46eb5adddee2c180fbc01b3c60ec7c
3,659,604
def ansi_color_name_to_escape_code(name, style="default", cmap=None):
    """Converts a color name to the inner part of an ANSI escape code"""
    cmap = _ensure_color_map(style=style, cmap=cmap)
    if name in cmap:
        return cmap[name]
    m = RE_XONSH_COLOR.match(name)
    if m is None:
        raise ValueError("{!r} is not a color!".format(name))
    parts = m.groupdict()
    # convert regex match into actual ANSI colors
    if parts["reset"] is not None:
        if parts["reset"] == "NO_COLOR":
            warn_deprecated_no_color()
        res = "0"
    elif parts["bghex"] is not None:
        res = "48;5;" + rgb_to_256(parts["bghex"][3:])[0]
    elif parts["background"] is not None:
        color = parts["color"]
        if "#" in color:
            res = "48;5;" + rgb_to_256(color[1:])[0]
        else:
            fgcolor = cmap[color]
            if fgcolor.isdecimal():
                res = str(int(fgcolor) + 10)
            elif fgcolor.startswith("38;"):
                res = "4" + fgcolor[1:]
            elif fgcolor == "DEFAULT":
                res = "39"
            else:
                msg = (
                    "when converting {!r}, did not recognize {!r} within "
                    "the following color map as a valid color:\n\n{!r}"
                )
                raise ValueError(msg.format(name, fgcolor, cmap))
    else:
        # have regular, non-background color
        mods = parts["modifiers"]
        if mods is None:
            mods = []
        else:
            mods = mods.strip("_").split("_")
            mods = [ANSI_ESCAPE_MODIFIERS[mod] for mod in mods]
        color = parts["color"]
        if "#" in color:
            mods.append("38;5;" + rgb_to_256(color[1:])[0])
        elif color == "DEFAULT":
            res = "39"
        else:
            mods.append(cmap[color])
        res = ";".join(mods)
    cmap[name] = res
    return res
70b8fe19fc34d14c678c9e54890a4da7e0e37c24
3,659,605
import shlex
import getopt
from datetime import datetime, timedelta


def twitter(bot, message):
    """#twitter [-p days]
    -p : how many days ago
    """
    try:
        cmd, *args = shlex.split(message.text)
    except ValueError:
        return False
    if not cmd[0] in config['trigger']:
        return False
    if not cmd[1:] == 'twitter':
        return False
    try:
        options, args = getopt.gnu_getopt(args, 'hp:')
    except getopt.GetoptError:
        # bad format
        reply(bot, message, twitter.__doc__)
        return True
    days = 0
    for o, a in options:
        if o == '-p':
            # how many days ago
            try:
                days = int(a)
                if days < 0:
                    raise ValueError
            except ValueError:
                reply(bot, message, twitter.__doc__)
                return True
        elif o == '-h':
            # help
            reply(bot, message, twitter.__doc__)
            return True

    tweets = Twitter.objects(Q(date__gte=datetime.now().date() + timedelta(days=-days)) &
                             Q(date__lte=datetime.now().date() + timedelta(days=-days + 1)))
    if tweets:
        reply(bot, message, '\n---------\n'.join([str(tweet) for tweet in tweets]))
        return True
    else:
        reply(bot, message, '安娜啥都没说...')
        return True
ba02cebb5a680f26f2eb17c32b62ead9ac3995a3
3,659,606
import logging


def get_zones(request):
    """Returns preprocessed thermal data for a given request or None."""
    logging.info("received zone request: %s", request.building)

    zones, err = _get_zones(request.building)
    if err is not None:
        return None, err

    grpc_zones = []
    for zone in zones:
        grpc_zones.append(
            building_zone_names_pb2.NamePoint(name=zone))

    return building_zone_names_pb2.Reply(names=grpc_zones), None
b04dca4da5b68faea64744c9c7093a977eb120c1
3,659,608
import numpy as np


def proper_classification(sp):
    """
    Uses splat.classifyByStandard to classify spectra using spex standards
    """
    #sp.slitpixelwidth=1
    #sp.slitwidth=1
    #sp.toInstrument('WFC3-G141')
    wsp = wisps.Spectrum(wave=sp.wave.value, flux=sp.flux.value,
                         noise=sp.noise.value, contam=np.ones_like(sp.noise.value))
    val = wisps.classify(wsp, stripunits=True)
    return val
31529d96fbc4fec69a5996fb33829be4caf51529
3,659,609
from typing import Tuple

import torch


def sum_last_4_layers(sequence_outputs: Tuple[torch.Tensor]) -> torch.Tensor:
    """Sums the last 4 hidden representations of a sequence output of BERT.

    Args:
    -----
    sequence_outputs: Tuple of tensors of shape (batch, seq_length, hidden_size).
        For BERT base, the Tuple has length 13.

    Returns:
    --------
    summed_layers: Tensor of shape (batch, seq_length, hidden_size)
    """
    last_layers = sequence_outputs[-4:]
    return torch.stack(last_layers, dim=0).sum(dim=0)
14bba441a116712d1431b1ee6dda33dc5ec4142c
3,659,610
def TotalCust():
    """(read-only) Total Number of customers served from this line section."""
    return lib.Lines_Get_TotalCust()
58984b853cdd9587c7db5ff6c30b7af20a64985a
3,659,611
import re


def extra_normalize(text_orig: str):
    """
    This function applies a simple normalization to the original text to make
    the aligning process possible.
    The replacement_patterns were obtained during experimentation with real text;
    it is possible to add more, or to get some errors without new rules.

    :Note: very important, every rule in replacement_patterns does not change the
    length of the original text; it only replaces patterns with strings of the
    same length. This process is different to preProcessFlow.
    """
    replacement_patterns = [(r'[:](?=\s*?\n)', '##1'),
                            (r'\xc2|\xa0', ' '),
                            (r'(\w\s*?):(?=\s+?[A-Z]+?)|(\w\s*?):(?=\s*?"+?[A-Z]+?)', '\g<1>##2'),
                            (r'[?!]', '##3'),
                            # any alphanumeric char followed by \n followed by any number of
                            # punctuation signs followed by a capital letter or digit:
                            # replace by the alphanumeric char + ##4.
                            (r'(\w+?)(\n)(?=["$%()*+&,-/;:¿¡<=>@[\\]^`{|}~\t\s]*(?=.*[A-Z0-9]))', '\g<1>##4'),
                            # any alphanumeric char followed by \n followed by any number of
                            # punctuation signs followed by a letter or digit:
                            # replace by the alphanumeric char + ##5.
                            (r'(\w+?)(\n)(?=["$%()*+&,-/;:¿¡<=>@[\\]^`{|}~\t\s\n]*(?=[a-zA-Z0-9]))', '\g<1>##5'),
                            (r'[:](?=\s*?)(?=["$%()*+&,-/;:¿¡<=>@[\\]^`{|}~\t\s]*[A-Z]+?)', '##6'),
                            (r'(\w+?\s*?)\|', '\g<1>##7'),
                            (r'\n(?=\s*?[A-Z]+?)', '##8'),
                            (r'##\d', 'apdbx'),
                            ]

    for (pattern, repl) in replacement_patterns:
        (text_orig, count) = re.subn(pattern, repl, text_orig)

    text_orig = replace_dot_sequence(text_orig)
    text_orig = multipart_words(text_orig)
    text_orig = abbreviations(text_orig)
    text_orig = re.sub(r'apdbx+', '.', text_orig)
    # append a final '.' if the last character has no period; avoids an infinite loop at the end.
    text_orig = add_doc_ending_point(text_orig)

    return text_orig
d06ee939c8035cd7b83ed7f1577b383bfcaf203d
3,659,612
def list2str(lst: list) -> str:
    """
    Convert the elements of a list to strings so that printing outputs one item
    per line, each prefixed with its index (starting from 1).

    e.g.
    In:  lst = [a, b, c]
         s = list2str(lst)
         print(s)
    Out: 1. a
         2. b
         3. c
    """
    i = 1
    res_list = []
    for x in lst:
        res_list.append(str(i) + '. ' + str(x))
        i += 1
    res_str = '\n'.join(res_list)
    return res_str
3da11748d650e234c082255b8d7dff5e56e65732
3,659,613
def _prompt_save():  # pragma: no cover
    """Show a prompt asking the user whether he wants to save or not.

    Output is 'save', 'cancel', or 'close'
    """
    b = prompt(
        "Do you want to save your changes before quitting?",
        buttons=['save', 'cancel', 'close'],
        title='Save')
    return show_box(b)
859cbbe94ef35bf434b1c4f6cac9ec61a6311fb8
3,659,614
import matplotlib.pyplot as plt
import numpy as np


def plot_dataset_samples_1d(
    dataset,
    n_samples=10,
    title="Dataset",
    figsize=DFLT_FIGSIZE,
    ax=None,
    plot_config_kwargs={},
    seed=123,
):
    """Plot `n_samples` samples of the dataset."""
    np.random.seed(seed)

    with plot_config(plot_config_kwargs):
        if ax is None:
            fig, ax = plt.subplots(1, 1, figsize=figsize)

        alpha = 0.5 + 1 / (n_samples ** 0.5 + 1)

        for i in range(n_samples):
            x, y = dataset[np.random.randint(len(dataset))]
            x = rescale_range(x, (-1, 1), dataset.min_max)
            ax.plot(x.numpy(), y.numpy(), alpha=alpha)

        ax.set_xlim(*dataset.min_max)

    if title is not None:
        ax.set_title(title, fontsize=14)

    return ax
41b34e276a0236d46e13d7b0f24797e739384661
3,659,615
def list_versions(namespace, name, provider):
    """List versions for a module.

    Args:
        namespace (str): namespace for the version
        name (str): Name of the module
        provider (str): Provider for the module

    Returns:
        response: JSON formatted response
    """
    try:
        return make_response(backend.get_versions(namespace, name, provider), 200)
    except ModuleNotFoundException as module_not_found:
        return make_response(module_not_found.message, 404)
dca0c24f391cce69a10fe7e61165647c9ce1cf66
3,659,616
import requests


def script_cbor(self, script_hash: str, **kwargs):
    """
    CBOR representation of a plutus script

    https://docs.blockfrost.io/#tag/Cardano-Scripts/paths/~1scripts~1{script_hash}~1cbor/get

    :param script_hash: Hash of the script.
    :type script_hash: str
    :param return_type: Optional. "object", "json" or "pandas". Default: "object".
    :type return_type: str
    :returns: A list of ScriptCborResponse objects.
    :rtype: [ScriptCborResponse]
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    return requests.get(
        url=f"{self.url}/scripts/{script_hash}/cbor",
        headers=self.default_headers
    )
fdb71d1e95d67da4a18552f4e42a28e27c7ab95a
3,659,617
def ithOfNPointsOnCircleY(i, n, r):
    """
    return y coordinate of ith value of n points on circle of radius r
    points are numbered from 0 through n-1, spread counterclockwise around circle
    point 0 is at angle 0, as if on a unit circle, i.e. at point (0,r)
    """
    # Hints: similar to ithOfNPointsOnCircleX, but use r sin (theta)
    return "stub"
d4e697145423146b085f8423315c795745498afd
3,659,618
def get_tags(ec2id, ec2type, region):
    """
    get tags
    return tags (json)
    """
    mytags = []
    ec2 = connect('ec2', region)
    if ec2type == 'volume':
        response = ec2.describe_volumes(VolumeIds=[ec2id])
        if 'Tags' in response['Volumes'][0]:
            mytags = response['Volumes'][0]['Tags']
    elif ec2type == 'snapshot':
        response = ec2.describe_snapshots(SnapshotIds=[ec2id])
        if 'Tags' in response['Snapshots'][0]:
            mytags = response['Snapshots'][0]['Tags']
    return mytags
c150d83b6563f79140febb65a4ea7e50bc733286
3,659,619
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data:    (string)  text data to parse
        raw:     (boolean) unprocessed output if True
        quiet:   (boolean) suppress warning messages if True

    Returns:

        Dictionary. Raw or processed structured data.
    """
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)

    raw_output = {}

    if jc.utils.has_data(data):

        for line in filter(None, data.splitlines()):
            linedata = line.split(':', maxsplit=1)
            key = linedata[0].strip().lower().replace(' ', '_').replace('.', '_')
            value = linedata[1].strip()
            raw_output[key] = value

    if raw:
        return raw_output
    else:
        return _process(raw_output)
dd7da8a23a0691dc2df75391c77fa1448362330a
3,659,620
def callNasaApi(date='empty'):
    """calls NASA APIs

    Args:
        date (str, optional): date for nasa APOD API. Defaults to 'empty'.

    Returns:
        Dict: custom API response
    """
    print('calling nasa APOD API...')

    url = nasaInfo['nasa_apod_api_uri']
    if date != 'empty':
        params = getApodEndpointParams('True', date)
    else:
        params = getApodEndpointParams('True')
    response = makeApiCall(url, params, HttpMethods.get.value)

    return response
5eaa7fe9434c608df47828c8ce19d4e5e5cfe799
3,659,622
import numpy as np
import sklearn.base
import sklearn.linear_model
import sklearn.pipeline
import sklearn.random_projection


def train_reduced_model(x_values: np.ndarray, y_values: np.ndarray,
                        n_components: int, seed: int,
                        max_iter: int = 10000) -> sklearn.base.BaseEstimator:
    """
    Train a reduced-quality model by putting a Gaussian random projection in
    front of the multinomial logistic regression stage of the pipeline.

    :param x_values: input embeddings for training set
    :param y_values: integer labels corresponding to embeddings
    :param n_components: Number of dimensions to reduce the embeddings to
    :param seed: Random seed to drive Gaussian random projection
    :param max_iter: Maximum number of iterations of L-BFGS to run. The default
     value of 10000 will achieve a tight fit but takes a while.

    :returns: A model (Python object with a `predict()` method) fit on the
     input training data with the specified level of dimension reduction
     by random projection.
    """
    reduce_pipeline = sklearn.pipeline.Pipeline([
        ("dimred", sklearn.random_projection.GaussianRandomProjection(
            n_components=n_components,
            random_state=seed
        )),
        ("mlogreg", sklearn.linear_model.LogisticRegression(
            multi_class="multinomial",
            max_iter=max_iter
        ))
    ])
    print(f"Training model with n_components={n_components} and seed={seed}.")
    return reduce_pipeline.fit(x_values, y_values)
ab2871875c751b5d7abb56991a55607e79c17e6e
3,659,623
import numpy as np


def pv(array):
    """Return the PV value of the valid elements of an array.

    Parameters
    ----------
    array : `numpy.ndarray`
        array of values

    Returns
    -------
    `float`
        PV of the array

    """
    non_nan = np.isfinite(array)
    return array[non_nan].max() - array[non_nan].min()
987ae80fa68cd1dee3e179975b283bb7f48dd2aa
3,659,624
import numpy as np


def format_bad_frames(bad_frames):
    """Create an array of bad frame indices from string loaded from yml file."""

    if bad_frames == "":
        bads = []
    else:
        try:
            bads = [x.split("-") for x in bad_frames.split(",")]
            bads = [[int(x) for x in y] for y in bads]
            bads = np.concatenate(
                [
                    np.array(x) if len(x) == 1 else np.arange(x[0], x[1] + 1)
                    for x in bads
                ]
            )
        except:
            bads = []

    bads = list(bads)
    bads = [x.item() for x in bads]

    return bads
433bcba8cc8bf7985a8103d595759d04099dce6a
3,659,625