Dirus/GPTOWN
repo (string, 7-55 chars) | path (string, 4-223 chars) | url (string, 87-315 chars) | code (string, 75-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (1 class) | partition (3 classes) | avg_line_len (float64, 7.91-980)
---|---|---|---|---|---|---|---|---|---
has2k1/mizani | mizani/utils.py | https://github.com/has2k1/mizani/blob/312d0550ee0136fd1b0384829b33f3b2065f47c8/mizani/utils.py#L257-L277 | def same_log10_order_of_magnitude(x, delta=0.1):
"""
Return true if range is approximately in same order of magnitude
For example these sequences are in the same order of magnitude:
- [1, 8, 5] # [1, 10)
- [35, 20, 80] # [10, 100)
- [232, 730] # [100, 1000)
Parameters
----------
x : array-like
Values in base 10. Must be size 2 and
``rng[0] <= rng[1]``.
delta : float
Fuzz factor for approximation. It is multiplicative.
"""
dmin = np.log10(np.min(x)*(1-delta))
dmax = np.log10(np.max(x)*(1+delta))
return np.floor(dmin) == np.floor(dmax) | [
"def",
"same_log10_order_of_magnitude",
"(",
"x",
",",
"delta",
"=",
"0.1",
")",
":",
"dmin",
"=",
"np",
".",
"log10",
"(",
"np",
".",
"min",
"(",
"x",
")",
"*",
"(",
"1",
"-",
"delta",
")",
")",
"dmax",
"=",
"np",
".",
"log10",
"(",
"np",
".",
"max",
"(",
"x",
")",
"*",
"(",
"1",
"+",
"delta",
")",
")",
"return",
"np",
".",
"floor",
"(",
"dmin",
")",
"==",
"np",
".",
"floor",
"(",
"dmax",
")"
] | Return true if range is approximately in same order of magnitude
For example these sequences are in the same order of magnitude:
- [1, 8, 5] # [1, 10)
- [35, 20, 80] # [10, 100)
- [232, 730] # [100, 1000)
Parameters
----------
x : array-like
Values in base 10. Must be size 2 and
``rng[0] <= rng[1]``.
delta : float
Fuzz factor for approximation. It is multiplicative. | [
"Return",
"true",
"if",
"range",
"is",
"approximately",
"in",
"same",
"order",
"of",
"magnitude"
] | python | valid | 29.619048 |
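
A quick standalone sketch of the function above, so it can be tried without installing mizani; it assumes only that numpy is available, and the two sample calls are illustrative values, not taken from the repository:

```python
import numpy as np

def same_log10_order_of_magnitude(x, delta=0.1):
    # Same logic as the row above: the fuzzed min and max must share a log10 floor.
    dmin = np.log10(np.min(x) * (1 - delta))
    dmax = np.log10(np.max(x) * (1 + delta))
    return np.floor(dmin) == np.floor(dmax)

print(same_log10_order_of_magnitude([35, 20, 80]))  # True: both ends stay inside [10, 100)
print(same_log10_order_of_magnitude([8, 12]))       # False: the fuzzed range crosses 10
```
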
gitpython-developers/GitPython | git/compat.py | https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/compat.py#L161-L185 | def surrogateescape_handler(exc):
"""
Pure Python implementation of the PEP 383: the "surrogateescape" error
handler of Python 3. Undecodable bytes will be replaced by a Unicode
character U+DCxx on decoding, and these are translated into the
original bytes on encoding.
"""
mystring = exc.object[exc.start:exc.end]
try:
if isinstance(exc, UnicodeDecodeError):
# mystring is a byte-string in this case
decoded = replace_surrogate_decode(mystring)
elif isinstance(exc, UnicodeEncodeError):
# In the case of u'\udcc3'.encode('ascii',
# 'this_surrogateescape_handler'), both Python 2.x and 3.x raise an
# exception anyway after this function is called, even though I think
# it's doing what it should. It seems that the strict encoder is called
# to encode the unicode string that this function returns ...
decoded = replace_surrogate_encode(mystring, exc)
else:
raise exc
except NotASurrogateError:
raise exc
return (decoded, exc.end) | [
"def",
"surrogateescape_handler",
"(",
"exc",
")",
":",
"mystring",
"=",
"exc",
".",
"object",
"[",
"exc",
".",
"start",
":",
"exc",
".",
"end",
"]",
"try",
":",
"if",
"isinstance",
"(",
"exc",
",",
"UnicodeDecodeError",
")",
":",
"# mystring is a byte-string in this case",
"decoded",
"=",
"replace_surrogate_decode",
"(",
"mystring",
")",
"elif",
"isinstance",
"(",
"exc",
",",
"UnicodeEncodeError",
")",
":",
"# In the case of u'\\udcc3'.encode('ascii',",
"# 'this_surrogateescape_handler'), both Python 2.x and 3.x raise an",
"# exception anyway after this function is called, even though I think",
"# it's doing what it should. It seems that the strict encoder is called",
"# to encode the unicode string that this function returns ...",
"decoded",
"=",
"replace_surrogate_encode",
"(",
"mystring",
",",
"exc",
")",
"else",
":",
"raise",
"exc",
"except",
"NotASurrogateError",
":",
"raise",
"exc",
"return",
"(",
"decoded",
",",
"exc",
".",
"end",
")"
] | Pure Python implementation of the PEP 383: the "surrogateescape" error
handler of Python 3. Undecodable bytes will be replaced by a Unicode
character U+DCxx on decoding, and these are translated into the
original bytes on encoding. | [
"Pure",
"Python",
"implementation",
"of",
"the",
"PEP",
"383",
":",
"the",
"surrogateescape",
"error",
"handler",
"of",
"Python",
"3",
".",
"Undecodable",
"bytes",
"will",
"be",
"replaced",
"by",
"a",
"Unicode",
"character",
"U",
"+",
"DCxx",
"on",
"decoding",
"and",
"these",
"are",
"translated",
"into",
"the",
"original",
"bytes",
"on",
"encoding",
"."
] | python | train | 43.52 |
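
For orientation, the behaviour this pure-Python handler reproduces is what Python 3 ships as the built-in "surrogateescape" error handler; a short standard-library-only illustration of that round-trip:

```python
# Undecodable bytes become lone surrogates U+DCxx on decode and are
# restored to the original bytes on encode.
raw = b'caf\xe9'                                        # not valid UTF-8
text = raw.decode('utf-8', 'surrogateescape')
print(text)                                             # 'caf\udce9'
print(text.encode('utf-8', 'surrogateescape') == raw)   # True: round-trips losslessly
```
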
geertj/pyskiplist | pyskiplist/skiplist.py | https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L403-L415 | def index(self, key, default=UNSET):
"""Find the first key-value pair with key *key* and return its position.
If the key is not found, return *default*. If default was not provided,
raise a ``KeyError``
"""
self._find_lt(key)
node = self._path[0][2]
if node is self._tail or key < node[0]:
if default is self.UNSET:
raise KeyError('key {!r} not in list'.format(key))
return default
return self._distance[0] | [
"def",
"index",
"(",
"self",
",",
"key",
",",
"default",
"=",
"UNSET",
")",
":",
"self",
".",
"_find_lt",
"(",
"key",
")",
"node",
"=",
"self",
".",
"_path",
"[",
"0",
"]",
"[",
"2",
"]",
"if",
"node",
"is",
"self",
".",
"_tail",
"or",
"key",
"<",
"node",
"[",
"0",
"]",
":",
"if",
"default",
"is",
"self",
".",
"UNSET",
":",
"raise",
"KeyError",
"(",
"'key {!r} not in list'",
".",
"format",
"(",
"key",
")",
")",
"return",
"default",
"return",
"self",
".",
"_distance",
"[",
"0",
"]"
] | Find the first key-value pair with key *key* and return its position.
If the key is not found, return *default*. If default was not provided,
raise a ``KeyError`` | [
"Find",
"the",
"first",
"key",
"-",
"value",
"pair",
"with",
"key",
"*",
"key",
"*",
"and",
"return",
"its",
"position",
"."
] | python | train | 38.384615 |
pallets/werkzeug | src/werkzeug/debug/tbtools.py | https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/debug/tbtools.py#L548-L554 | def eval(self, code, mode="single"):
"""Evaluate code in the context of the frame."""
if isinstance(code, string_types):
if PY2 and isinstance(code, text_type): # noqa
code = UTF8_COOKIE + code.encode("utf-8")
code = compile(code, "<interactive>", mode)
return eval(code, self.globals, self.locals) | [
"def",
"eval",
"(",
"self",
",",
"code",
",",
"mode",
"=",
"\"single\"",
")",
":",
"if",
"isinstance",
"(",
"code",
",",
"string_types",
")",
":",
"if",
"PY2",
"and",
"isinstance",
"(",
"code",
",",
"text_type",
")",
":",
"# noqa",
"code",
"=",
"UTF8_COOKIE",
"+",
"code",
".",
"encode",
"(",
"\"utf-8\"",
")",
"code",
"=",
"compile",
"(",
"code",
",",
"\"<interactive>\"",
",",
"mode",
")",
"return",
"eval",
"(",
"code",
",",
"self",
".",
"globals",
",",
"self",
".",
"locals",
")"
] | Evaluate code in the context of the frame. | [
"Evaluate",
"code",
"in",
"the",
"context",
"of",
"the",
"frame",
"."
] | python | train | 51 |
genomoncology/related | src/related/converters.py | https://github.com/genomoncology/related/blob/be47c0081e60fc60afcde3a25f00ebcad5d18510/src/related/converters.py#L72-L93 | def to_set_field(cls):
"""
Returns a callable instance that will convert a value to a Set.
:param cls: Valid class type of the items in the Set.
:return: instance of the SetConverter.
"""
class SetConverter(object):
def __init__(self, cls):
self._cls = cls
@property
def cls(self):
return resolve_class(self._cls)
def __call__(self, values):
values = values or set()
args = {to_model(self.cls, value) for value in values}
return TypedSet(cls=self.cls, args=args)
return SetConverter(cls) | [
"def",
"to_set_field",
"(",
"cls",
")",
":",
"class",
"SetConverter",
"(",
"object",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"cls",
")",
":",
"self",
".",
"_cls",
"=",
"cls",
"@",
"property",
"def",
"cls",
"(",
"self",
")",
":",
"return",
"resolve_class",
"(",
"self",
".",
"_cls",
")",
"def",
"__call__",
"(",
"self",
",",
"values",
")",
":",
"values",
"=",
"values",
"or",
"set",
"(",
")",
"args",
"=",
"{",
"to_model",
"(",
"self",
".",
"cls",
",",
"value",
")",
"for",
"value",
"in",
"values",
"}",
"return",
"TypedSet",
"(",
"cls",
"=",
"self",
".",
"cls",
",",
"args",
"=",
"args",
")",
"return",
"SetConverter",
"(",
"cls",
")"
] | Returns a callable instance that will convert a value to a Set.
:param cls: Valid class type of the items in the Set.
:return: instance of the SetConverter.
"Returns",
"a",
"callable",
"instance",
"that",
"will",
"convert",
"a",
"value",
"to",
"a",
"Sequence",
"."
] | python | train | 27.545455 |
DataDog/integrations-core | tokumx/datadog_checks/tokumx/vendor/pymongo/mongo_client.py | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/mongo_client.py#L1073-L1086 | def _close_cursor_now(self, cursor_id, address=None):
"""Send a kill cursors message with the given id.
What closing the cursor actually means depends on this client's
cursor manager. If there is none, the cursor is closed synchronously
on the current thread.
"""
if not isinstance(cursor_id, integer_types):
raise TypeError("cursor_id must be an instance of (int, long)")
if self.__cursor_manager is not None:
self.__cursor_manager.close(cursor_id, address)
else:
self._kill_cursors([cursor_id], address, self._get_topology()) | [
"def",
"_close_cursor_now",
"(",
"self",
",",
"cursor_id",
",",
"address",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"cursor_id",
",",
"integer_types",
")",
":",
"raise",
"TypeError",
"(",
"\"cursor_id must be an instance of (int, long)\"",
")",
"if",
"self",
".",
"__cursor_manager",
"is",
"not",
"None",
":",
"self",
".",
"__cursor_manager",
".",
"close",
"(",
"cursor_id",
",",
"address",
")",
"else",
":",
"self",
".",
"_kill_cursors",
"(",
"[",
"cursor_id",
"]",
",",
"address",
",",
"self",
".",
"_get_topology",
"(",
")",
")"
] | Send a kill cursors message with the given id.
What closing the cursor actually means depends on this client's
cursor manager. If there is none, the cursor is closed synchronously
on the current thread. | [
"Send",
"a",
"kill",
"cursors",
"message",
"with",
"the",
"given",
"id",
"."
] | python | train | 44 |
facetoe/zenpy | zenpy/lib/api.py | https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L636-L644 | def request_verification(self, user, identity):
"""
Sends the user a verification email with a link to verify ownership of the email address.
:param user: User id or object
:param identity: Identity id or object
:return: requests Response object
"""
return UserIdentityRequest(self).put(self.endpoint.request_verification, user, identity) | [
"def",
"request_verification",
"(",
"self",
",",
"user",
",",
"identity",
")",
":",
"return",
"UserIdentityRequest",
"(",
"self",
")",
".",
"put",
"(",
"self",
".",
"endpoint",
".",
"request_verification",
",",
"user",
",",
"identity",
")"
] | Sends the user a verification email with a link to verify ownership of the email address.
:param user: User id or object
:param identity: Identity id or object
:return: requests Response object | [
"Sends",
"the",
"user",
"a",
"verification",
"email",
"with",
"a",
"link",
"to",
"verify",
"ownership",
"of",
"the",
"email",
"address",
"."
] | python | train | 43 |
markchil/gptools | gptools/gaussian_process.py | https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/gaussian_process.py#L331-L339 | def free_params(self, value):
"""Set the free parameters. Note that this bypasses enforce_bounds.
"""
value = scipy.asarray(value, dtype=float)
self.K_up_to_date = False
self.k.free_params = value[:self.k.num_free_params]
self.noise_k.free_params = value[self.k.num_free_params:self.k.num_free_params + self.noise_k.num_free_params]
if self.mu is not None:
self.mu.free_params = value[self.k.num_free_params + self.noise_k.num_free_params:] | [
"def",
"free_params",
"(",
"self",
",",
"value",
")",
":",
"value",
"=",
"scipy",
".",
"asarray",
"(",
"value",
",",
"dtype",
"=",
"float",
")",
"self",
".",
"K_up_to_date",
"=",
"False",
"self",
".",
"k",
".",
"free_params",
"=",
"value",
"[",
":",
"self",
".",
"k",
".",
"num_free_params",
"]",
"self",
".",
"noise_k",
".",
"free_params",
"=",
"value",
"[",
"self",
".",
"k",
".",
"num_free_params",
":",
"self",
".",
"k",
".",
"num_free_params",
"+",
"self",
".",
"noise_k",
".",
"num_free_params",
"]",
"if",
"self",
".",
"mu",
"is",
"not",
"None",
":",
"self",
".",
"mu",
".",
"free_params",
"=",
"value",
"[",
"self",
".",
"k",
".",
"num_free_params",
"+",
"self",
".",
"noise_k",
".",
"num_free_params",
":",
"]"
] | Set the free parameters. Note that this bypasses enforce_bounds. | [
"Set",
"the",
"free",
"parameters",
".",
"Note",
"that",
"this",
"bypasses",
"enforce_bounds",
"."
] | python | train | 55.555556 |
rigetti/pyquil | pyquil/operator_estimation.py | https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/operator_estimation.py#L470-L483 | def _local_pauli_eig_meas(op, idx):
"""
Generate gate sequence to measure in the eigenbasis of a Pauli operator, assuming
we are only able to measure in the Z eigenbasis. (Note: The unitary operations of this
Program are essentially the Hermitian conjugates of those in :py:func:`_one_q_pauli_prep`)
"""
if op == 'X':
return Program(RY(-pi / 2, idx))
elif op == 'Y':
return Program(RX(pi / 2, idx))
elif op == 'Z':
return Program()
raise ValueError(f'Unknown operation {op}') | [
"def",
"_local_pauli_eig_meas",
"(",
"op",
",",
"idx",
")",
":",
"if",
"op",
"==",
"'X'",
":",
"return",
"Program",
"(",
"RY",
"(",
"-",
"pi",
"/",
"2",
",",
"idx",
")",
")",
"elif",
"op",
"==",
"'Y'",
":",
"return",
"Program",
"(",
"RX",
"(",
"pi",
"/",
"2",
",",
"idx",
")",
")",
"elif",
"op",
"==",
"'Z'",
":",
"return",
"Program",
"(",
")",
"raise",
"ValueError",
"(",
"f'Unknown operation {op}'",
")"
] | Generate gate sequence to measure in the eigenbasis of a Pauli operator, assuming
we are only able to measure in the Z eigenbasis. (Note: The unitary operations of this
Program are essentially the Hermitian conjugates of those in :py:func:`_one_q_pauli_prep`) | [
"Generate",
"gate",
"sequence",
"to",
"measure",
"in",
"the",
"eigenbasis",
"of",
"a",
"Pauli",
"operator",
"assuming",
"we",
"are",
"only",
"able",
"to",
"measure",
"in",
"the",
"Z",
"eigenbasis",
".",
"(",
"Note",
":",
"The",
"unitary",
"operations",
"of",
"this",
"Program",
"are",
"essentially",
"the",
"Hermitian",
"conjugates",
"of",
"those",
"in",
":",
"py",
":",
"func",
":",
"_one_q_pauli_prep",
")"
] | python | train | 37.357143 |
Eyepea/systemDream | src/systemdream/journal/handler.py | https://github.com/Eyepea/systemDream/blob/018fa5e9ff0f4fdc62fa85b235725d0f8b24f1a8/src/systemdream/journal/handler.py#L109-L137 | def emit(self, record):
"""Write record as journal event.
MESSAGE is taken from the message provided by the
user, and PRIORITY, LOGGER, THREAD_NAME,
CODE_{FILE,LINE,FUNC} fields are appended
automatically. In addition, record.MESSAGE_ID will be
used if present.
"""
if record.args and isinstance(record.args, collections.Mapping):
extra = dict(self._extra, **record.args) # Merge metadata from handler and record
else:
extra = self._extra
try:
msg = self.format(record)
pri = self.mapPriority(record.levelno)
mid = getattr(record, 'MESSAGE_ID', None)
send(msg,
SOCKET=self.socket,
MESSAGE_ID=mid,
PRIORITY=format(pri),
LOGGER=record.name,
THREAD_NAME=record.threadName,
CODE_FILE=record.pathname,
CODE_LINE=record.lineno,
CODE_FUNC=record.funcName,
**extra)
except Exception:
self.handleError(record) | [
"def",
"emit",
"(",
"self",
",",
"record",
")",
":",
"if",
"record",
".",
"args",
"and",
"isinstance",
"(",
"record",
".",
"args",
",",
"collections",
".",
"Mapping",
")",
":",
"extra",
"=",
"dict",
"(",
"self",
".",
"_extra",
",",
"*",
"*",
"record",
".",
"args",
")",
"# Merge metadata from handler and record",
"else",
":",
"extra",
"=",
"self",
".",
"_extra",
"try",
":",
"msg",
"=",
"self",
".",
"format",
"(",
"record",
")",
"pri",
"=",
"self",
".",
"mapPriority",
"(",
"record",
".",
"levelno",
")",
"mid",
"=",
"getattr",
"(",
"record",
",",
"'MESSAGE_ID'",
",",
"None",
")",
"send",
"(",
"msg",
",",
"SOCKET",
"=",
"self",
".",
"socket",
",",
"MESSAGE_ID",
"=",
"mid",
",",
"PRIORITY",
"=",
"format",
"(",
"pri",
")",
",",
"LOGGER",
"=",
"record",
".",
"name",
",",
"THREAD_NAME",
"=",
"record",
".",
"threadName",
",",
"CODE_FILE",
"=",
"record",
".",
"pathname",
",",
"CODE_LINE",
"=",
"record",
".",
"lineno",
",",
"CODE_FUNC",
"=",
"record",
".",
"funcName",
",",
"*",
"*",
"extra",
")",
"except",
"Exception",
":",
"self",
".",
"handleError",
"(",
"record",
")"
] | Write record as journal event.
MESSAGE is taken from the message provided by the
user, and PRIORITY, LOGGER, THREAD_NAME,
CODE_{FILE,LINE,FUNC} fields are appended
automatically. In addition, record.MESSAGE_ID will be
used if present. | [
"Write",
"record",
"as",
"journal",
"event",
"."
] | python | train | 37.896552 |
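
The branch worth noting above is the one that merges `record.args` into the journal fields when it is a mapping. The standard library itself turns a single dict argument into `record.args`, which is what makes that pattern work; a stdlib-only check:

```python
import logging

# A lone mapping in the args tuple is unpacked by LogRecord, so emit() above
# would see record.args as a dict of extra journal fields.
record = logging.LogRecord('demo', logging.WARNING, __file__, 1,
                           'disk almost full', ({'DISK': '/dev/sda1'},), None)
print(record.args)                    # {'DISK': '/dev/sda1'}
print(isinstance(record.args, dict))  # True
```
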
chemlab/chemlab | chemlab/utils/pbc.py | https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/utils/pbc.py#L108-L120 | def periodic_distance(a, b, periodic):
'''
Periodic distance between two arrays. Periodic is a 3
dimensional array containing the 3 box sizes.
'''
a = np.array(a)
b = np.array(b)
periodic = np.array(periodic)
delta = np.abs(a - b)
delta = np.where(delta > 0.5 * periodic, periodic - delta, delta)
return np.sqrt((delta ** 2).sum(axis=-1)) | [
"def",
"periodic_distance",
"(",
"a",
",",
"b",
",",
"periodic",
")",
":",
"a",
"=",
"np",
".",
"array",
"(",
"a",
")",
"b",
"=",
"np",
".",
"array",
"(",
"b",
")",
"periodic",
"=",
"np",
".",
"array",
"(",
"periodic",
")",
"delta",
"=",
"np",
".",
"abs",
"(",
"a",
"-",
"b",
")",
"delta",
"=",
"np",
".",
"where",
"(",
"delta",
">",
"0.5",
"*",
"periodic",
",",
"periodic",
"-",
"delta",
",",
"delta",
")",
"return",
"np",
".",
"sqrt",
"(",
"(",
"delta",
"**",
"2",
")",
".",
"sum",
"(",
"axis",
"=",
"-",
"1",
")",
")"
] | Periodic distance between two arrays. Periodic is a 3
dimensional array containing the 3 box sizes. | [
"Periodic",
"distance",
"between",
"two",
"arrays",
".",
"Periodic",
"is",
"a",
"3",
"dimensional",
"array",
"containing",
"the",
"3",
"box",
"sizes",
"."
] | python | train | 28.307692 |
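
A self-contained sketch of the minimum-image logic above, assuming only numpy; the box size and coordinates are made-up values chosen so the wrap-around is visible:

```python
import numpy as np

def periodic_distance(a, b, periodic):
    # Minimum-image convention: any separation larger than half the box wraps around.
    a, b, periodic = np.asarray(a), np.asarray(b), np.asarray(periodic)
    delta = np.abs(a - b)
    delta = np.where(delta > 0.5 * periodic, periodic - delta, delta)
    return np.sqrt((delta ** 2).sum(axis=-1))

# Points near opposite faces of a 10 x 10 x 10 box are close through the boundary.
print(periodic_distance([0.5, 0.0, 0.0], [9.5, 0.0, 0.0], [10.0, 10.0, 10.0]))  # 1.0
```
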
eerimoq/bitstruct | bitstruct.py | https://github.com/eerimoq/bitstruct/blob/8e887c10241aa51c2a77c10e9923bb3978b15bcb/bitstruct.py#L589-L597 | def unpack_from_dict(fmt, names, data, offset=0):
"""Same as :func:`~bitstruct.unpack_from_dict()`, but returns a
dictionary.
See :func:`~bitstruct.pack_dict()` for details on `names`.
"""
return CompiledFormatDict(fmt, names).unpack_from(data, offset) | [
"def",
"unpack_from_dict",
"(",
"fmt",
",",
"names",
",",
"data",
",",
"offset",
"=",
"0",
")",
":",
"return",
"CompiledFormatDict",
"(",
"fmt",
",",
"names",
")",
".",
"unpack_from",
"(",
"data",
",",
"offset",
")"
] | Same as :func:`~bitstruct.unpack_from()`, but returns a
dictionary.
See :func:`~bitstruct.pack_dict()` for details on `names`. | [
"Same",
"as",
":",
"func",
":",
"~bitstruct",
".",
"unpack_from_dict",
"()",
"but",
"returns",
"a",
"dictionary",
"."
] | python | valid | 29.666667 |
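
A short usage sketch, assuming the bitstruct package is installed; `pack_dict` (referenced in the docstring above) is used here only to build a test payload:

```python
import bitstruct

# Pack two 4-bit unsigned fields from a dict, then unpack them back by name.
payload = bitstruct.pack_dict('u4u4', ['a', 'b'], {'a': 3, 'b': 5})
print(bitstruct.unpack_from_dict('u4u4', ['a', 'b'], payload))  # {'a': 3, 'b': 5}
```
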
gtaylor/paypal-python | paypal/countries.py | https://github.com/gtaylor/paypal-python/blob/aa7a987ea9e9b7f37bcd8a8b54a440aad6c871b1/paypal/countries.py#L254-L273 | def is_valid_country_abbrev(abbrev, case_sensitive=False):
"""
Given a country code abbreviation, check to see if it matches the
country table.
abbrev: (str) Country code to evaluate.
case_sensitive: (bool) When True, enforce case sensitivity.
Returns True if valid, False if not.
"""
if case_sensitive:
country_code = abbrev
else:
country_code = abbrev.upper()
for code, full_name in COUNTRY_TUPLES:
if country_code == code:
return True
return False | [
"def",
"is_valid_country_abbrev",
"(",
"abbrev",
",",
"case_sensitive",
"=",
"False",
")",
":",
"if",
"case_sensitive",
":",
"country_code",
"=",
"abbrev",
"else",
":",
"country_code",
"=",
"abbrev",
".",
"upper",
"(",
")",
"for",
"code",
",",
"full_name",
"in",
"COUNTRY_TUPLES",
":",
"if",
"country_code",
"==",
"code",
":",
"return",
"True",
"return",
"False"
] | Given a country code abbreviation, check to see if it matches the
country table.
abbrev: (str) Country code to evaluate.
case_sensitive: (bool) When True, enforce case sensitivity.
Returns True if valid, False if not. | [
"Given",
"a",
"country",
"code",
"abbreviation",
"check",
"to",
"see",
"if",
"it",
"matches",
"the",
"country",
"table",
"."
] | python | train | 25.75 |
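
A minimal standalone sketch of the check above; the three-entry COUNTRY_TUPLES table is a hypothetical stand-in for the full one shipped with the module:

```python
# Hypothetical, trimmed country table for illustration only.
COUNTRY_TUPLES = [('US', 'United States'), ('CA', 'Canada'), ('GB', 'United Kingdom')]

def is_valid_country_abbrev(abbrev, case_sensitive=False):
    country_code = abbrev if case_sensitive else abbrev.upper()
    return any(code == country_code for code, _ in COUNTRY_TUPLES)

print(is_valid_country_abbrev('us'))                       # True
print(is_valid_country_abbrev('us', case_sensitive=True))  # False
```
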
synw/dataswim | dataswim/charts/__init__.py | https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/charts/__init__.py#L301-L311 | def mline_point_(self, col, x=None, y=None, rsum=None, rmean=None):
"""
Splits a column into multiple series based on the column's
unique values. Then visualizes these series in a chart.
Parameters: column to split, x axis column, y axis column
Optional: rsum="1D" to resample and sum data and rmean="1D"
to mean the data
"""
line = self._multiseries(col, x, y, "line", rsum, rmean)
point = self._multiseries(col, x, y, "point", rsum, rmean)
return line * point | [
"def",
"mline_point_",
"(",
"self",
",",
"col",
",",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"rsum",
"=",
"None",
",",
"rmean",
"=",
"None",
")",
":",
"line",
"=",
"self",
".",
"_multiseries",
"(",
"col",
",",
"x",
",",
"y",
",",
"\"line\"",
",",
"rsum",
",",
"rmean",
")",
"point",
"=",
"self",
".",
"_multiseries",
"(",
"col",
",",
"x",
",",
"y",
",",
"\"point\"",
",",
"rsum",
",",
"rmean",
")",
"return",
"line",
"*",
"point"
] | Splits a column into multiple series based on the column's
unique values. Then visualizes these series in a chart.
Parameters: column to split, x axis column, y axis column
Optional: rsum="1D" to resample and sum data and rmean="1D"
to mean the data | [
"Splits",
"a",
"column",
"into",
"multiple",
"series",
"based",
"on",
"the",
"column",
"s",
"unique",
"values",
".",
"Then",
"visualize",
"theses",
"series",
"in",
"a",
"chart",
".",
"Parameters",
":",
"column",
"to",
"split",
"x",
"axis",
"column",
"y",
"axis",
"column",
"Optional",
":",
"rsum",
"=",
"1D",
"to",
"resample",
"and",
"sum",
"data",
"an",
"rmean",
"=",
"1D",
"to",
"mean",
"the",
"data"
] | python | train | 42.727273 |
google/brotli | research/brotlidump.py | https://github.com/google/brotli/blob/4b2b2d4f83ffeaac7708e44409fe34896a01a278/research/brotlidump.py#L1578-L1615 | def processStream(self):
"""Process a brotli stream.
"""
print('addr hex{:{}s}binary context explanation'.format(
'', self.width-10))
print('Stream header'.center(60, '-'))
self.windowSize = self.verboseRead(WindowSizeAlphabet())
print('Metablock header'.center(60, '='))
self.ISLAST = False
self.output = bytearray()
while not self.ISLAST:
self.ISLAST = self.verboseRead(
BoolCode('LAST', description="Last block"))
if self.ISLAST:
if self.verboseRead(
BoolCode('EMPTY', description="Empty block")): break
if self.metablockLength(): continue
if not self.ISLAST and self.uncompressed(): continue
print('Block type descriptors'.center(60, '-'))
self.numberOfBlockTypes = {}
self.currentBlockCounts = {}
self.blockTypeCodes = {}
self.blockCountCodes = {}
for blockType in (L,I,D): self.blockType(blockType)
print('Distance code parameters'.center(60, '-'))
self.NPOSTFIX, self.NDIRECT = self.verboseRead(DistanceParamAlphabet())
self.readLiteralContextModes()
print('Context maps'.center(60, '-'))
self.cmaps = {}
#keep the number of each kind of prefix tree for the last loop
numberOfTrees = {I: self.numberOfBlockTypes[I]}
for blockType in (L,D):
numberOfTrees[blockType] = self.contextMap(blockType)
print('Prefix code lists'.center(60, '-'))
self.prefixCodes = {}
for blockType in (L,I,D):
self.readPrefixArray(blockType, numberOfTrees[blockType])
self.metablock() | [
"def",
"processStream",
"(",
"self",
")",
":",
"print",
"(",
"'addr hex{:{}s}binary context explanation'",
".",
"format",
"(",
"''",
",",
"self",
".",
"width",
"-",
"10",
")",
")",
"print",
"(",
"'Stream header'",
".",
"center",
"(",
"60",
",",
"'-'",
")",
")",
"self",
".",
"windowSize",
"=",
"self",
".",
"verboseRead",
"(",
"WindowSizeAlphabet",
"(",
")",
")",
"print",
"(",
"'Metablock header'",
".",
"center",
"(",
"60",
",",
"'='",
")",
")",
"self",
".",
"ISLAST",
"=",
"False",
"self",
".",
"output",
"=",
"bytearray",
"(",
")",
"while",
"not",
"self",
".",
"ISLAST",
":",
"self",
".",
"ISLAST",
"=",
"self",
".",
"verboseRead",
"(",
"BoolCode",
"(",
"'LAST'",
",",
"description",
"=",
"\"Last block\"",
")",
")",
"if",
"self",
".",
"ISLAST",
":",
"if",
"self",
".",
"verboseRead",
"(",
"BoolCode",
"(",
"'EMPTY'",
",",
"description",
"=",
"\"Empty block\"",
")",
")",
":",
"break",
"if",
"self",
".",
"metablockLength",
"(",
")",
":",
"continue",
"if",
"not",
"self",
".",
"ISLAST",
"and",
"self",
".",
"uncompressed",
"(",
")",
":",
"continue",
"print",
"(",
"'Block type descriptors'",
".",
"center",
"(",
"60",
",",
"'-'",
")",
")",
"self",
".",
"numberOfBlockTypes",
"=",
"{",
"}",
"self",
".",
"currentBlockCounts",
"=",
"{",
"}",
"self",
".",
"blockTypeCodes",
"=",
"{",
"}",
"self",
".",
"blockCountCodes",
"=",
"{",
"}",
"for",
"blockType",
"in",
"(",
"L",
",",
"I",
",",
"D",
")",
":",
"self",
".",
"blockType",
"(",
"blockType",
")",
"print",
"(",
"'Distance code parameters'",
".",
"center",
"(",
"60",
",",
"'-'",
")",
")",
"self",
".",
"NPOSTFIX",
",",
"self",
".",
"NDIRECT",
"=",
"self",
".",
"verboseRead",
"(",
"DistanceParamAlphabet",
"(",
")",
")",
"self",
".",
"readLiteralContextModes",
"(",
")",
"print",
"(",
"'Context maps'",
".",
"center",
"(",
"60",
",",
"'-'",
")",
")",
"self",
".",
"cmaps",
"=",
"{",
"}",
"#keep the number of each kind of prefix tree for the last loop",
"numberOfTrees",
"=",
"{",
"I",
":",
"self",
".",
"numberOfBlockTypes",
"[",
"I",
"]",
"}",
"for",
"blockType",
"in",
"(",
"L",
",",
"D",
")",
":",
"numberOfTrees",
"[",
"blockType",
"]",
"=",
"self",
".",
"contextMap",
"(",
"blockType",
")",
"print",
"(",
"'Prefix code lists'",
".",
"center",
"(",
"60",
",",
"'-'",
")",
")",
"self",
".",
"prefixCodes",
"=",
"{",
"}",
"for",
"blockType",
"in",
"(",
"L",
",",
"I",
",",
"D",
")",
":",
"self",
".",
"readPrefixArray",
"(",
"blockType",
",",
"numberOfTrees",
"[",
"blockType",
"]",
")",
"self",
".",
"metablock",
"(",
")"
] | Process a brotli stream. | [
"Process",
"a",
"brotli",
"stream",
"."
] | python | test | 46.368421 |
DataBiosphere/dsub | dsub/lib/providers_util.py | https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/lib/providers_util.py#L160-L196 | def build_recursive_delocalize_command(source, outputs, file_provider):
"""Return a multi-line string with a shell script to copy recursively.
Arguments:
source: Folder with the data.
For example /mnt/data
outputs: a list of OutputFileParam.
file_provider: file provider string used to filter the output params; the
returned command will only apply outputs whose file provider
matches this file filter.
Returns:
a multi-line string with a shell script that copies the outputs
recursively to GCS.
"""
command = _LOCALIZE_COMMAND_MAP[file_provider]
filtered_outputs = [
var for var in outputs
if var.recursive and var.file_provider == file_provider
]
return '\n'.join([
textwrap.dedent("""
for ((i = 0; i < 3; i++)); do
if {command} {data_mount}/{docker_path} {destination_uri}; then
break
elif ((i == 2)); then
2>&1 echo "Recursive de-localization failed."
exit 1
fi
done
""").format(
command=command,
data_mount=source.rstrip('/'),
docker_path=var.docker_path,
destination_uri=var.uri) for var in filtered_outputs
]) | [
"def",
"build_recursive_delocalize_command",
"(",
"source",
",",
"outputs",
",",
"file_provider",
")",
":",
"command",
"=",
"_LOCALIZE_COMMAND_MAP",
"[",
"file_provider",
"]",
"filtered_outputs",
"=",
"[",
"var",
"for",
"var",
"in",
"outputs",
"if",
"var",
".",
"recursive",
"and",
"var",
".",
"file_provider",
"==",
"file_provider",
"]",
"return",
"'\\n'",
".",
"join",
"(",
"[",
"textwrap",
".",
"dedent",
"(",
"\"\"\"\n for ((i = 0; i < 3; i++)); do\n if {command} {data_mount}/{docker_path} {destination_uri}; then\n break\n elif ((i == 2)); then\n 2>&1 echo \"Recursive de-localization failed.\"\n exit 1\n fi\n done\n \"\"\"",
")",
".",
"format",
"(",
"command",
"=",
"command",
",",
"data_mount",
"=",
"source",
".",
"rstrip",
"(",
"'/'",
")",
",",
"docker_path",
"=",
"var",
".",
"docker_path",
",",
"destination_uri",
"=",
"var",
".",
"uri",
")",
"for",
"var",
"in",
"filtered_outputs",
"]",
")"
] | Return a multi-line string with a shell script to copy recursively.
Arguments:
source: Folder with the data.
For example /mnt/data
outputs: a list of OutputFileParam.
file_provider: file provider string used to filter the output params; the
returned command will only apply outputs whose file provider
matches this file filter.
Returns:
a multi-line string with a shell script that copies the outputs
recursively to GCS. | [
"Return",
"a",
"multi",
"-",
"line",
"string",
"with",
"a",
"shell",
"script",
"to",
"copy",
"recursively",
"."
] | python | valid | 32.324324 |
MisterY/gnucash-portfolio | gnucash_portfolio/accounts.py | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/accounts.py#L119-L127 | def get_end_balance(self, after: date) -> Decimal:
""" Calculates account balance """
# create a new date without hours
#date_corrected = datetimeutils.end_of_day(after)
datum = Datum()
datum.from_date(after)
datum.end_of_day()
#log(DEBUG, "getting balance on %s", date_corrected)
return self.get_balance_on(datum.value) | [
"def",
"get_end_balance",
"(",
"self",
",",
"after",
":",
"date",
")",
"->",
"Decimal",
":",
"# create a new date without hours",
"#date_corrected = datetimeutils.end_of_day(after)",
"datum",
"=",
"Datum",
"(",
")",
"datum",
".",
"from_date",
"(",
"after",
")",
"datum",
".",
"end_of_day",
"(",
")",
"#log(DEBUG, \"getting balance on %s\", date_corrected)",
"return",
"self",
".",
"get_balance_on",
"(",
"datum",
".",
"value",
")"
] | Calculates account balance | [
"Calculates",
"account",
"balance"
] | python | train | 41.777778 |
mthh/jenkspy | jenkspy/core.py | https://github.com/mthh/jenkspy/blob/f57c0149e1d4dfd2369270ace55981fcf55f699b/jenkspy/core.py#L15-L72 | def jenks_breaks(values, nb_class):
"""
Compute jenks natural breaks on a sequence of `values`, given `nb_class`,
the number of desired classes.
Parameters
----------
values : array-like
The Iterable sequence of numbers (integer/float) to be used.
nb_class : int
The desired number of classes (as some other functions request
a `k` value, `nb_class` is like `k` + 1). It has to be less than
the length of `values` and greater than 2.
Returns
-------
breaks : tuple of floats
The computed break values, including minimum and maximum, in order
to have all the bounds for building `nb_class` class,
so the returned tuple has a length of `nb_class` + 1.
Examples
--------
Using nb_class = 3, expecting 4 break values , including min and max :
>>> jenks_breaks(
[1.3, 7.1, 7.3, 2.3, 3.9, 4.1, 7.8, 1.2, 4.3, 7.3, 5.0, 4.3],
nb_class = 3) # Should output (1.2, 2.3, 5.0, 7.8)
"""
if not isinstance(values, Iterable) or isinstance(values, (str, bytes)):
raise TypeError("A sequence of numbers is expected")
if isinstance(nb_class, float) and int(nb_class) == nb_class:
nb_class = int(nb_class)
if not isinstance(nb_class, int):
raise TypeError(
"Number of class have to be a positive integer: "
"expected an instance of 'int' but found {}"
.format(type(nb_class)))
nb_values = len(values)
if np and isinstance(values, np.ndarray):
values = values[np.argwhere(np.isfinite(values)).reshape(-1)]
else:
values = [i for i in values if isfinite(i)]
if len(values) != nb_values:
warnings.warn('Invalid values encountered (NaN or Inf) were ignored')
nb_values = len(values)
if nb_class >= nb_values or nb_class < 2:
raise ValueError("Number of class have to be an integer "
"greater than 2 and "
"smaller than the number of values to use")
return jenks._jenks_breaks(values, nb_class) | [
"def",
"jenks_breaks",
"(",
"values",
",",
"nb_class",
")",
":",
"if",
"not",
"isinstance",
"(",
"values",
",",
"Iterable",
")",
"or",
"isinstance",
"(",
"values",
",",
"(",
"str",
",",
"bytes",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"A sequence of numbers is expected\"",
")",
"if",
"isinstance",
"(",
"nb_class",
",",
"float",
")",
"and",
"int",
"(",
"nb_class",
")",
"==",
"nb_class",
":",
"nb_class",
"=",
"int",
"(",
"nb_class",
")",
"if",
"not",
"isinstance",
"(",
"nb_class",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"Number of class have to be a positive integer: \"",
"\"expected an instance of 'int' but found {}\"",
".",
"format",
"(",
"type",
"(",
"nb_class",
")",
")",
")",
"nb_values",
"=",
"len",
"(",
"values",
")",
"if",
"np",
"and",
"isinstance",
"(",
"values",
",",
"np",
".",
"ndarray",
")",
":",
"values",
"=",
"values",
"[",
"np",
".",
"argwhere",
"(",
"np",
".",
"isfinite",
"(",
"values",
")",
")",
".",
"reshape",
"(",
"-",
"1",
")",
"]",
"else",
":",
"values",
"=",
"[",
"i",
"for",
"i",
"in",
"values",
"if",
"isfinite",
"(",
"i",
")",
"]",
"if",
"len",
"(",
"values",
")",
"!=",
"nb_values",
":",
"warnings",
".",
"warn",
"(",
"'Invalid values encountered (NaN or Inf) were ignored'",
")",
"nb_values",
"=",
"len",
"(",
"values",
")",
"if",
"nb_class",
">=",
"nb_values",
"or",
"nb_class",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"\"Number of class have to be an integer \"",
"\"greater than 2 and \"",
"\"smaller than the number of values to use\"",
")",
"return",
"jenks",
".",
"_jenks_breaks",
"(",
"values",
",",
"nb_class",
")"
] | Compute jenks natural breaks on a sequence of `values`, given `nb_class`,
the number of desired classes.
Parameters
----------
values : array-like
The Iterable sequence of numbers (integer/float) to be used.
nb_class : int
The desired number of classes (as some other functions request
a `k` value, `nb_class` is like `k` + 1). It has to be less than
the length of `values` and greater than 2.
Returns
-------
breaks : tuple of floats
The computed break values, including minimum and maximum, in order
to have all the bounds for building `nb_class` class,
so the returned tuple has a length of `nb_class` + 1.
Examples
--------
Using nb_class = 3, expecting 4 break values , including min and max :
>>> jenks_breaks(
[1.3, 7.1, 7.3, 2.3, 3.9, 4.1, 7.8, 1.2, 4.3, 7.3, 5.0, 4.3],
nb_class = 3) # Should output (1.2, 2.3, 5.0, 7.8) | [
"Compute",
"jenks",
"natural",
"breaks",
"on",
"a",
"sequence",
"of",
"values",
"given",
"nb_class",
"the",
"number",
"of",
"desired",
"class",
"."
] | python | valid | 35.5 |
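
Typical usage, assuming a jenkspy build whose signature matches the one shown above (the class count is passed positionally for that reason); the sample values are the ones from the docstring:

```python
import jenkspy

values = [1.3, 7.1, 7.3, 2.3, 3.9, 4.1, 7.8, 1.2, 4.3, 7.3, 5.0, 4.3]
print(jenkspy.jenks_breaks(values, 3))  # expected: (1.2, 2.3, 5.0, 7.8)
```
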
genialis/resolwe | resolwe/rest/serializers.py | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/rest/serializers.py#L11-L14 | def fields(self):
"""Filter fields based on request query parameters."""
fields = super().fields
return apply_subfield_projection(self, copy.copy(fields)) | [
"def",
"fields",
"(",
"self",
")",
":",
"fields",
"=",
"super",
"(",
")",
".",
"fields",
"return",
"apply_subfield_projection",
"(",
"self",
",",
"copy",
".",
"copy",
"(",
"fields",
")",
")"
] | Filter fields based on request query parameters. | [
"Filter",
"fields",
"based",
"on",
"request",
"query",
"parameters",
"."
] | python | train | 43.75 |
divio/aldryn-apphooks-config | aldryn_apphooks_config/utils.py | https://github.com/divio/aldryn-apphooks-config/blob/5b8dfc7516982a8746fc08cf919c6ab116335d62/aldryn_apphooks_config/utils.py#L59-L68 | def _get_apphook_field_names(model):
"""
Return all foreign key field names for a AppHookConfig based model
"""
from .models import AppHookConfig # avoid circular dependencies
fields = []
for field in model._meta.fields:
if isinstance(field, ForeignKey) and issubclass(field.remote_field.model, AppHookConfig):
fields.append(field)
return [field.name for field in fields] | [
"def",
"_get_apphook_field_names",
"(",
"model",
")",
":",
"from",
".",
"models",
"import",
"AppHookConfig",
"# avoid circular dependencies",
"fields",
"=",
"[",
"]",
"for",
"field",
"in",
"model",
".",
"_meta",
".",
"fields",
":",
"if",
"isinstance",
"(",
"field",
",",
"ForeignKey",
")",
"and",
"issubclass",
"(",
"field",
".",
"remote_field",
".",
"model",
",",
"AppHookConfig",
")",
":",
"fields",
".",
"append",
"(",
"field",
")",
"return",
"[",
"field",
".",
"name",
"for",
"field",
"in",
"fields",
"]"
] | Return all foreign key field names for a AppHookConfig based model | [
"Return",
"all",
"foreign",
"key",
"field",
"names",
"for",
"a",
"AppHookConfig",
"based",
"model"
] | python | train | 41.1 |
dmckeone/frosty | frosty/includes.py | https://github.com/dmckeone/frosty/blob/868d81e72b6c8e354af3697531c20f116cd1fc9a/frosty/includes.py#L52-L70 | def build_includes(include_packages, freezer=None, optional=None):
"""
Iterate the list of packages to build a complete list of those packages as well as all subpackages.
:param include_packages: list of package names
:type: include_pacakges: list of basestr
:param freezer: The freezer to use (See FREEZER constants)
:param optional: Optional pacakge names to include (will only issue a warning if they don't exist)
:return: complete set of package includes
"""
freezer = resolve_freezer(freezer)
# Import (or get reference to) all listed packages to ensure that they exist.
package_references = _import_packages(include_packages, optional=optional)
# Find all includes for the given freezer type
includes = freezer.build_includes(package_references)
return includes | [
"def",
"build_includes",
"(",
"include_packages",
",",
"freezer",
"=",
"None",
",",
"optional",
"=",
"None",
")",
":",
"freezer",
"=",
"resolve_freezer",
"(",
"freezer",
")",
"# Import (or get reference to) all listed packages to ensure that they exist.",
"package_references",
"=",
"_import_packages",
"(",
"include_packages",
",",
"optional",
"=",
"optional",
")",
"# Find all includes for the given freezer type",
"includes",
"=",
"freezer",
".",
"build_includes",
"(",
"package_references",
")",
"return",
"includes"
] | Iterate the list of packages to build a complete list of those packages as well as all subpackages.
:param include_packages: list of package names
:type: include_pacakges: list of basestr
:param freezer: The freezer to use (See FREEZER constants)
:param optional: Optional pacakge names to include (will only issue a warning if they don't exist)
:return: complete set of package includes | [
"Iterate",
"the",
"list",
"of",
"packages",
"to",
"build",
"a",
"complete",
"list",
"of",
"those",
"packages",
"as",
"well",
"as",
"all",
"subpackages",
"."
] | python | train | 42.578947 |
HPENetworking/PYHPEIMC | pyhpeimc/objects.py | https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/pyhpeimc/objects.py#L264-L274 | def addchild(self, startip, endip, name, description):
"""
Method takes input of str startip, str endip, name, and description and adds a child scope.
The startip and endip MUST be in the IP address range of the parent scope.
:param startip: str of ipv4 address of the first address in the child scope
:param endip: str of ipv4 address of the last address in the child scope
:param name: of the owner of the child scope
:param description: description of the child scope
:return:
"""
add_child_ip_scope(self.auth, self.url, startip, endip, name, description, self.id) | [
"def",
"addchild",
"(",
"self",
",",
"startip",
",",
"endip",
",",
"name",
",",
"description",
")",
":",
"add_child_ip_scope",
"(",
"self",
".",
"auth",
",",
"self",
".",
"url",
",",
"startip",
",",
"endip",
",",
"name",
",",
"description",
",",
"self",
".",
"id",
")"
] | Method takes input of str startip, str endip, name, and description and adds a child scope.
The startip and endip MUST be in the IP address range of the parent scope.
:param startip: str of ipv4 address of the first address in the child scope
:param endip: str of ipv4 address of the last address in the child scope
:param name: of the owner of the child scope
:param description: description of the child scope
:return: | [
"Method",
"takes",
"inpur",
"of",
"str",
"startip",
"str",
"endip",
"name",
"and",
"description",
"and",
"adds",
"a",
"child",
"scope",
".",
"The",
"startip",
"and",
"endip",
"MUST",
"be",
"in",
"the",
"IP",
"address",
"range",
"of",
"the",
"parent",
"scope",
".",
":",
"param",
"startip",
":",
"str",
"of",
"ipv4",
"address",
"of",
"the",
"first",
"address",
"in",
"the",
"child",
"scope",
":",
"param",
"endip",
":",
"str",
"of",
"ipv4",
"address",
"of",
"the",
"last",
"address",
"in",
"the",
"child",
"scope",
":",
"param",
"name",
":",
"of",
"the",
"owner",
"of",
"the",
"child",
"scope",
":",
"param",
"description",
":",
"description",
"of",
"the",
"child",
"scope",
":",
"return",
":"
] | python | train | 57.909091 |
cltl/KafNafParserPy | KafNafParserPy/term_data.py | https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/term_data.py#L350-L362 | def add_external_reference(self,term_id, external_ref):
"""
Adds an external reference for the given term
@type term_id: string
@param term_id: the term identifier
@type external_ref: L{CexternalReference}
@param external_ref: the external reference object
"""
if term_id in self.idx:
term_obj = Cterm(self.idx[term_id],self.type)
term_obj.add_external_reference(external_ref)
else:
print('{term_id} not in self.idx'.format(**locals())) | [
"def",
"add_external_reference",
"(",
"self",
",",
"term_id",
",",
"external_ref",
")",
":",
"if",
"term_id",
"in",
"self",
".",
"idx",
":",
"term_obj",
"=",
"Cterm",
"(",
"self",
".",
"idx",
"[",
"term_id",
"]",
",",
"self",
".",
"type",
")",
"term_obj",
".",
"add_external_reference",
"(",
"external_ref",
")",
"else",
":",
"print",
"(",
"'{term_id} not in self.idx'",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
")"
] | Adds an external reference for the given term
@type term_id: string
@param term_id: the term identifier
@type external_ref: L{CexternalReference}
@param external_ref: the external reference object | [
"Adds",
"an",
"external",
"reference",
"for",
"the",
"given",
"term"
] | python | train | 40.923077 |
nion-software/nionswift-io | nionswift_plugin/DM_IO/dm3_image_utils.py | https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/DM_IO/dm3_image_utils.py#L102-L137 | def ndarray_to_imagedatadict(nparr):
"""
Convert the numpy array nparr into a suitable ImageList entry dictionary.
Returns a dictionary with the appropriate Data, DataType, PixelDepth
to be inserted into a dm3 tag dictionary and written to a file.
"""
ret = {}
dm_type = None
for k, v in iter(dm_image_dtypes.items()):
if v[1] == nparr.dtype.type:
dm_type = k
break
if dm_type is None and nparr.dtype == numpy.uint8 and nparr.shape[-1] in (3, 4):
ret["DataType"] = 23
ret["PixelDepth"] = 4
if nparr.shape[2] == 4:
rgb_view = nparr.view(numpy.int32).reshape(nparr.shape[:-1]) # squash the color into uint32
else:
assert nparr.shape[2] == 3
rgba_image = numpy.empty(nparr.shape[:-1] + (4,), numpy.uint8)
rgba_image[:,:,0:3] = nparr
rgba_image[:,:,3] = 255
rgb_view = rgba_image.view(numpy.int32).reshape(rgba_image.shape[:-1]) # squash the color into uint32
ret["Dimensions"] = list(rgb_view.shape[::-1])
ret["Data"] = parse_dm3.array.array(platform_independent_char(rgb_view.dtype), rgb_view.flatten())
else:
ret["DataType"] = dm_type
ret["PixelDepth"] = nparr.dtype.itemsize
ret["Dimensions"] = list(nparr.shape[::-1])
if nparr.dtype.type in np_to_structarray_map:
types = np_to_structarray_map[nparr.dtype.type]
ret["Data"] = parse_dm3.structarray(types)
ret["Data"].raw_data = bytes(numpy.array(nparr, copy=False).data)
else:
ret["Data"] = parse_dm3.array.array(platform_independent_char(nparr.dtype), numpy.array(nparr, copy=False).flatten())
return ret | [
"def",
"ndarray_to_imagedatadict",
"(",
"nparr",
")",
":",
"ret",
"=",
"{",
"}",
"dm_type",
"=",
"None",
"for",
"k",
",",
"v",
"in",
"iter",
"(",
"dm_image_dtypes",
".",
"items",
"(",
")",
")",
":",
"if",
"v",
"[",
"1",
"]",
"==",
"nparr",
".",
"dtype",
".",
"type",
":",
"dm_type",
"=",
"k",
"break",
"if",
"dm_type",
"is",
"None",
"and",
"nparr",
".",
"dtype",
"==",
"numpy",
".",
"uint8",
"and",
"nparr",
".",
"shape",
"[",
"-",
"1",
"]",
"in",
"(",
"3",
",",
"4",
")",
":",
"ret",
"[",
"\"DataType\"",
"]",
"=",
"23",
"ret",
"[",
"\"PixelDepth\"",
"]",
"=",
"4",
"if",
"nparr",
".",
"shape",
"[",
"2",
"]",
"==",
"4",
":",
"rgb_view",
"=",
"nparr",
".",
"view",
"(",
"numpy",
".",
"int32",
")",
".",
"reshape",
"(",
"nparr",
".",
"shape",
"[",
":",
"-",
"1",
"]",
")",
"# squash the color into uint32",
"else",
":",
"assert",
"nparr",
".",
"shape",
"[",
"2",
"]",
"==",
"3",
"rgba_image",
"=",
"numpy",
".",
"empty",
"(",
"nparr",
".",
"shape",
"[",
":",
"-",
"1",
"]",
"+",
"(",
"4",
",",
")",
",",
"numpy",
".",
"uint8",
")",
"rgba_image",
"[",
":",
",",
":",
",",
"0",
":",
"3",
"]",
"=",
"nparr",
"rgba_image",
"[",
":",
",",
":",
",",
"3",
"]",
"=",
"255",
"rgb_view",
"=",
"rgba_image",
".",
"view",
"(",
"numpy",
".",
"int32",
")",
".",
"reshape",
"(",
"rgba_image",
".",
"shape",
"[",
":",
"-",
"1",
"]",
")",
"# squash the color into uint32",
"ret",
"[",
"\"Dimensions\"",
"]",
"=",
"list",
"(",
"rgb_view",
".",
"shape",
"[",
":",
":",
"-",
"1",
"]",
")",
"ret",
"[",
"\"Data\"",
"]",
"=",
"parse_dm3",
".",
"array",
".",
"array",
"(",
"platform_independent_char",
"(",
"rgb_view",
".",
"dtype",
")",
",",
"rgb_view",
".",
"flatten",
"(",
")",
")",
"else",
":",
"ret",
"[",
"\"DataType\"",
"]",
"=",
"dm_type",
"ret",
"[",
"\"PixelDepth\"",
"]",
"=",
"nparr",
".",
"dtype",
".",
"itemsize",
"ret",
"[",
"\"Dimensions\"",
"]",
"=",
"list",
"(",
"nparr",
".",
"shape",
"[",
":",
":",
"-",
"1",
"]",
")",
"if",
"nparr",
".",
"dtype",
".",
"type",
"in",
"np_to_structarray_map",
":",
"types",
"=",
"np_to_structarray_map",
"[",
"nparr",
".",
"dtype",
".",
"type",
"]",
"ret",
"[",
"\"Data\"",
"]",
"=",
"parse_dm3",
".",
"structarray",
"(",
"types",
")",
"ret",
"[",
"\"Data\"",
"]",
".",
"raw_data",
"=",
"bytes",
"(",
"numpy",
".",
"array",
"(",
"nparr",
",",
"copy",
"=",
"False",
")",
".",
"data",
")",
"else",
":",
"ret",
"[",
"\"Data\"",
"]",
"=",
"parse_dm3",
".",
"array",
".",
"array",
"(",
"platform_independent_char",
"(",
"nparr",
".",
"dtype",
")",
",",
"numpy",
".",
"array",
"(",
"nparr",
",",
"copy",
"=",
"False",
")",
".",
"flatten",
"(",
")",
")",
"return",
"ret"
] | Convert the numpy array nparr into a suitable ImageList entry dictionary.
Returns a dictionary with the appropriate Data, DataType, PixelDepth
to be inserted into a dm3 tag dictionary and written to a file. | [
"Convert",
"the",
"numpy",
"array",
"nparr",
"into",
"a",
"suitable",
"ImageList",
"entry",
"dictionary",
".",
"Returns",
"a",
"dictionary",
"with",
"the",
"appropriate",
"Data",
"DataType",
"PixelDepth",
"to",
"be",
"inserted",
"into",
"a",
"dm3",
"tag",
"dictionary",
"and",
"written",
"to",
"a",
"file",
"."
] | python | train | 47.416667 |
MillionIntegrals/vel | vel/rl/api/evaluator.py | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/api/evaluator.py#L114-L133 | def get(self, name):
"""
Return a value from this evaluator.
Because the calculated tensor is cached, it may lead to subtle bugs if the same value is used multiple times
with and without no_grad() context.
It is advised in such cases to not use no_grad and stick to .detach()
"""
if name in self._storage:
return self._storage[name]
elif name in self._providers:
value = self._storage[name] = self._providers[name](self)
return value
elif name.startswith('rollout:'):
rollout_name = name[8:]
value = self._storage[name] = self.rollout.batch_tensor(rollout_name)
return value
else:
raise RuntimeError(f"Key {name} is not provided by this evaluator") | [
"def",
"get",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"in",
"self",
".",
"_storage",
":",
"return",
"self",
".",
"_storage",
"[",
"name",
"]",
"elif",
"name",
"in",
"self",
".",
"_providers",
":",
"value",
"=",
"self",
".",
"_storage",
"[",
"name",
"]",
"=",
"self",
".",
"_providers",
"[",
"name",
"]",
"(",
"self",
")",
"return",
"value",
"elif",
"name",
".",
"startswith",
"(",
"'rollout:'",
")",
":",
"rollout_name",
"=",
"name",
"[",
"8",
":",
"]",
"value",
"=",
"self",
".",
"_storage",
"[",
"name",
"]",
"=",
"self",
".",
"rollout",
".",
"batch_tensor",
"(",
"rollout_name",
")",
"return",
"value",
"else",
":",
"raise",
"RuntimeError",
"(",
"f\"Key {name} is not provided by this evaluator\"",
")"
] | Return a value from this evaluator.
Because the calculated tensor is cached, it may lead to subtle bugs if the same value is used multiple times
with and without no_grad() context.
It is advised in such cases to not use no_grad and stick to .detach() | [
"Return",
"a",
"value",
"from",
"this",
"evaluator",
"."
] | python | train | 39.5 |
Parsely/birding | src/birding/bolt.py | https://github.com/Parsely/birding/blob/c7f6eee56424234e361b1a455595de202e744dac/src/birding/bolt.py#L16-L27 | def fault_barrier(fn):
"""Method decorator to catch and log errors, then send fail message."""
@functools.wraps(fn)
def process(self, tup):
try:
return fn(self, tup)
except Exception as e:
if isinstance(e, KeyboardInterrupt):
return
print(str(e), file=sys.stderr)
self.fail(tup)
return process | [
"def",
"fault_barrier",
"(",
"fn",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"fn",
")",
"def",
"process",
"(",
"self",
",",
"tup",
")",
":",
"try",
":",
"return",
"fn",
"(",
"self",
",",
"tup",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"isinstance",
"(",
"e",
",",
"KeyboardInterrupt",
")",
":",
"return",
"print",
"(",
"str",
"(",
"e",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"self",
".",
"fail",
"(",
"tup",
")",
"return",
"process"
] | Method decorator to catch and log errors, then send fail message. | [
"Method",
"decorator",
"to",
"catch",
"and",
"log",
"errors",
"then",
"send",
"fail",
"message",
"."
] | python | train | 31.5 |
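
A toy illustration of the decorator's behaviour when the wrapped method raises; it assumes the `fault_barrier` definition above (with its `functools` and `sys` imports) is in scope, and `fail` here is a stand-in that just records the tuple instead of talking to Storm:

```python
class DemoBolt:
    def __init__(self):
        self.failed = []

    def fail(self, tup):
        # Stand-in for the real fail(): just remember which tuple failed.
        self.failed.append(tup)

    @fault_barrier
    def process(self, tup):
        raise ValueError('boom')

bolt = DemoBolt()
bolt.process('tuple-1')  # the error is printed to stderr, then the tuple is failed
print(bolt.failed)       # ['tuple-1']
```
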
mozilla/mozdownload | mozdownload/timezones.py | https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/timezones.py#L23-L34 | def dst(self, dt):
"""Calculate delta for daylight saving."""
# Daylight saving starts on the second Sunday of March at 2AM standard
dst_start_date = self.first_sunday(dt.year, 3) + timedelta(days=7) \
+ timedelta(hours=2)
# Daylight saving ends on the first Sunday of November at 2AM standard
dst_end_date = self.first_sunday(dt.year, 11) + timedelta(hours=2)
if dst_start_date <= dt.replace(tzinfo=None) < dst_end_date:
return timedelta(hours=1)
else:
return timedelta(0) | [
"def",
"dst",
"(",
"self",
",",
"dt",
")",
":",
"# Daylight saving starts on the second Sunday of March at 2AM standard",
"dst_start_date",
"=",
"self",
".",
"first_sunday",
"(",
"dt",
".",
"year",
",",
"3",
")",
"+",
"timedelta",
"(",
"days",
"=",
"7",
")",
"+",
"timedelta",
"(",
"hours",
"=",
"2",
")",
"# Daylight saving ends on the first Sunday of November at 2AM standard",
"dst_end_date",
"=",
"self",
".",
"first_sunday",
"(",
"dt",
".",
"year",
",",
"11",
")",
"+",
"timedelta",
"(",
"hours",
"=",
"2",
")",
"if",
"dst_start_date",
"<=",
"dt",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"<",
"dst_end_date",
":",
"return",
"timedelta",
"(",
"hours",
"=",
"1",
")",
"else",
":",
"return",
"timedelta",
"(",
"0",
")"
] | Calculate delta for daylight saving. | [
"Calculate",
"delta",
"for",
"daylight",
"saving",
"."
] | python | train | 49.833333 |
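
The two boundaries above depend on a `first_sunday` helper defined elsewhere in the class; a standalone sketch with a hypothetical reimplementation of that helper shows the DST window it yields for 2024:

```python
from datetime import datetime, timedelta

def first_sunday(year, month):
    # Hypothetical stand-in for the class's first_sunday() helper.
    d = datetime(year, month, 1)
    return d + timedelta(days=(6 - d.weekday()) % 7)

year = 2024
dst_start = first_sunday(year, 3) + timedelta(days=7, hours=2)  # second Sunday of March, 2 AM
dst_end = first_sunday(year, 11) + timedelta(hours=2)           # first Sunday of November, 2 AM
print(dst_start)  # 2024-03-10 02:00:00
print(dst_end)    # 2024-11-03 02:00:00
```
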
MSchnei/pyprf_feature | pyprf_feature/analysis/pyprf_sim_ep.py | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/pyprf_sim_ep.py#L57-L83 | def main():
"""pyprf_sim entry point."""
# Get list of input arguments (without first one, which is the path to the
# function that is called): --NOTE: This is another way of accessing
# input arguments, but since we use 'argparse' it is redundant.
# lstArgs = sys.argv[1:]
strWelcome = 'pyprf_sim ' + __version__
strDec = '=' * len(strWelcome)
print(strDec + '\n' + strWelcome + '\n' + strDec)
objNspc = get_arg_parse()
# Print info if no config argument is provided.
if any(item is None for item in [objNspc.strCsvPrf, objNspc.strStmApr]):
print('Please provide necessary file paths, e.g.:')
print(' pyprf_sim -strCsvPrf /path/to/my_config_file.csv')
print(' -strStmApr /path/to/my_stim_apertures.npy')
else:
# Signal non-test mode to lower functions (needed for pytest):
lgcTest = False
# Call to main function, to invoke pRF analysis:
pyprf_sim(objNspc.strCsvPrf, objNspc.strStmApr, lgcTest=lgcTest,
lgcNoise=objNspc.lgcNoise, lgcRtnNrl=objNspc.lgcRtnNrl,
lstRat=objNspc.supsur) | [
"def",
"main",
"(",
")",
":",
"# Get list of input arguments (without first one, which is the path to the",
"# function that is called): --NOTE: This is another way of accessing",
"# input arguments, but since we use 'argparse' it is redundant.",
"# lstArgs = sys.argv[1:]",
"strWelcome",
"=",
"'pyprf_sim '",
"+",
"__version__",
"strDec",
"=",
"'='",
"*",
"len",
"(",
"strWelcome",
")",
"print",
"(",
"strDec",
"+",
"'\\n'",
"+",
"strWelcome",
"+",
"'\\n'",
"+",
"strDec",
")",
"objNspc",
"=",
"get_arg_parse",
"(",
")",
"# Print info if no config argument is provided.",
"if",
"any",
"(",
"item",
"is",
"None",
"for",
"item",
"in",
"[",
"objNspc",
".",
"strCsvPrf",
",",
"objNspc",
".",
"strStmApr",
"]",
")",
":",
"print",
"(",
"'Please provide necessary file paths, e.g.:'",
")",
"print",
"(",
"' pyprf_sim -strCsvPrf /path/to/my_config_file.csv'",
")",
"print",
"(",
"' -strStmApr /path/to/my_stim_apertures.npy'",
")",
"else",
":",
"# Signal non-test mode to lower functions (needed for pytest):",
"lgcTest",
"=",
"False",
"# Call to main function, to invoke pRF analysis:",
"pyprf_sim",
"(",
"objNspc",
".",
"strCsvPrf",
",",
"objNspc",
".",
"strStmApr",
",",
"lgcTest",
"=",
"lgcTest",
",",
"lgcNoise",
"=",
"objNspc",
".",
"lgcNoise",
",",
"lgcRtnNrl",
"=",
"objNspc",
".",
"lgcRtnNrl",
",",
"lstRat",
"=",
"objNspc",
".",
"supsur",
")"
] | pyprf_sim entry point. | [
"pyprf_sim",
"entry",
"point",
"."
] | python | train | 41.333333 |
geometalab/pyGeoTile | pygeotile/tile.py | https://github.com/geometalab/pyGeoTile/blob/b1f44271698f5fc4d18c2add935797ed43254aa6/pygeotile/tile.py#L16-L24 | def from_quad_tree(cls, quad_tree):
"""Creates a tile from a Microsoft QuadTree"""
assert bool(re.match('^[0-3]*$', quad_tree)), 'QuadTree value can only consists of the digits 0, 1, 2 and 3.'
zoom = len(str(quad_tree))
offset = int(math.pow(2, zoom)) - 1
google_x, google_y = [reduce(lambda result, bit: (result << 1) | bit, bits, 0)
for bits in zip(*(reversed(divmod(digit, 2))
for digit in (int(c) for c in str(quad_tree))))]
return cls(tms_x=google_x, tms_y=(offset - google_y), zoom=zoom) | [
"def",
"from_quad_tree",
"(",
"cls",
",",
"quad_tree",
")",
":",
"assert",
"bool",
"(",
"re",
".",
"match",
"(",
"'^[0-3]*$'",
",",
"quad_tree",
")",
")",
",",
"'QuadTree value can only consists of the digits 0, 1, 2 and 3.'",
"zoom",
"=",
"len",
"(",
"str",
"(",
"quad_tree",
")",
")",
"offset",
"=",
"int",
"(",
"math",
".",
"pow",
"(",
"2",
",",
"zoom",
")",
")",
"-",
"1",
"google_x",
",",
"google_y",
"=",
"[",
"reduce",
"(",
"lambda",
"result",
",",
"bit",
":",
"(",
"result",
"<<",
"1",
")",
"|",
"bit",
",",
"bits",
",",
"0",
")",
"for",
"bits",
"in",
"zip",
"(",
"*",
"(",
"reversed",
"(",
"divmod",
"(",
"digit",
",",
"2",
")",
")",
"for",
"digit",
"in",
"(",
"int",
"(",
"c",
")",
"for",
"c",
"in",
"str",
"(",
"quad_tree",
")",
")",
")",
")",
"]",
"return",
"cls",
"(",
"tms_x",
"=",
"google_x",
",",
"tms_y",
"=",
"(",
"offset",
"-",
"google_y",
")",
",",
"zoom",
"=",
"zoom",
")"
] | Creates a tile from a Microsoft QuadTree | [
"Creates",
"a",
"tile",
"from",
"a",
"Microsoft",
"QuadTree"
] | python | train | 67.888889 |
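
The reduce/zip expression above interleaves the QuadTree digits into Google-style tile coordinates; an equivalent loop-based sketch (standalone, with a made-up key) makes the bit handling easier to follow:

```python
def quad_tree_to_google(quad_tree):
    # Each QuadTree digit carries one x bit (its low bit) and one y bit (its high bit).
    google_x = google_y = 0
    for digit in quad_tree:
        d = int(digit)
        google_x = (google_x << 1) | (d & 1)
        google_y = (google_y << 1) | (d >> 1)
    return google_x, google_y, len(quad_tree)

print(quad_tree_to_google('123'))  # (5, 3, 3); the TMS y above would be (2**3 - 1) - 3 = 4
```
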
diging/tethne | tethne/readers/zotero.py | https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/zotero.py#L53-L83 | def _infer_spaces(s):
"""
Uses dynamic programming to infer the location of spaces in a string
without spaces.
"""
s = s.lower()
# Find the best match for the i first characters, assuming cost has
# been built for the i-1 first characters.
# Returns a pair (match_cost, match_length).
def best_match(i):
candidates = enumerate(reversed(cost[max(0, i - MAXWORD):i]))
return min((c + WORDCOST.get(s[i-k-1: i], 9e999), k + 1)
for k, c in candidates)
# Build the cost array.
cost = [0]
for i in range(1, len(s) + 1):
c, k = best_match(i)
cost.append(c)
# Backtrack to recover the minimal-cost string.
out = []
i = len(s)
while i > 0:
c, k = best_match(i)
assert c == cost[i]
out.append(s[i-k:i])
i -= k
return u" ".join(reversed(out)) | [
"def",
"_infer_spaces",
"(",
"s",
")",
":",
"s",
"=",
"s",
".",
"lower",
"(",
")",
"# Find the best match for the i first characters, assuming cost has",
"# been built for the i-1 first characters.",
"# Returns a pair (match_cost, match_length).",
"def",
"best_match",
"(",
"i",
")",
":",
"candidates",
"=",
"enumerate",
"(",
"reversed",
"(",
"cost",
"[",
"max",
"(",
"0",
",",
"i",
"-",
"MAXWORD",
")",
":",
"i",
"]",
")",
")",
"return",
"min",
"(",
"(",
"c",
"+",
"WORDCOST",
".",
"get",
"(",
"s",
"[",
"i",
"-",
"k",
"-",
"1",
":",
"i",
"]",
",",
"9e999",
")",
",",
"k",
"+",
"1",
")",
"for",
"k",
",",
"c",
"in",
"candidates",
")",
"# Build the cost array.",
"cost",
"=",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"s",
")",
"+",
"1",
")",
":",
"c",
",",
"k",
"=",
"best_match",
"(",
"i",
")",
"cost",
".",
"append",
"(",
"c",
")",
"# Backtrack to recover the minimal-cost string.",
"out",
"=",
"[",
"]",
"i",
"=",
"len",
"(",
"s",
")",
"while",
"i",
">",
"0",
":",
"c",
",",
"k",
"=",
"best_match",
"(",
"i",
")",
"assert",
"c",
"==",
"cost",
"[",
"i",
"]",
"out",
".",
"append",
"(",
"s",
"[",
"i",
"-",
"k",
":",
"i",
"]",
")",
"i",
"-=",
"k",
"return",
"u\" \"",
".",
"join",
"(",
"reversed",
"(",
"out",
")",
")"
] | Uses dynamic programming to infer the location of spaces in a string
without spaces. | [
"Uses",
"dynamic",
"programming",
"to",
"infer",
"the",
"location",
"of",
"spaces",
"in",
"a",
"string",
"without",
"spaces",
"."
] | python | train | 27.612903 |
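The _infer_spaces helper above depends on module-level WORDCOST / MAXWORD tables that are not part of this row; below is a self-contained toy version of the same dynamic programme, with a three-word cost table invented purely to make the example runnable.

    # Toy cost table: lower cost = more probable word. The real helper builds this
    # from a word-frequency list (Zipf-style weights); these entries are made up.
    WORDCOST = {'the': 1.0, 'quick': 2.0, 'fox': 2.0}
    MAXWORD = max(len(w) for w in WORDCOST)

    def infer_spaces(s):
        def best_match(i):
            # best (cost, word length) over all candidate words ending at position i
            return min((cost[i - k - 1] + WORDCOST.get(s[i - k - 1:i], 9e999), k + 1)
                       for k in range(min(i, MAXWORD)))

        cost = [0]
        for i in range(1, len(s) + 1):
            cost.append(best_match(i)[0])

        out, i = [], len(s)
        while i > 0:
            c, k = best_match(i)     # backtrack along the minimal-cost segmentation
            out.append(s[i - k:i])
            i -= k
        return ' '.join(reversed(out))

    print(infer_spaces('thequickfox'))   # -> 'the quick fox'
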
apple/turicreate | src/unity/python/turicreate/data_structures/sarray.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sarray.py#L478-L508 | def from_const(cls, value, size, dtype=type(None)):
"""
Constructs an SArray of size with a const value.
Parameters
----------
value : [int | float | str | array.array | list | dict | datetime]
The value to fill the SArray
size : int
The size of the SArray
dtype : type
The type of the SArray. If not specified, is automatically detected
from the value. This should be specified if value=None since the
actual type of the SArray can be anything.
Examples
--------
Construct an SArray consisting of 10 zeroes:
>>> turicreate.SArray.from_const(0, 10)
Construct an SArray consisting of 10 missing string values:
>>> turicreate.SArray.from_const(None, 10, str)
"""
assert isinstance(size, (int, long)) and size >= 0, "size must be a positive int"
if not isinstance(value, (type(None), int, float, str, array.array, list, dict, datetime.datetime)):
raise TypeError('Cannot create sarray of value type %s' % str(type(value)))
proxy = UnitySArrayProxy()
proxy.load_from_const(value, size, dtype)
return cls(_proxy=proxy) | [
"def",
"from_const",
"(",
"cls",
",",
"value",
",",
"size",
",",
"dtype",
"=",
"type",
"(",
"None",
")",
")",
":",
"assert",
"isinstance",
"(",
"size",
",",
"(",
"int",
",",
"long",
")",
")",
"and",
"size",
">=",
"0",
",",
"\"size must be a positive int\"",
"if",
"not",
"isinstance",
"(",
"value",
",",
"(",
"type",
"(",
"None",
")",
",",
"int",
",",
"float",
",",
"str",
",",
"array",
".",
"array",
",",
"list",
",",
"dict",
",",
"datetime",
".",
"datetime",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Cannot create sarray of value type %s'",
"%",
"str",
"(",
"type",
"(",
"value",
")",
")",
")",
"proxy",
"=",
"UnitySArrayProxy",
"(",
")",
"proxy",
".",
"load_from_const",
"(",
"value",
",",
"size",
",",
"dtype",
")",
"return",
"cls",
"(",
"_proxy",
"=",
"proxy",
")"
] | Constructs an SArray of size with a const value.
Parameters
----------
value : [int | float | str | array.array | list | dict | datetime]
The value to fill the SArray
size : int
The size of the SArray
dtype : type
The type of the SArray. If not specified, is automatically detected
from the value. This should be specified if value=None since the
actual type of the SArray can be anything.
Examples
--------
Construct an SArray consisting of 10 zeroes:
>>> turicreate.SArray.from_const(0, 10)
Construct an SArray consisting of 10 missing string values:
>>> turicreate.SArray.from_const(None, 10, str) | [
"Constructs",
"an",
"SArray",
"of",
"size",
"with",
"a",
"const",
"value",
"."
] | python | train | 38.774194 |
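A short, hedged example for the SArray.from_const entry above; it assumes turicreate is installed and simply mirrors the docstring's own calls (element indexing and len() are the library's standard SArray behaviour, not shown in this row).

    import turicreate as tc

    zeros = tc.SArray.from_const(0, 10)             # ten integer zeros, as in the docstring
    missing = tc.SArray.from_const(None, 10, str)   # ten missing values, explicitly typed str
    print(len(zeros), len(missing))                 # 10 10
    print(zeros[0], missing[0])                     # 0 None
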
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_ras_ext.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ras_ext.py#L269-L281 | def show_support_save_status_output_show_support_save_status_percentage_of_completion(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_support_save_status = ET.Element("show_support_save_status")
config = show_support_save_status
output = ET.SubElement(show_support_save_status, "output")
show_support_save_status = ET.SubElement(output, "show-support-save-status")
percentage_of_completion = ET.SubElement(show_support_save_status, "percentage-of-completion")
percentage_of_completion.text = kwargs.pop('percentage_of_completion')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"show_support_save_status_output_show_support_save_status_percentage_of_completion",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"show_support_save_status",
"=",
"ET",
".",
"Element",
"(",
"\"show_support_save_status\"",
")",
"config",
"=",
"show_support_save_status",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"show_support_save_status",
",",
"\"output\"",
")",
"show_support_save_status",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"show-support-save-status\"",
")",
"percentage_of_completion",
"=",
"ET",
".",
"SubElement",
"(",
"show_support_save_status",
",",
"\"percentage-of-completion\"",
")",
"percentage_of_completion",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'percentage_of_completion'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train | 54.769231 |
xflr6/gsheets | gsheets/models.py | https://github.com/xflr6/gsheets/blob/ca4f1273044704e529c1138e3f942836fc496e1b/gsheets/models.py#L170-L190 | def to_csv(self, encoding=export.ENCODING, dialect=export.DIALECT,
make_filename=export.MAKE_FILENAME):
"""Dump all worksheets of the spreadsheet to individual CSV files.
Args:
encoding (str): result string encoding
dialect (str): :mod:`csv` dialect name or object to use
make_filename: template or one-argument callable returning the filename
If ``make_filename`` is a string, it is string-interpolated with an
infos-dictionary with the fields ``id`` (spreadhseet id), ``title``
(spreadsheet title), ``sheet`` (worksheet title), ``gid`` (worksheet
id), ``index`` (worksheet index), and ``dialect`` CSV dialect to
generate the filename: ``filename = make_filename % infos``.
If ``make_filename`` is a callable, it will be called with the
infos-dictionary to generate the filename:
``filename = make_filename(infos)``.
"""
for s in self._sheets:
s.to_csv(None, encoding, dialect, make_filename) | [
"def",
"to_csv",
"(",
"self",
",",
"encoding",
"=",
"export",
".",
"ENCODING",
",",
"dialect",
"=",
"export",
".",
"DIALECT",
",",
"make_filename",
"=",
"export",
".",
"MAKE_FILENAME",
")",
":",
"for",
"s",
"in",
"self",
".",
"_sheets",
":",
"s",
".",
"to_csv",
"(",
"None",
",",
"encoding",
",",
"dialect",
",",
"make_filename",
")"
] | Dump all worksheets of the spreadsheet to individual CSV files.
Args:
encoding (str): result string encoding
dialect (str): :mod:`csv` dialect name or object to use
make_filename: template or one-argument callable returning the filename
If ``make_filename`` is a string, it is string-interpolated with an
infos-dictionary with the fields ``id`` (spreadhseet id), ``title``
(spreadsheet title), ``sheet`` (worksheet title), ``gid`` (worksheet
id), ``index`` (worksheet index), and ``dialect`` CSV dialect to
generate the filename: ``filename = make_filename % infos``.
If ``make_filename`` is a callable, it will be called with the
infos-dictionary to generate the filename:
``filename = make_filename(infos)``. | [
"Dump",
"all",
"worksheets",
"of",
"the",
"spreadsheet",
"to",
"individual",
"CSV",
"files",
"."
] | python | train | 49.285714 |
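A hedged usage sketch for the Spreadsheet.to_csv entry above. The Sheets.from_files construction and id-based indexing follow the gsheets package's usual pattern but are assumptions here; the credential file names and the spreadsheet id are placeholders.

    from gsheets import Sheets

    sheets = Sheets.from_files('client_secrets.json', 'storage.json')
    spreadsheet = sheets['1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms']

    # One CSV per worksheet; the template is filled from the documented infos dict
    # (id, title, sheet, gid, index, dialect).
    spreadsheet.to_csv(make_filename='%(title)s - %(sheet)s.csv')
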
idlesign/django-yaturbo | yaturbo/toolbox.py | https://github.com/idlesign/django-yaturbo/blob/a5ac9053bb800ea8082dc0615b93398917c3290a/yaturbo/toolbox.py#L18-L28 | def sanitize_turbo(html, allowed_tags=TURBO_ALLOWED_TAGS, allowed_attrs=TURBO_ALLOWED_ATTRS):
"""Sanitizes HTML, removing not allowed tags and attributes.
:param str|unicode html:
:param list allowed_tags: List of allowed tags.
:param dict allowed_attrs: Dictionary with attributes allowed for tags.
:rtype: unicode
"""
return clean(html, tags=allowed_tags, attributes=allowed_attrs, strip=True) | [
"def",
"sanitize_turbo",
"(",
"html",
",",
"allowed_tags",
"=",
"TURBO_ALLOWED_TAGS",
",",
"allowed_attrs",
"=",
"TURBO_ALLOWED_ATTRS",
")",
":",
"return",
"clean",
"(",
"html",
",",
"tags",
"=",
"allowed_tags",
",",
"attributes",
"=",
"allowed_attrs",
",",
"strip",
"=",
"True",
")"
] | Sanitizes HTML, removing not allowed tags and attributes.
:param str|unicode html:
:param list allowed_tags: List of allowed tags.
:param dict allowed_attrs: Dictionary with attributes allowed for tags.
:rtype: unicode | [
"Sanitizes",
"HTML",
"removing",
"not",
"allowed",
"tags",
"and",
"attributes",
"."
] | python | test | 37.818182 |
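A minimal, hedged example of the sanitize_turbo helper above; the HTML snippet is illustrative, and the call relies only on the default allow-lists visible in the signature.

    from yaturbo.toolbox import sanitize_turbo

    dirty = '<p onclick="hijack()">Hello <script>alert(1)</script><b>world</b></p>'
    clean_html = sanitize_turbo(dirty)
    print(clean_html)   # the <script> element and the onclick attribute are stripped
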
kubernetes-client/python | kubernetes/client/apis/node_v1beta1_api.py | https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/node_v1beta1_api.py#L596-L620 | def patch_runtime_class(self, name, body, **kwargs):
"""
partially update the specified RuntimeClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_runtime_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the RuntimeClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1beta1RuntimeClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_runtime_class_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_runtime_class_with_http_info(name, body, **kwargs)
return data | [
"def",
"patch_runtime_class",
"(",
"self",
",",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"patch_runtime_class_with_http_info",
"(",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"patch_runtime_class_with_http_info",
"(",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | partially update the specified RuntimeClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_runtime_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the RuntimeClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1beta1RuntimeClass
If the method is called asynchronously,
returns the request thread. | [
"partially",
"update",
"the",
"specified",
"RuntimeClass",
"This",
"method",
"makes",
"a",
"synchronous",
"HTTP",
"request",
"by",
"default",
".",
"To",
"make",
"an",
"asynchronous",
"HTTP",
"request",
"please",
"pass",
"async_req",
"=",
"True",
">>>",
"thread",
"=",
"api",
".",
"patch_runtime_class",
"(",
"name",
"body",
"async_req",
"=",
"True",
")",
">>>",
"result",
"=",
"thread",
".",
"get",
"()"
] | python | train | 75 |
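A hedged usage sketch for the patch_runtime_class entry above; it assumes a reachable cluster and kubeconfig, the NodeV1beta1Api class name follows the client's module naming, and the RuntimeClass name ('nvidia') and label values are placeholders.

    from kubernetes import client, config

    config.load_kube_config()                  # or config.load_incluster_config() inside a pod
    api = client.NodeV1beta1Api()

    # Merge-style patch body: only the fields being changed are sent.
    patch_body = {'metadata': {'labels': {'tier': 'gpu'}}}
    updated = api.patch_runtime_class('nvidia', patch_body)   # 'nvidia' is a placeholder name
    print(updated.metadata.labels)
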
ewels/MultiQC | multiqc/modules/star/star.py | https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/star/star.py#L186-L207 | def star_stats_table(self):
""" Take the parsed stats from the STAR report and add them to the
basic stats table at the top of the report """
headers = OrderedDict()
headers['uniquely_mapped_percent'] = {
'title': '% Aligned',
'description': '% Uniquely mapped reads',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'YlGn'
}
headers['uniquely_mapped'] = {
'title': '{} Aligned'.format(config.read_count_prefix),
'description': 'Uniquely mapped reads ({})'.format(config.read_count_desc),
'min': 0,
'scale': 'PuRd',
'modify': lambda x: x * config.read_count_multiplier,
'shared_key': 'read_count'
}
self.general_stats_addcols(self.star_data, headers) | [
"def",
"star_stats_table",
"(",
"self",
")",
":",
"headers",
"=",
"OrderedDict",
"(",
")",
"headers",
"[",
"'uniquely_mapped_percent'",
"]",
"=",
"{",
"'title'",
":",
"'% Aligned'",
",",
"'description'",
":",
"'% Uniquely mapped reads'",
",",
"'max'",
":",
"100",
",",
"'min'",
":",
"0",
",",
"'suffix'",
":",
"'%'",
",",
"'scale'",
":",
"'YlGn'",
"}",
"headers",
"[",
"'uniquely_mapped'",
"]",
"=",
"{",
"'title'",
":",
"'{} Aligned'",
".",
"format",
"(",
"config",
".",
"read_count_prefix",
")",
",",
"'description'",
":",
"'Uniquely mapped reads ({})'",
".",
"format",
"(",
"config",
".",
"read_count_desc",
")",
",",
"'min'",
":",
"0",
",",
"'scale'",
":",
"'PuRd'",
",",
"'modify'",
":",
"lambda",
"x",
":",
"x",
"*",
"config",
".",
"read_count_multiplier",
",",
"'shared_key'",
":",
"'read_count'",
"}",
"self",
".",
"general_stats_addcols",
"(",
"self",
".",
"star_data",
",",
"headers",
")"
] | Take the parsed stats from the STAR report and add them to the
basic stats table at the top of the report | [
"Take",
"the",
"parsed",
"stats",
"from",
"the",
"STAR",
"report",
"and",
"add",
"them",
"to",
"the",
"basic",
"stats",
"table",
"at",
"the",
"top",
"of",
"the",
"report"
] | python | train | 38 |
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L337-L356 | def get_capabilities_by_ext(self, strict_type_matching: bool = False) -> Dict[str, Dict[Type, Dict[str, Parser]]]:
"""
For all extensions that are supported,
lists all types that can be parsed from this extension.
For each type, provide the list of parsers supported. The order is "most pertinent first"
This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine.
That will ensure consistency of the results.
:param strict_type_matching:
:return:
"""
check_var(strict_type_matching, var_types=bool, var_name='strict_matching')
res = dict()
# For all extensions that are supported,
for ext in self.get_all_supported_exts_for_type(type_to_match=JOKER, strict=strict_type_matching):
res[ext] = self.get_capabilities_for_ext(ext, strict_type_matching)
return res | [
"def",
"get_capabilities_by_ext",
"(",
"self",
",",
"strict_type_matching",
":",
"bool",
"=",
"False",
")",
"->",
"Dict",
"[",
"str",
",",
"Dict",
"[",
"Type",
",",
"Dict",
"[",
"str",
",",
"Parser",
"]",
"]",
"]",
":",
"check_var",
"(",
"strict_type_matching",
",",
"var_types",
"=",
"bool",
",",
"var_name",
"=",
"'strict_matching'",
")",
"res",
"=",
"dict",
"(",
")",
"# For all extensions that are supported,",
"for",
"ext",
"in",
"self",
".",
"get_all_supported_exts_for_type",
"(",
"type_to_match",
"=",
"JOKER",
",",
"strict",
"=",
"strict_type_matching",
")",
":",
"res",
"[",
"ext",
"]",
"=",
"self",
".",
"get_capabilities_for_ext",
"(",
"ext",
",",
"strict_type_matching",
")",
"return",
"res"
] | For all extensions that are supported,
lists all types that can be parsed from this extension.
For each type, provide the list of parsers supported. The order is "most pertinent first"
This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine.
That will ensure consistency of the results.
:param strict_type_matching:
:return: | [
"For",
"all",
"extensions",
"that",
"are",
"supported",
"lists",
"all",
"types",
"that",
"can",
"be",
"parsed",
"from",
"this",
"extension",
".",
"For",
"each",
"type",
"provide",
"the",
"list",
"of",
"parsers",
"supported",
".",
"The",
"order",
"is",
"most",
"pertinent",
"first"
] | python | train | 45.95 |
fulfilio/python-magento | magento/utils.py | https://github.com/fulfilio/python-magento/blob/720ec136a6e438a9ee4ee92848a9820b91732750/magento/utils.py#L12-L26 | def expand_url(url, protocol):
"""
Expands the given URL to a full URL by adding
the magento soap/wsdl parts
:param url: URL to be expanded
:param service: 'xmlrpc' or 'soap'
"""
if protocol == 'soap':
ws_part = 'api/?wsdl'
elif protocol == 'xmlrpc':
ws_part = 'index.php/api/xmlrpc'
else:
ws_part = 'index.php/rest/V1'
return url.endswith('/') and url + ws_part or url + '/' + ws_part | [
"def",
"expand_url",
"(",
"url",
",",
"protocol",
")",
":",
"if",
"protocol",
"==",
"'soap'",
":",
"ws_part",
"=",
"'api/?wsdl'",
"elif",
"protocol",
"==",
"'xmlrpc'",
":",
"ws_part",
"=",
"'index.php/api/xmlrpc'",
"else",
":",
"ws_part",
"=",
"'index.php/rest/V1'",
"return",
"url",
".",
"endswith",
"(",
"'/'",
")",
"and",
"url",
"+",
"ws_part",
"or",
"url",
"+",
"'/'",
"+",
"ws_part"
] | Expands the given URL to a full URL by adding
the magento soap/wsdl parts
:param url: URL to be expanded
:param service: 'xmlrpc' or 'soap' | [
"Expands",
"the",
"given",
"URL",
"to",
"a",
"full",
"URL",
"by",
"adding",
"the",
"magento",
"soap",
"/",
"wsdl",
"parts"
] | python | train | 29.066667 |
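A quick, hedged demonstration of the expand_url helper above; the store URL is a placeholder and the expected outputs follow directly from the three branches in the code.

    from magento.utils import expand_url

    base = 'https://shop.example.com'
    print(expand_url(base, 'soap'))     # https://shop.example.com/api/?wsdl
    print(expand_url(base, 'xmlrpc'))   # https://shop.example.com/index.php/api/xmlrpc
    print(expand_url(base, 'rest'))     # https://shop.example.com/index.php/rest/V1
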
blue-yonder/tsfresh | tsfresh/utilities/dataframe_functions.py | https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/utilities/dataframe_functions.py#L89-L147 | def impute_dataframe_range(df_impute, col_to_max, col_to_min, col_to_median):
"""
Columnwise replaces all ``NaNs``, ``-inf`` and ``+inf`` from the DataFrame `df_impute` with average/extreme values
from the provided dictionaries.
This is done as follows: Each occurring ``inf`` or ``NaN`` in `df_impute` is replaced by
* ``-inf`` -> by value in col_to_min
* ``+inf`` -> by value in col_to_max
* ``NaN`` -> by value in col_to_median
If a column of df_impute is not found in the one of the dictionaries, this method will raise a ValueError.
Also, if one of the values to replace is not finite a ValueError is returned
This function modifies `df_impute` in place. Afterwards df_impute is
guaranteed to not contain any non-finite values.
Also, all columns will be guaranteed to be of type ``np.float64``.
:param df_impute: DataFrame to impute
:type df_impute: pandas.DataFrame
:param col_to_max: Dictionary mapping column names to max values
:type col_to_max: dict
:param col_to_min: Dictionary mapping column names to min values
:type col_to_max: dict
:param col_to_median: Dictionary mapping column names to median values
:type col_to_max: dict
:return df_impute: imputed DataFrame
:rtype df_impute: pandas.DataFrame
:raise ValueError: if a column of df_impute is missing in col_to_max, col_to_min or col_to_median or a value
to replace is non finite
"""
columns = df_impute.columns
# Making sure col_to_median, col_to_max and col_to_min have entries for every column
if not set(columns) <= set(col_to_median.keys()) or \
not set(columns) <= set(col_to_max.keys()) or \
not set(columns) <= set(col_to_min.keys()):
raise ValueError("Some of the dictionaries col_to_median, col_to_max, col_to_min contains more or less keys "
"than the column names in df")
# check if there are non finite values for the replacement
if np.any(~np.isfinite(list(col_to_median.values()))) or \
np.any(~np.isfinite(list(col_to_min.values()))) or \
np.any(~np.isfinite(list(col_to_max.values()))):
raise ValueError("Some of the dictionaries col_to_median, col_to_max, col_to_min contains non finite values "
"to replace")
# Make the replacement dataframes as large as the real one
col_to_max = pd.DataFrame([col_to_max]*len(df_impute), index=df_impute.index)
col_to_min = pd.DataFrame([col_to_min]*len(df_impute), index=df_impute.index)
col_to_median = pd.DataFrame([col_to_median]*len(df_impute), index=df_impute.index)
df_impute.where(df_impute.values != np.PINF, other=col_to_max, inplace=True)
df_impute.where(df_impute.values != np.NINF, other=col_to_min, inplace=True)
df_impute.where(~np.isnan(df_impute.values), other=col_to_median, inplace=True)
df_impute.astype(np.float64, copy=False)
return df_impute | [
"def",
"impute_dataframe_range",
"(",
"df_impute",
",",
"col_to_max",
",",
"col_to_min",
",",
"col_to_median",
")",
":",
"columns",
"=",
"df_impute",
".",
"columns",
"# Making sure col_to_median, col_to_max and col_to_min have entries for every column",
"if",
"not",
"set",
"(",
"columns",
")",
"<=",
"set",
"(",
"col_to_median",
".",
"keys",
"(",
")",
")",
"or",
"not",
"set",
"(",
"columns",
")",
"<=",
"set",
"(",
"col_to_max",
".",
"keys",
"(",
")",
")",
"or",
"not",
"set",
"(",
"columns",
")",
"<=",
"set",
"(",
"col_to_min",
".",
"keys",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Some of the dictionaries col_to_median, col_to_max, col_to_min contains more or less keys \"",
"\"than the column names in df\"",
")",
"# check if there are non finite values for the replacement",
"if",
"np",
".",
"any",
"(",
"~",
"np",
".",
"isfinite",
"(",
"list",
"(",
"col_to_median",
".",
"values",
"(",
")",
")",
")",
")",
"or",
"np",
".",
"any",
"(",
"~",
"np",
".",
"isfinite",
"(",
"list",
"(",
"col_to_min",
".",
"values",
"(",
")",
")",
")",
")",
"or",
"np",
".",
"any",
"(",
"~",
"np",
".",
"isfinite",
"(",
"list",
"(",
"col_to_max",
".",
"values",
"(",
")",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Some of the dictionaries col_to_median, col_to_max, col_to_min contains non finite values \"",
"\"to replace\"",
")",
"# Make the replacement dataframes as large as the real one",
"col_to_max",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"col_to_max",
"]",
"*",
"len",
"(",
"df_impute",
")",
",",
"index",
"=",
"df_impute",
".",
"index",
")",
"col_to_min",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"col_to_min",
"]",
"*",
"len",
"(",
"df_impute",
")",
",",
"index",
"=",
"df_impute",
".",
"index",
")",
"col_to_median",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"col_to_median",
"]",
"*",
"len",
"(",
"df_impute",
")",
",",
"index",
"=",
"df_impute",
".",
"index",
")",
"df_impute",
".",
"where",
"(",
"df_impute",
".",
"values",
"!=",
"np",
".",
"PINF",
",",
"other",
"=",
"col_to_max",
",",
"inplace",
"=",
"True",
")",
"df_impute",
".",
"where",
"(",
"df_impute",
".",
"values",
"!=",
"np",
".",
"NINF",
",",
"other",
"=",
"col_to_min",
",",
"inplace",
"=",
"True",
")",
"df_impute",
".",
"where",
"(",
"~",
"np",
".",
"isnan",
"(",
"df_impute",
".",
"values",
")",
",",
"other",
"=",
"col_to_median",
",",
"inplace",
"=",
"True",
")",
"df_impute",
".",
"astype",
"(",
"np",
".",
"float64",
",",
"copy",
"=",
"False",
")",
"return",
"df_impute"
] | Columnwise replaces all ``NaNs``, ``-inf`` and ``+inf`` from the DataFrame `df_impute` with average/extreme values
from the provided dictionaries.
This is done as follows: Each occurring ``inf`` or ``NaN`` in `df_impute` is replaced by
* ``-inf`` -> by value in col_to_min
* ``+inf`` -> by value in col_to_max
* ``NaN`` -> by value in col_to_median
If a column of df_impute is not found in the one of the dictionaries, this method will raise a ValueError.
Also, if one of the values to replace is not finite a ValueError is returned
This function modifies `df_impute` in place. Afterwards df_impute is
guaranteed to not contain any non-finite values.
Also, all columns will be guaranteed to be of type ``np.float64``.
:param df_impute: DataFrame to impute
:type df_impute: pandas.DataFrame
:param col_to_max: Dictionary mapping column names to max values
:type col_to_max: dict
:param col_to_min: Dictionary mapping column names to min values
:type col_to_max: dict
:param col_to_median: Dictionary mapping column names to median values
:type col_to_max: dict
:return df_impute: imputed DataFrame
:rtype df_impute: pandas.DataFrame
:raise ValueError: if a column of df_impute is missing in col_to_max, col_to_min or col_to_median or a value
to replace is non finite | [
"Columnwise",
"replaces",
"all",
"NaNs",
"-",
"inf",
"and",
"+",
"inf",
"from",
"the",
"DataFrame",
"df_impute",
"with",
"average",
"/",
"extreme",
"values",
"from",
"the",
"provided",
"dictionaries",
"."
] | python | train | 49.830508 |
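A small, hedged example of impute_dataframe_range above; the frame and the per-column dictionaries are made up, and the expected replacements follow the documented rules (+inf -> max, -inf -> min, NaN -> median).

    import numpy as np
    import pandas as pd
    from tsfresh.utilities.dataframe_functions import impute_dataframe_range

    df = pd.DataFrame({'a': [1.0, np.nan, np.inf],
                       'b': [-np.inf, 2.0, np.nan]})
    col_to_max = {'a': 10.0, 'b': 20.0}
    col_to_min = {'a': -10.0, 'b': -20.0}
    col_to_median = {'a': 1.0, 'b': 2.0}

    impute_dataframe_range(df, col_to_max, col_to_min, col_to_median)  # modifies df in place
    print(df)
    #       a     b
    # 0   1.0 -20.0   (-inf -> col_to_min)
    # 1   1.0   2.0   (NaN  -> col_to_median)
    # 2  10.0   2.0   (+inf -> col_to_max, NaN -> median)
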
kmmbvnr/django-any | django_any/forms.py | https://github.com/kmmbvnr/django-any/blob/6f64ebd05476e2149e2e71deeefbb10f8edfc412/django_any/forms.py#L353-L371 | def multiple_choice_field_data(field, **kwargs):
"""
Return random value for MultipleChoiceField
>>> CHOICES = [('YNG', 'Child'), ('MIDDLE', 'Parent') ,('OLD', 'GrandParent')]
>>> result = any_form_field(forms.MultipleChoiceField(choices=CHOICES))
>>> type(result)
<type 'str'>
"""
if field.choices:
from django_any.functions import valid_choices
l = list(valid_choices(field.choices))
random.shuffle(l)
choices = []
count = xunit.any_int(min_value=1, max_value=len(field.choices))
for i in xrange(0, count):
choices.append(l[i])
return ' '.join(choices)
return 'None' | [
"def",
"multiple_choice_field_data",
"(",
"field",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"field",
".",
"choices",
":",
"from",
"django_any",
".",
"functions",
"import",
"valid_choices",
"l",
"=",
"list",
"(",
"valid_choices",
"(",
"field",
".",
"choices",
")",
")",
"random",
".",
"shuffle",
"(",
"l",
")",
"choices",
"=",
"[",
"]",
"count",
"=",
"xunit",
".",
"any_int",
"(",
"min_value",
"=",
"1",
",",
"max_value",
"=",
"len",
"(",
"field",
".",
"choices",
")",
")",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"count",
")",
":",
"choices",
".",
"append",
"(",
"l",
"[",
"i",
"]",
")",
"return",
"' '",
".",
"join",
"(",
"choices",
")",
"return",
"'None'"
] | Return random value for MultipleChoiceField
>>> CHOICES = [('YNG', 'Child'), ('MIDDLE', 'Parent') ,('OLD', 'GrandParent')]
>>> result = any_form_field(forms.MultipleChoiceField(choices=CHOICES))
>>> type(result)
<type 'str'> | [
"Return",
"random",
"value",
"for",
"MultipleChoiceField"
] | python | test | 34.526316 |
b3j0f/utils | b3j0f/utils/property.py | https://github.com/b3j0f/utils/blob/793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff/b3j0f/utils/property.py#L187-L208 | def get_properties(elt, keys=None, ctx=None):
"""Get elt properties.
:param elt: properties elt. Not None methods or unhashable types.
:param keys: key(s) of properties to get from elt.
If None, get all properties.
:type keys: list or str
:param ctx: elt ctx from where get properties. Equals elt if None. It
allows to get function properties related to a class or instance if
related function is defined in base class.
:return: list of properties by elt and name.
:rtype: list
"""
# initialize keys if str
if isinstance(keys, string_types):
keys = (keys,)
result = _get_properties(elt, keys=keys, local=False, ctx=ctx)
return result | [
"def",
"get_properties",
"(",
"elt",
",",
"keys",
"=",
"None",
",",
"ctx",
"=",
"None",
")",
":",
"# initialize keys if str",
"if",
"isinstance",
"(",
"keys",
",",
"string_types",
")",
":",
"keys",
"=",
"(",
"keys",
",",
")",
"result",
"=",
"_get_properties",
"(",
"elt",
",",
"keys",
"=",
"keys",
",",
"local",
"=",
"False",
",",
"ctx",
"=",
"ctx",
")",
"return",
"result"
] | Get elt properties.
:param elt: properties elt. Not None methods or unhashable types.
:param keys: key(s) of properties to get from elt.
If None, get all properties.
:type keys: list or str
:param ctx: elt ctx from where get properties. Equals elt if None. It
allows to get function properties related to a class or instance if
related function is defined in base class.
:return: list of properties by elt and name.
:rtype: list | [
"Get",
"elt",
"properties",
"."
] | python | train | 31.681818 |
pixelogik/NearPy | nearpy/experiments/recallprecisionexperiment.py | https://github.com/pixelogik/NearPy/blob/1b534b864d320d875508e95cd2b76b6d8c07a90b/nearpy/experiments/recallprecisionexperiment.py#L105-L200 | def perform_experiment(self, engine_list):
"""
Performs nearest neighbour recall experiments with custom vector data
for all engines in the specified list.
Returns self.result contains list of (recall, precision, search_time)
tuple. All are the averaged values over all request vectors.
search_time is the average retrieval/search time compared to the
average exact search time.
"""
# We will fill this array with measures for all the engines.
result = []
# For each engine, first index vectors and then retrieve neighbours
for endine_idx, engine in enumerate(engine_list):
print('Engine %d / %d' % (endine_idx, len(engine_list)))
# Clean storage
engine.clean_all_buckets()
# Use this to compute average recall
avg_recall = 0.0
# Use this to compute average precision
avg_precision = 0.0
# Use this to compute average search time
avg_search_time = 0.0
# Index all vectors and store them
for index, v in enumerate(self.vectors):
engine.store_vector(v, 'data_%d' % index)
# Look for N nearest neighbours for query vectors
for index in self.query_indices:
# Get indices of the real nearest as set
real_nearest = set(self.closest[index])
# We have to time the search
search_time_start = time.time()
# Get nearest N according to engine
nearest = engine.neighbours(self.vectors[index])
# Get search time
search_time = time.time() - search_time_start
# For comparance we need their indices (as set)
nearest = set([self.__index_of_vector(x[0]) for x in nearest])
# Remove query index from search result to make sure that
# recall and precision make sense in terms of "neighbours".
# If ONLY the query vector is retrieved, we want recall to be
# zero!
nearest.remove(index)
# If the result list is empty, recall and precision are 0.0
if len(nearest) == 0:
recall = 0.0
precision = 0.0
else:
# Get intersection count
inter_count = float(len(real_nearest & nearest))
# Normalize recall for this vector
recall = inter_count/float(len(real_nearest))
# Normalize precision for this vector
precision = inter_count/float(len(nearest))
# Add to accumulator
avg_recall += recall
# Add to accumulator
avg_precision += precision
# Add to accumulator
avg_search_time += search_time
# Normalize recall over query set
avg_recall /= float(len(self.query_indices))
# Normalize precision over query set
avg_precision /= float(len(self.query_indices))
# Normalize search time over query set
avg_search_time = avg_search_time / float(len(self.query_indices))
# Normalize search time with respect to exact search
avg_search_time /= self.exact_search_time_per_vector
print(' recall=%f, precision=%f, time=%f' % (avg_recall,
avg_precision,
avg_search_time))
result.append((avg_recall, avg_precision, avg_search_time))
# Return (recall, precision, search_time) tuple
return result | [
"def",
"perform_experiment",
"(",
"self",
",",
"engine_list",
")",
":",
"# We will fill this array with measures for all the engines.",
"result",
"=",
"[",
"]",
"# For each engine, first index vectors and then retrieve neighbours",
"for",
"endine_idx",
",",
"engine",
"in",
"enumerate",
"(",
"engine_list",
")",
":",
"print",
"(",
"'Engine %d / %d'",
"%",
"(",
"endine_idx",
",",
"len",
"(",
"engine_list",
")",
")",
")",
"# Clean storage",
"engine",
".",
"clean_all_buckets",
"(",
")",
"# Use this to compute average recall",
"avg_recall",
"=",
"0.0",
"# Use this to compute average precision",
"avg_precision",
"=",
"0.0",
"# Use this to compute average search time",
"avg_search_time",
"=",
"0.0",
"# Index all vectors and store them",
"for",
"index",
",",
"v",
"in",
"enumerate",
"(",
"self",
".",
"vectors",
")",
":",
"engine",
".",
"store_vector",
"(",
"v",
",",
"'data_%d'",
"%",
"index",
")",
"# Look for N nearest neighbours for query vectors",
"for",
"index",
"in",
"self",
".",
"query_indices",
":",
"# Get indices of the real nearest as set",
"real_nearest",
"=",
"set",
"(",
"self",
".",
"closest",
"[",
"index",
"]",
")",
"# We have to time the search",
"search_time_start",
"=",
"time",
".",
"time",
"(",
")",
"# Get nearest N according to engine",
"nearest",
"=",
"engine",
".",
"neighbours",
"(",
"self",
".",
"vectors",
"[",
"index",
"]",
")",
"# Get search time",
"search_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"search_time_start",
"# For comparance we need their indices (as set)",
"nearest",
"=",
"set",
"(",
"[",
"self",
".",
"__index_of_vector",
"(",
"x",
"[",
"0",
"]",
")",
"for",
"x",
"in",
"nearest",
"]",
")",
"# Remove query index from search result to make sure that",
"# recall and precision make sense in terms of \"neighbours\".",
"# If ONLY the query vector is retrieved, we want recall to be",
"# zero!",
"nearest",
".",
"remove",
"(",
"index",
")",
"# If the result list is empty, recall and precision are 0.0",
"if",
"len",
"(",
"nearest",
")",
"==",
"0",
":",
"recall",
"=",
"0.0",
"precision",
"=",
"0.0",
"else",
":",
"# Get intersection count",
"inter_count",
"=",
"float",
"(",
"len",
"(",
"real_nearest",
"&",
"nearest",
")",
")",
"# Normalize recall for this vector",
"recall",
"=",
"inter_count",
"/",
"float",
"(",
"len",
"(",
"real_nearest",
")",
")",
"# Normalize precision for this vector",
"precision",
"=",
"inter_count",
"/",
"float",
"(",
"len",
"(",
"nearest",
")",
")",
"# Add to accumulator",
"avg_recall",
"+=",
"recall",
"# Add to accumulator",
"avg_precision",
"+=",
"precision",
"# Add to accumulator",
"avg_search_time",
"+=",
"search_time",
"# Normalize recall over query set",
"avg_recall",
"/=",
"float",
"(",
"len",
"(",
"self",
".",
"query_indices",
")",
")",
"# Normalize precision over query set",
"avg_precision",
"/=",
"float",
"(",
"len",
"(",
"self",
".",
"query_indices",
")",
")",
"# Normalize search time over query set",
"avg_search_time",
"=",
"avg_search_time",
"/",
"float",
"(",
"len",
"(",
"self",
".",
"query_indices",
")",
")",
"# Normalize search time with respect to exact search",
"avg_search_time",
"/=",
"self",
".",
"exact_search_time_per_vector",
"print",
"(",
"' recall=%f, precision=%f, time=%f'",
"%",
"(",
"avg_recall",
",",
"avg_precision",
",",
"avg_search_time",
")",
")",
"result",
".",
"append",
"(",
"(",
"avg_recall",
",",
"avg_precision",
",",
"avg_search_time",
")",
")",
"# Return (recall, precision, search_time) tuple",
"return",
"result"
] | Performs nearest neighbour recall experiments with custom vector data
for all engines in the specified list.
Returns self.result contains list of (recall, precision, search_time)
tuple. All are the averaged values over all request vectors.
search_time is the average retrieval/search time compared to the
average exact search time. | [
"Performs",
"nearest",
"neighbour",
"recall",
"experiments",
"with",
"custom",
"vector",
"data",
"for",
"all",
"engines",
"in",
"the",
"specified",
"list",
"."
] | python | train | 38.96875 |
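A hedged sketch of how the recall/precision experiment above is typically driven. The Engine / RandomBinaryProjections construction follows NearPy's usual pattern, and the RecallPrecisionExperiment constructor arguments (N nearest neighbours plus the vector list) are assumptions not shown in this row.

    import numpy as np
    from nearpy import Engine
    from nearpy.hashes import RandomBinaryProjections
    from nearpy.experiments.recallprecisionexperiment import RecallPrecisionExperiment

    dim = 64
    vectors = [np.random.randn(dim) for _ in range(500)]

    # Assumed constructor: 10 exact nearest neighbours per query over this vector set.
    experiment = RecallPrecisionExperiment(10, vectors)

    # Two engines with different hash widths, following NearPy's usual construction.
    engines = [Engine(dim, lshashes=[RandomBinaryProjections('rbp', bits)]) for bits in (5, 10)]

    # One (recall, precision, relative search time) tuple per engine.
    print(experiment.perform_experiment(engines))
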
PGower/PyCanvas | builder.py | https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/builder.py#L133-L140 | def build_model_classes(metadata):
"""Generate a model class for any models contained in the specified spec file."""
i = importlib.import_module(metadata)
env = get_jinja_env()
model_template = env.get_template('model.py.jinja2')
for model in i.models:
with open(model_path(model.name.lower()), 'w') as t:
t.write(model_template.render(model_md=model)) | [
"def",
"build_model_classes",
"(",
"metadata",
")",
":",
"i",
"=",
"importlib",
".",
"import_module",
"(",
"metadata",
")",
"env",
"=",
"get_jinja_env",
"(",
")",
"model_template",
"=",
"env",
".",
"get_template",
"(",
"'model.py.jinja2'",
")",
"for",
"model",
"in",
"i",
".",
"models",
":",
"with",
"open",
"(",
"model_path",
"(",
"model",
".",
"name",
".",
"lower",
"(",
")",
")",
",",
"'w'",
")",
"as",
"t",
":",
"t",
".",
"write",
"(",
"model_template",
".",
"render",
"(",
"model_md",
"=",
"model",
")",
")"
] | Generate a model class for any models contained in the specified spec file. | [
"Generate",
"a",
"model",
"class",
"for",
"any",
"models",
"contained",
"in",
"the",
"specified",
"spec",
"file",
"."
] | python | train | 49 |
ggaughan/pipe2py | pipe2py/lib/utils.py | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/utils.py#L241-L262 | def dispatch(splits, *funcs, **kwargs):
"""takes multiple iterables (returned by dispatch or broadcast) and delivers
the items to multiple functions
/-----> _INPUT1 --> double(_INPUT1) --> \
/ \
splits ------> _INPUT2 --> triple(_INPUT2) ---> _OUTPUT
\ /
\--> _INPUT3 --> quadruple(_INPUT3) --> /
One way to construct such a flow in code would be::
splits = repeat(('bar', 'baz', 'qux'), 3)
double = lambda word: word * 2
triple = lambda word: word * 3
quadruple = lambda word: word * 4
_OUTPUT = dispatch(splits, double, triple, quadruple)
_OUTPUT == repeat(('barbar', 'bazbazbaz', 'quxquxquxqux'), 3)
"""
map_func = kwargs.get('map_func', _map_func)
apply_func = kwargs.get('apply_func', _apply_func)
return map_func(partial(apply_func, funcs), splits) | [
"def",
"dispatch",
"(",
"splits",
",",
"*",
"funcs",
",",
"*",
"*",
"kwargs",
")",
":",
"map_func",
"=",
"kwargs",
".",
"get",
"(",
"'map_func'",
",",
"_map_func",
")",
"apply_func",
"=",
"kwargs",
".",
"get",
"(",
"'apply_func'",
",",
"_apply_func",
")",
"return",
"map_func",
"(",
"partial",
"(",
"apply_func",
",",
"funcs",
")",
",",
"splits",
")"
] | takes multiple iterables (returned by dispatch or broadcast) and delivers
the items to multiple functions
/-----> _INPUT1 --> double(_INPUT1) --> \
/ \
splits ------> _INPUT2 --> triple(_INPUT2) ---> _OUTPUT
\ /
\--> _INPUT3 --> quadruple(_INPUT3) --> /
One way to construct such a flow in code would be::
splits = repeat(('bar', 'baz', 'qux'), 3)
double = lambda word: word * 2
triple = lambda word: word * 3
quadruple = lambda word: word * 4
_OUTPUT = dispatch(splits, double, triple, quadruple)
_OUTPUT == repeat(('barbar', 'bazbazbaz', 'quxquxquxqux'), 3) | [
"takes",
"multiple",
"iterables",
"(",
"returned",
"by",
"dispatch",
"or",
"broadcast",
")",
"and",
"delivers",
"the",
"items",
"to",
"multiple",
"functions"
] | python | train | 42.636364 |
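A self-contained toy illustration of the dispatch idea documented above. It does not import pipe2py; toy_dispatch is a simplified stand-in that emulates the default map/apply behaviour so the fan-out shown in the docstring diagram can be run directly.

    from itertools import repeat

    def toy_dispatch(splits, *funcs):
        # Each item of a split tuple is handed to the function in the matching position.
        return (tuple(f(item) for f, item in zip(funcs, split)) for split in splits)

    splits = repeat(('bar', 'baz', 'qux'), 3)
    double = lambda word: word * 2
    triple = lambda word: word * 3
    quadruple = lambda word: word * 4

    for row in toy_dispatch(splits, double, triple, quadruple):
        print(row)   # ('barbar', 'bazbazbaz', 'quxquxquxqux') printed three times
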
penguinmenac3/starttf | starttf/estimators/tf_estimator.py | https://github.com/penguinmenac3/starttf/blob/f4086489d169757c0504e822165db2fea534b944/starttf/estimators/tf_estimator.py#L204-L224 | def create_prediction_estimator(hyper_params, model, checkpoint_path=None):
"""
Create an estimator for prediction purpose only.
:param hyper_params: The hyper params file.
:param model: The keras model.
:param checkpoint_path: (Optional) Path to the specific checkpoint to use.
:return:
"""
if checkpoint_path is None:
chkpts = sorted([name for name in os.listdir(hyper_params.train.checkpoint_path)])
checkpoint_path = hyper_params.train.checkpoint_path + "/" + chkpts[-1]
print("Latest found checkpoint: {}".format(checkpoint_path))
estimator_spec = create_tf_estimator_spec(checkpoint_path, model, create_loss=None)
# Create the estimator.
estimator = tf.estimator.Estimator(estimator_spec,
model_dir=checkpoint_path,
params=hyper_params)
return estimator | [
"def",
"create_prediction_estimator",
"(",
"hyper_params",
",",
"model",
",",
"checkpoint_path",
"=",
"None",
")",
":",
"if",
"checkpoint_path",
"is",
"None",
":",
"chkpts",
"=",
"sorted",
"(",
"[",
"name",
"for",
"name",
"in",
"os",
".",
"listdir",
"(",
"hyper_params",
".",
"train",
".",
"checkpoint_path",
")",
"]",
")",
"checkpoint_path",
"=",
"hyper_params",
".",
"train",
".",
"checkpoint_path",
"+",
"\"/\"",
"+",
"chkpts",
"[",
"-",
"1",
"]",
"print",
"(",
"\"Latest found checkpoint: {}\"",
".",
"format",
"(",
"checkpoint_path",
")",
")",
"estimator_spec",
"=",
"create_tf_estimator_spec",
"(",
"checkpoint_path",
",",
"model",
",",
"create_loss",
"=",
"None",
")",
"# Create the estimator.",
"estimator",
"=",
"tf",
".",
"estimator",
".",
"Estimator",
"(",
"estimator_spec",
",",
"model_dir",
"=",
"checkpoint_path",
",",
"params",
"=",
"hyper_params",
")",
"return",
"estimator"
] | Create an estimator for prediction purpose only.
:param hyper_params: The hyper params file.
:param model: The keras model.
:param checkpoint_path: (Optional) Path to the specific checkpoint to use.
:return: | [
"Create",
"an",
"estimator",
"for",
"prediction",
"purpose",
"only",
".",
":",
"param",
"hyper_params",
":",
"The",
"hyper",
"params",
"file",
".",
":",
"param",
"model",
":",
"The",
"keras",
"model",
".",
":",
"param",
"checkpoint_path",
":",
"(",
"Optional",
")",
"Path",
"to",
"the",
"specific",
"checkpoint",
"to",
"use",
".",
":",
"return",
":"
] | python | train | 42.47619 |
edx/edx-enterprise | enterprise/views.py | https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/views.py#L183-L189 | def course_or_program_exist(self, course_id, program_uuid):
"""
Return whether the input course or program exist.
"""
course_exists = course_id and CourseApiClient().get_course_details(course_id)
program_exists = program_uuid and CourseCatalogApiServiceClient().program_exists(program_uuid)
return course_exists or program_exists | [
"def",
"course_or_program_exist",
"(",
"self",
",",
"course_id",
",",
"program_uuid",
")",
":",
"course_exists",
"=",
"course_id",
"and",
"CourseApiClient",
"(",
")",
".",
"get_course_details",
"(",
"course_id",
")",
"program_exists",
"=",
"program_uuid",
"and",
"CourseCatalogApiServiceClient",
"(",
")",
".",
"program_exists",
"(",
"program_uuid",
")",
"return",
"course_exists",
"or",
"program_exists"
] | Return whether the input course or program exist. | [
"Return",
"whether",
"the",
"input",
"course",
"or",
"program",
"exist",
"."
] | python | valid | 53 |
Jajcus/pyxmpp2 | pyxmpp2/cache.py | https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/cache.py#L506-L528 | def get_item(self, address, state = 'fresh'):
"""Get an item from the cache.
:Parameters:
- `address`: its address.
- `state`: the worst state that is acceptable.
:Types:
- `address`: any hashable
- `state`: `str`
:return: the item or `None` if it was not found.
:returntype: `CacheItem`"""
self._lock.acquire()
try:
item = self._items.get(address)
if not item:
return None
self.update_item(item)
if _state_values[state] >= item.state_value:
return item
return None
finally:
self._lock.release() | [
"def",
"get_item",
"(",
"self",
",",
"address",
",",
"state",
"=",
"'fresh'",
")",
":",
"self",
".",
"_lock",
".",
"acquire",
"(",
")",
"try",
":",
"item",
"=",
"self",
".",
"_items",
".",
"get",
"(",
"address",
")",
"if",
"not",
"item",
":",
"return",
"None",
"self",
".",
"update_item",
"(",
"item",
")",
"if",
"_state_values",
"[",
"state",
"]",
">=",
"item",
".",
"state_value",
":",
"return",
"item",
"return",
"None",
"finally",
":",
"self",
".",
"_lock",
".",
"release",
"(",
")"
] | Get an item from the cache.
:Parameters:
- `address`: its address.
- `state`: the worst state that is acceptable.
:Types:
- `address`: any hashable
- `state`: `str`
:return: the item or `None` if it was not found.
:returntype: `CacheItem` | [
"Get",
"an",
"item",
"from",
"the",
"cache",
"."
] | python | valid | 30.043478 |
joeyespo/grip | grip/renderers.py | https://github.com/joeyespo/grip/blob/ce933ccc4ca8e0d3718f271c59bd530a4518bf63/grip/renderers.py#L52-L83 | def render(self, text, auth=None):
"""
Renders the specified markdown content and embedded styles.
Raises TypeError if text is not a Unicode string.
Raises requests.HTTPError if the request fails.
"""
# Ensure text is Unicode
expected = str if sys.version_info[0] >= 3 else unicode # noqa
if not isinstance(text, expected):
raise TypeError(
'Expected a Unicode string, got {!r}.'.format(text))
if self.user_content:
url = '{0}/markdown'.format(self.api_url)
data = {'text': text, 'mode': 'gfm'}
if self.context:
data['context'] = self.context
data = json.dumps(data, ensure_ascii=False).encode('utf-8')
headers = {'content-type': 'application/json; charset=UTF-8'}
else:
url = '{0}/markdown/raw'.format(self.api_url)
data = text.encode('utf-8')
headers = {'content-type': 'text/x-markdown; charset=UTF-8'}
r = requests.post(url, headers=headers, data=data, auth=auth)
r.raise_for_status()
# FUTURE: Remove this once GitHub API properly handles Unicode markdown
r.encoding = 'utf-8'
return r.text if self.raw else patch(r.text) | [
"def",
"render",
"(",
"self",
",",
"text",
",",
"auth",
"=",
"None",
")",
":",
"# Ensure text is Unicode",
"expected",
"=",
"str",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
"else",
"unicode",
"# noqa",
"if",
"not",
"isinstance",
"(",
"text",
",",
"expected",
")",
":",
"raise",
"TypeError",
"(",
"'Expected a Unicode string, got {!r}.'",
".",
"format",
"(",
"text",
")",
")",
"if",
"self",
".",
"user_content",
":",
"url",
"=",
"'{0}/markdown'",
".",
"format",
"(",
"self",
".",
"api_url",
")",
"data",
"=",
"{",
"'text'",
":",
"text",
",",
"'mode'",
":",
"'gfm'",
"}",
"if",
"self",
".",
"context",
":",
"data",
"[",
"'context'",
"]",
"=",
"self",
".",
"context",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
",",
"ensure_ascii",
"=",
"False",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"headers",
"=",
"{",
"'content-type'",
":",
"'application/json; charset=UTF-8'",
"}",
"else",
":",
"url",
"=",
"'{0}/markdown/raw'",
".",
"format",
"(",
"self",
".",
"api_url",
")",
"data",
"=",
"text",
".",
"encode",
"(",
"'utf-8'",
")",
"headers",
"=",
"{",
"'content-type'",
":",
"'text/x-markdown; charset=UTF-8'",
"}",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"headers",
"=",
"headers",
",",
"data",
"=",
"data",
",",
"auth",
"=",
"auth",
")",
"r",
".",
"raise_for_status",
"(",
")",
"# FUTURE: Remove this once GitHub API properly handles Unicode markdown",
"r",
".",
"encoding",
"=",
"'utf-8'",
"return",
"r",
".",
"text",
"if",
"self",
".",
"raw",
"else",
"patch",
"(",
"r",
".",
"text",
")"
] | Renders the specified markdown content and embedded styles.
Raises TypeError if text is not a Unicode string.
Raises requests.HTTPError if the request fails. | [
"Renders",
"the",
"specified",
"markdown",
"content",
"and",
"embedded",
"styles",
"."
] | python | train | 39.40625 |
s1s1ty/py-jsonq | pyjsonq/query.py | https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/query.py#L46-L60 | def __parse_json_file(self, file_path):
"""Process Json file data
:@param file_path
:@type file_path: string
:@throws IOError
"""
if file_path == '' or os.path.splitext(file_path)[1] != '.json':
raise IOError('Invalid Json file')
with open(file_path) as json_file:
self._raw_data = json.load(json_file)
self._json_data = copy.deepcopy(self._raw_data) | [
"def",
"__parse_json_file",
"(",
"self",
",",
"file_path",
")",
":",
"if",
"file_path",
"==",
"''",
"or",
"os",
".",
"path",
".",
"splitext",
"(",
"file_path",
")",
"[",
"1",
"]",
"!=",
"'.json'",
":",
"raise",
"IOError",
"(",
"'Invalid Json file'",
")",
"with",
"open",
"(",
"file_path",
")",
"as",
"json_file",
":",
"self",
".",
"_raw_data",
"=",
"json",
".",
"load",
"(",
"json_file",
")",
"self",
".",
"_json_data",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"_raw_data",
")"
] | Process Json file data
:@param file_path
:@type file_path: string
:@throws IOError | [
"Process",
"Json",
"file",
"data"
] | python | train | 28.533333 |
sony/nnabla | python/src/nnabla/utils/data_iterator.py | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/data_iterator.py#L199-L228 | def next(self):
'''next
It generates tuple of data.
For example,
if :py:meth:`self._variables == ('x', 'y')`
This method returns :py:meth:` ( [[X] * batch_size], [[Y] * batch_size] )`
Returns:
tuple: tuple of data for mini-batch in numpy.ndarray.
'''
if self._use_thread:
# Wait for finish previous thread.
self._next_thread.join()
if self._current_data is None:
logger.log(99, 'next() got None retrying.')
self._next_thread = threading.Thread(target=self._next)
self._next_thread.start()
self._next_thread.join()
self._current_epoch, data = self._current_data
# Start next thread.
self._next_thread = threading.Thread(target=self._next)
self._next_thread.start()
else:
self._next()
self._current_epoch, data = self._current_data
return data | [
"def",
"next",
"(",
"self",
")",
":",
"if",
"self",
".",
"_use_thread",
":",
"# Wait for finish previous thread.",
"self",
".",
"_next_thread",
".",
"join",
"(",
")",
"if",
"self",
".",
"_current_data",
"is",
"None",
":",
"logger",
".",
"log",
"(",
"99",
",",
"'next() got None retrying.'",
")",
"self",
".",
"_next_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_next",
")",
"self",
".",
"_next_thread",
".",
"start",
"(",
")",
"self",
".",
"_next_thread",
".",
"join",
"(",
")",
"self",
".",
"_current_epoch",
",",
"data",
"=",
"self",
".",
"_current_data",
"# Start next thread.",
"self",
".",
"_next_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_next",
")",
"self",
".",
"_next_thread",
".",
"start",
"(",
")",
"else",
":",
"self",
".",
"_next",
"(",
")",
"self",
".",
"_current_epoch",
",",
"data",
"=",
"self",
".",
"_current_data",
"return",
"data"
] | next
It generates tuple of data.
For example,
if :py:meth:`self._variables == ('x', 'y')`
This method returns :py:meth:` ( [[X] * batch_size], [[Y] * batch_size] )`
Returns:
tuple: tuple of data for mini-batch in numpy.ndarray. | [
"next"
] | python | train | 32.7 |
rm-hull/luma.core | luma/core/interface/serial.py | https://github.com/rm-hull/luma.core/blob/034b628fb304a01e77732a299c0b42e94d6443db/luma/core/interface/serial.py#L72-L92 | def command(self, *cmd):
"""
Sends a command or sequence of commands through to the I²C address
- maximum allowed is 32 bytes in one go.
:param cmd: A spread of commands.
:type cmd: int
:raises luma.core.error.DeviceNotFoundError: I2C device could not be found.
"""
assert(len(cmd) <= 32)
try:
self._bus.write_i2c_block_data(self._addr, self._cmd_mode,
list(cmd))
except (IOError, OSError) as e:
if e.errno in [errno.EREMOTEIO, errno.EIO]:
# I/O error
raise luma.core.error.DeviceNotFoundError(
'I2C device not found on address: 0x{0:02X}'.format(self._addr))
else: # pragma: no cover
raise | [
"def",
"command",
"(",
"self",
",",
"*",
"cmd",
")",
":",
"assert",
"(",
"len",
"(",
"cmd",
")",
"<=",
"32",
")",
"try",
":",
"self",
".",
"_bus",
".",
"write_i2c_block_data",
"(",
"self",
".",
"_addr",
",",
"self",
".",
"_cmd_mode",
",",
"list",
"(",
"cmd",
")",
")",
"except",
"(",
"IOError",
",",
"OSError",
")",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"in",
"[",
"errno",
".",
"EREMOTEIO",
",",
"errno",
".",
"EIO",
"]",
":",
"# I/O error",
"raise",
"luma",
".",
"core",
".",
"error",
".",
"DeviceNotFoundError",
"(",
"'I2C device not found on address: 0x{0:02X}'",
".",
"format",
"(",
"self",
".",
"_addr",
")",
")",
"else",
":",
"# pragma: no cover",
"raise"
] | Sends a command or sequence of commands through to the I²C address
- maximum allowed is 32 bytes in one go.
:param cmd: A spread of commands.
:type cmd: int
:raises luma.core.error.DeviceNotFoundError: I2C device could not be found. | [
"Sends",
"a",
"command",
"or",
"sequence",
"of",
"commands",
"through",
"to",
"the",
"I²C",
"address",
"-",
"maximum",
"allowed",
"is",
"32",
"bytes",
"in",
"one",
"go",
"."
] | python | train | 38.095238 |
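A hedged usage sketch for the i2c command method above; it assumes a display is actually wired to the bus, and the port/address and the 0xAE / 0x81 command bytes are placeholder SSD1306-style values rather than anything taken from this row.

    from luma.core.interface.serial import i2c

    serial = i2c(port=1, address=0x3C)   # bus/address depend on the actual wiring
    serial.command(0xAE)                 # one command byte (0xAE = display off on SSD1306)
    serial.command(0x81, 0xCF)           # command plus its argument, still a single call (max 32 bytes)
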
dwavesystems/dwave_networkx | dwave_networkx/generators/markov.py | https://github.com/dwavesystems/dwave_networkx/blob/9ea1223ddbc7e86db2f90b8b23e250e6642c3d68/dwave_networkx/generators/markov.py#L63-L126 | def markov_network(potentials):
"""Creates a Markov Network from potentials.
A Markov Network is also knows as a `Markov Random Field`_
Parameters
----------
potentials : dict[tuple, dict]
A dict where the keys are either nodes or edges and the values are a
dictionary of potentials. The potential dict should map each possible
assignment of the nodes/edges to their energy.
Returns
-------
MN : :obj:`networkx.Graph`
A markov network as a graph where each node/edge stores its potential
dict as above.
Examples
--------
>>> potentials = {('a', 'b'): {(0, 0): -1,
... (0, 1): .5,
... (1, 0): .5,
... (1, 1): 2}}
>>> MN = dnx.markov_network(potentials)
>>> MN['a']['b']['potential'][(0, 0)]
-1
.. _Markov Random Field: https://en.wikipedia.org/wiki/Markov_random_field
"""
G = nx.Graph()
G.name = 'markov_network({!r})'.format(potentials)
# we use 'clique' because the keys of potentials can be either nodes or
# edges, but in either case they are fully connected.
for clique, phis in potentials.items():
num_vars = len(clique)
# because this data potentially wont be used for a while, let's do some
# input checking now and save some debugging issues later
if not isinstance(phis, abc.Mapping):
raise TypeError("phis should be a dict")
elif not all(config in phis for config in itertools.product((0, 1), repeat=num_vars)):
raise ValueError("not all potentials provided for {!r}".format(clique))
if num_vars == 1:
u, = clique
G.add_node(u, potential=phis)
elif num_vars == 2:
u, v = clique
# in python<=3.5 the edge order might not be consistent so we store
# the relevant order of the variables relative to the potentials
G.add_edge(u, v, potential=phis, order=(u, v))
else:
# developer note: in principle supporting larger cliques can be done
# using higher-order, but it would make the use of networkx graphs
# far more difficult
raise ValueError("Only supports cliques up to size 2")
return G | [
"def",
"markov_network",
"(",
"potentials",
")",
":",
"G",
"=",
"nx",
".",
"Graph",
"(",
")",
"G",
".",
"name",
"=",
"'markov_network({!r})'",
".",
"format",
"(",
"potentials",
")",
"# we use 'clique' because the keys of potentials can be either nodes or",
"# edges, but in either case they are fully connected.",
"for",
"clique",
",",
"phis",
"in",
"potentials",
".",
"items",
"(",
")",
":",
"num_vars",
"=",
"len",
"(",
"clique",
")",
"# because this data potentially wont be used for a while, let's do some",
"# input checking now and save some debugging issues later",
"if",
"not",
"isinstance",
"(",
"phis",
",",
"abc",
".",
"Mapping",
")",
":",
"raise",
"TypeError",
"(",
"\"phis should be a dict\"",
")",
"elif",
"not",
"all",
"(",
"config",
"in",
"phis",
"for",
"config",
"in",
"itertools",
".",
"product",
"(",
"(",
"0",
",",
"1",
")",
",",
"repeat",
"=",
"num_vars",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"not all potentials provided for {!r}\"",
".",
"format",
"(",
"clique",
")",
")",
"if",
"num_vars",
"==",
"1",
":",
"u",
",",
"=",
"clique",
"G",
".",
"add_node",
"(",
"u",
",",
"potential",
"=",
"phis",
")",
"elif",
"num_vars",
"==",
"2",
":",
"u",
",",
"v",
"=",
"clique",
"# in python<=3.5 the edge order might not be consistent so we store",
"# the relevant order of the variables relative to the potentials",
"G",
".",
"add_edge",
"(",
"u",
",",
"v",
",",
"potential",
"=",
"phis",
",",
"order",
"=",
"(",
"u",
",",
"v",
")",
")",
"else",
":",
"# developer note: in principle supporting larger cliques can be done",
"# using higher-order, but it would make the use of networkx graphs",
"# far more difficult",
"raise",
"ValueError",
"(",
"\"Only supports cliques up to size 2\"",
")",
"return",
"G"
] | Creates a Markov Network from potentials.
    A Markov Network is also known as a `Markov Random Field`_
Parameters
----------
potentials : dict[tuple, dict]
A dict where the keys are either nodes or edges and the values are a
dictionary of potentials. The potential dict should map each possible
assignment of the nodes/edges to their energy.
Returns
-------
MN : :obj:`networkx.Graph`
A markov network as a graph where each node/edge stores its potential
dict as above.
Examples
--------
>>> potentials = {('a', 'b'): {(0, 0): -1,
... (0, 1): .5,
... (1, 0): .5,
... (1, 1): 2}}
>>> MN = dnx.markov_network(potentials)
>>> MN['a']['b']['potential'][(0, 0)]
-1
.. _Markov Random Field: https://en.wikipedia.org/wiki/Markov_random_field | [
"Creates",
"a",
"Markov",
"Network",
"from",
"potentials",
"."
] | python | train | 35.53125 |
openego/eDisGo | edisgo/grid/connect.py | https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/grid/connect.py#L483-L666 | def _connect_mv_node(network, node, target_obj):
"""Connects MV node to target object in MV grid
If the target object is a node, a new line is created to it.
If the target object is a line, the node is connected to a newly created branch tee
(using perpendicular projection) on this line.
New lines are created using standard equipment.
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
node : :class:`~.grid.components.Component`
Node to connect (e.g. :class:`~.grid.components.Generator`)
Node must be a member of MV grid's graph (network.mv_grid.graph)
target_obj : :class:`~.grid.components.Component`
Object that node shall be connected to
Returns
-------
:class:`~.grid.components.Component` or None
Node that node was connected to
Notes
-----
Adapted from `Ding0 <https://github.com/openego/ding0/blob/\
21a52048f84ec341fe54e0204ac62228a9e8a32a/\
ding0/grid/mv_grid/mv_connect.py#L311>`_.
"""
# get standard equipment
std_line_type = network.equipment_data['mv_cables'].loc[
network.config['grid_expansion_standard_equipment']['mv_line']]
std_line_kind = 'cable'
target_obj_result = None
node_shp = transform(proj2equidistant(network), node.geom)
# MV line is nearest connection point
if isinstance(target_obj['shp'], LineString):
adj_node1 = target_obj['obj']['adj_nodes'][0]
adj_node2 = target_obj['obj']['adj_nodes'][1]
# find nearest point on MV line
conn_point_shp = target_obj['shp'].interpolate(target_obj['shp'].project(node_shp))
conn_point_shp = transform(proj2conformal(network), conn_point_shp)
line = network.mv_grid.graph.edge[adj_node1][adj_node2]
# target MV line does currently not connect a load area of type aggregated
if not line['type'] == 'line_aggr':
# create branch tee and add it to grid
branch_tee = BranchTee(geom=conn_point_shp,
grid=network.mv_grid,
in_building=False)
network.mv_grid.graph.add_node(branch_tee,
type='branch_tee')
# split old branch into 2 segments
# (delete old branch and create 2 new ones along cable_dist)
# ==========================================================
# backup kind and type of branch
line_kind = line['line'].kind
line_type = line['line'].type
# remove line from graph
network.mv_grid.graph.remove_edge(adj_node1, adj_node2)
# delete line from equipment changes if existing
_del_cable_from_equipment_changes(network=network,
line=line['line'])
line_length = calc_geo_dist_vincenty(network=network,
node_source=adj_node1,
node_target=branch_tee)
line = Line(id=random.randint(10 ** 8, 10 ** 9),
length=line_length / 1e3,
quantity=1,
kind=line_kind,
type=line_type,
grid=network.mv_grid)
network.mv_grid.graph.add_edge(adj_node1,
branch_tee,
line=line,
type='line')
# add line to equipment changes to track costs
_add_cable_to_equipment_changes(network=network,
line=line)
line_length = calc_geo_dist_vincenty(network=network,
node_source=adj_node2,
node_target=branch_tee)
line = Line(id=random.randint(10 ** 8, 10 ** 9),
length=line_length / 1e3,
quantity=1,
kind=line_kind,
type=line_type,
grid=network.mv_grid)
network.mv_grid.graph.add_edge(adj_node2,
branch_tee,
line=line,
type='line')
# add line to equipment changes to track costs
_add_cable_to_equipment_changes(network=network,
line=line)
# add new branch for new node (node to branch tee)
# ================================================
line_length = calc_geo_dist_vincenty(network=network,
node_source=node,
node_target=branch_tee)
line = Line(id=random.randint(10 ** 8, 10 ** 9),
length=line_length / 1e3,
quantity=1,
kind=std_line_kind,
type=std_line_type,
grid=network.mv_grid)
network.mv_grid.graph.add_edge(node,
branch_tee,
line=line,
type='line')
# add line to equipment changes to track costs
_add_cable_to_equipment_changes(network=network,
line=line)
target_obj_result = branch_tee
    # node is nearest connection point
else:
# what kind of node is to be connected? (which type is node of?)
# LVStation: Connect to LVStation or BranchTee
# Generator: Connect to LVStation, BranchTee or Generator
if isinstance(node, LVStation):
valid_conn_objects = (LVStation, BranchTee)
elif isinstance(node, Generator):
valid_conn_objects = (LVStation, BranchTee, Generator)
else:
raise ValueError('Oops, the node you are trying to connect is not a valid connection object')
# if target is generator or Load, check if it is aggregated (=> connection not allowed)
if isinstance(target_obj['obj'], (Generator, Load)):
target_is_aggregated = any([_ for _ in network.mv_grid.graph.edge[target_obj['obj']].values()
if _['type'] == 'line_aggr'])
else:
target_is_aggregated = False
# target node is not a load area of type aggregated
if isinstance(target_obj['obj'], valid_conn_objects) and not target_is_aggregated:
# add new branch for satellite (station to station)
line_length = calc_geo_dist_vincenty(network=network,
node_source=node,
node_target=target_obj['obj'])
line = Line(id=random.randint(10 ** 8, 10 ** 9),
type=std_line_type,
kind=std_line_kind,
quantity=1,
length=line_length / 1e3,
grid=network.mv_grid)
network.mv_grid.graph.add_edge(node,
target_obj['obj'],
line=line,
type='line')
# add line to equipment changes to track costs
_add_cable_to_equipment_changes(network=network,
line=line)
target_obj_result = target_obj['obj']
return target_obj_result | [
"def",
"_connect_mv_node",
"(",
"network",
",",
"node",
",",
"target_obj",
")",
":",
"# get standard equipment",
"std_line_type",
"=",
"network",
".",
"equipment_data",
"[",
"'mv_cables'",
"]",
".",
"loc",
"[",
"network",
".",
"config",
"[",
"'grid_expansion_standard_equipment'",
"]",
"[",
"'mv_line'",
"]",
"]",
"std_line_kind",
"=",
"'cable'",
"target_obj_result",
"=",
"None",
"node_shp",
"=",
"transform",
"(",
"proj2equidistant",
"(",
"network",
")",
",",
"node",
".",
"geom",
")",
"# MV line is nearest connection point",
"if",
"isinstance",
"(",
"target_obj",
"[",
"'shp'",
"]",
",",
"LineString",
")",
":",
"adj_node1",
"=",
"target_obj",
"[",
"'obj'",
"]",
"[",
"'adj_nodes'",
"]",
"[",
"0",
"]",
"adj_node2",
"=",
"target_obj",
"[",
"'obj'",
"]",
"[",
"'adj_nodes'",
"]",
"[",
"1",
"]",
"# find nearest point on MV line",
"conn_point_shp",
"=",
"target_obj",
"[",
"'shp'",
"]",
".",
"interpolate",
"(",
"target_obj",
"[",
"'shp'",
"]",
".",
"project",
"(",
"node_shp",
")",
")",
"conn_point_shp",
"=",
"transform",
"(",
"proj2conformal",
"(",
"network",
")",
",",
"conn_point_shp",
")",
"line",
"=",
"network",
".",
"mv_grid",
".",
"graph",
".",
"edge",
"[",
"adj_node1",
"]",
"[",
"adj_node2",
"]",
"# target MV line does currently not connect a load area of type aggregated",
"if",
"not",
"line",
"[",
"'type'",
"]",
"==",
"'line_aggr'",
":",
"# create branch tee and add it to grid",
"branch_tee",
"=",
"BranchTee",
"(",
"geom",
"=",
"conn_point_shp",
",",
"grid",
"=",
"network",
".",
"mv_grid",
",",
"in_building",
"=",
"False",
")",
"network",
".",
"mv_grid",
".",
"graph",
".",
"add_node",
"(",
"branch_tee",
",",
"type",
"=",
"'branch_tee'",
")",
"# split old branch into 2 segments",
"# (delete old branch and create 2 new ones along cable_dist)",
"# ==========================================================",
"# backup kind and type of branch",
"line_kind",
"=",
"line",
"[",
"'line'",
"]",
".",
"kind",
"line_type",
"=",
"line",
"[",
"'line'",
"]",
".",
"type",
"# remove line from graph",
"network",
".",
"mv_grid",
".",
"graph",
".",
"remove_edge",
"(",
"adj_node1",
",",
"adj_node2",
")",
"# delete line from equipment changes if existing",
"_del_cable_from_equipment_changes",
"(",
"network",
"=",
"network",
",",
"line",
"=",
"line",
"[",
"'line'",
"]",
")",
"line_length",
"=",
"calc_geo_dist_vincenty",
"(",
"network",
"=",
"network",
",",
"node_source",
"=",
"adj_node1",
",",
"node_target",
"=",
"branch_tee",
")",
"line",
"=",
"Line",
"(",
"id",
"=",
"random",
".",
"randint",
"(",
"10",
"**",
"8",
",",
"10",
"**",
"9",
")",
",",
"length",
"=",
"line_length",
"/",
"1e3",
",",
"quantity",
"=",
"1",
",",
"kind",
"=",
"line_kind",
",",
"type",
"=",
"line_type",
",",
"grid",
"=",
"network",
".",
"mv_grid",
")",
"network",
".",
"mv_grid",
".",
"graph",
".",
"add_edge",
"(",
"adj_node1",
",",
"branch_tee",
",",
"line",
"=",
"line",
",",
"type",
"=",
"'line'",
")",
"# add line to equipment changes to track costs",
"_add_cable_to_equipment_changes",
"(",
"network",
"=",
"network",
",",
"line",
"=",
"line",
")",
"line_length",
"=",
"calc_geo_dist_vincenty",
"(",
"network",
"=",
"network",
",",
"node_source",
"=",
"adj_node2",
",",
"node_target",
"=",
"branch_tee",
")",
"line",
"=",
"Line",
"(",
"id",
"=",
"random",
".",
"randint",
"(",
"10",
"**",
"8",
",",
"10",
"**",
"9",
")",
",",
"length",
"=",
"line_length",
"/",
"1e3",
",",
"quantity",
"=",
"1",
",",
"kind",
"=",
"line_kind",
",",
"type",
"=",
"line_type",
",",
"grid",
"=",
"network",
".",
"mv_grid",
")",
"network",
".",
"mv_grid",
".",
"graph",
".",
"add_edge",
"(",
"adj_node2",
",",
"branch_tee",
",",
"line",
"=",
"line",
",",
"type",
"=",
"'line'",
")",
"# add line to equipment changes to track costs",
"_add_cable_to_equipment_changes",
"(",
"network",
"=",
"network",
",",
"line",
"=",
"line",
")",
"# add new branch for new node (node to branch tee)",
"# ================================================",
"line_length",
"=",
"calc_geo_dist_vincenty",
"(",
"network",
"=",
"network",
",",
"node_source",
"=",
"node",
",",
"node_target",
"=",
"branch_tee",
")",
"line",
"=",
"Line",
"(",
"id",
"=",
"random",
".",
"randint",
"(",
"10",
"**",
"8",
",",
"10",
"**",
"9",
")",
",",
"length",
"=",
"line_length",
"/",
"1e3",
",",
"quantity",
"=",
"1",
",",
"kind",
"=",
"std_line_kind",
",",
"type",
"=",
"std_line_type",
",",
"grid",
"=",
"network",
".",
"mv_grid",
")",
"network",
".",
"mv_grid",
".",
"graph",
".",
"add_edge",
"(",
"node",
",",
"branch_tee",
",",
"line",
"=",
"line",
",",
"type",
"=",
"'line'",
")",
"# add line to equipment changes to track costs",
"_add_cable_to_equipment_changes",
"(",
"network",
"=",
"network",
",",
"line",
"=",
"line",
")",
"target_obj_result",
"=",
"branch_tee",
"# node ist nearest connection point",
"else",
":",
"# what kind of node is to be connected? (which type is node of?)",
"# LVStation: Connect to LVStation or BranchTee",
"# Generator: Connect to LVStation, BranchTee or Generator",
"if",
"isinstance",
"(",
"node",
",",
"LVStation",
")",
":",
"valid_conn_objects",
"=",
"(",
"LVStation",
",",
"BranchTee",
")",
"elif",
"isinstance",
"(",
"node",
",",
"Generator",
")",
":",
"valid_conn_objects",
"=",
"(",
"LVStation",
",",
"BranchTee",
",",
"Generator",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Oops, the node you are trying to connect is not a valid connection object'",
")",
"# if target is generator or Load, check if it is aggregated (=> connection not allowed)",
"if",
"isinstance",
"(",
"target_obj",
"[",
"'obj'",
"]",
",",
"(",
"Generator",
",",
"Load",
")",
")",
":",
"target_is_aggregated",
"=",
"any",
"(",
"[",
"_",
"for",
"_",
"in",
"network",
".",
"mv_grid",
".",
"graph",
".",
"edge",
"[",
"target_obj",
"[",
"'obj'",
"]",
"]",
".",
"values",
"(",
")",
"if",
"_",
"[",
"'type'",
"]",
"==",
"'line_aggr'",
"]",
")",
"else",
":",
"target_is_aggregated",
"=",
"False",
"# target node is not a load area of type aggregated",
"if",
"isinstance",
"(",
"target_obj",
"[",
"'obj'",
"]",
",",
"valid_conn_objects",
")",
"and",
"not",
"target_is_aggregated",
":",
"# add new branch for satellite (station to station)",
"line_length",
"=",
"calc_geo_dist_vincenty",
"(",
"network",
"=",
"network",
",",
"node_source",
"=",
"node",
",",
"node_target",
"=",
"target_obj",
"[",
"'obj'",
"]",
")",
"line",
"=",
"Line",
"(",
"id",
"=",
"random",
".",
"randint",
"(",
"10",
"**",
"8",
",",
"10",
"**",
"9",
")",
",",
"type",
"=",
"std_line_type",
",",
"kind",
"=",
"std_line_kind",
",",
"quantity",
"=",
"1",
",",
"length",
"=",
"line_length",
"/",
"1e3",
",",
"grid",
"=",
"network",
".",
"mv_grid",
")",
"network",
".",
"mv_grid",
".",
"graph",
".",
"add_edge",
"(",
"node",
",",
"target_obj",
"[",
"'obj'",
"]",
",",
"line",
"=",
"line",
",",
"type",
"=",
"'line'",
")",
"# add line to equipment changes to track costs",
"_add_cable_to_equipment_changes",
"(",
"network",
"=",
"network",
",",
"line",
"=",
"line",
")",
"target_obj_result",
"=",
"target_obj",
"[",
"'obj'",
"]",
"return",
"target_obj_result"
] | Connects MV node to target object in MV grid
If the target object is a node, a new line is created to it.
If the target object is a line, the node is connected to a newly created branch tee
(using perpendicular projection) on this line.
New lines are created using standard equipment.
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
node : :class:`~.grid.components.Component`
Node to connect (e.g. :class:`~.grid.components.Generator`)
Node must be a member of MV grid's graph (network.mv_grid.graph)
target_obj : :class:`~.grid.components.Component`
Object that node shall be connected to
Returns
-------
:class:`~.grid.components.Component` or None
Node that node was connected to
Notes
-----
Adapted from `Ding0 <https://github.com/openego/ding0/blob/\
21a52048f84ec341fe54e0204ac62228a9e8a32a/\
ding0/grid/mv_grid/mv_connect.py#L311>`_. | [
"Connects",
"MV",
"node",
"to",
"target",
"object",
"in",
"MV",
"grid"
] | python | train | 41.831522 |
google/apitools | apitools/base/protorpclite/messages.py | https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/messages.py#L1827-L1841 | def default(self):
"""Default for enum field.
Will cause resolution of Enum type and unresolved default value.
"""
try:
return self.__resolved_default
except AttributeError:
resolved_default = super(EnumField, self).default
if isinstance(resolved_default, (six.string_types,
six.integer_types)):
# pylint:disable=not-callable
resolved_default = self.type(resolved_default)
self.__resolved_default = resolved_default
return self.__resolved_default | [
"def",
"default",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"__resolved_default",
"except",
"AttributeError",
":",
"resolved_default",
"=",
"super",
"(",
"EnumField",
",",
"self",
")",
".",
"default",
"if",
"isinstance",
"(",
"resolved_default",
",",
"(",
"six",
".",
"string_types",
",",
"six",
".",
"integer_types",
")",
")",
":",
"# pylint:disable=not-callable",
"resolved_default",
"=",
"self",
".",
"type",
"(",
"resolved_default",
")",
"self",
".",
"__resolved_default",
"=",
"resolved_default",
"return",
"self",
".",
"__resolved_default"
] | Default for enum field.
Will cause resolution of Enum type and unresolved default value. | [
"Default",
"for",
"enum",
"field",
"."
] | python | train | 40.666667 |
lmjohns3/theanets | theanets/layers/base.py | https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/layers/base.py#L271-L289 | def resolve_inputs(self, layers):
'''Resolve the names of inputs for this layer into shape tuples.
Parameters
----------
layers : list of :class:`Layer`
A list of the layers that are available for resolving inputs.
Raises
------
theanets.util.ConfigurationError :
If an input cannot be resolved.
'''
resolved = {}
for name, shape in self._input_shapes.items():
if shape is None:
name, shape = self._resolve_shape(name, layers)
resolved[name] = shape
self._input_shapes = resolved | [
"def",
"resolve_inputs",
"(",
"self",
",",
"layers",
")",
":",
"resolved",
"=",
"{",
"}",
"for",
"name",
",",
"shape",
"in",
"self",
".",
"_input_shapes",
".",
"items",
"(",
")",
":",
"if",
"shape",
"is",
"None",
":",
"name",
",",
"shape",
"=",
"self",
".",
"_resolve_shape",
"(",
"name",
",",
"layers",
")",
"resolved",
"[",
"name",
"]",
"=",
"shape",
"self",
".",
"_input_shapes",
"=",
"resolved"
] | Resolve the names of inputs for this layer into shape tuples.
Parameters
----------
layers : list of :class:`Layer`
A list of the layers that are available for resolving inputs.
Raises
------
theanets.util.ConfigurationError :
If an input cannot be resolved. | [
"Resolve",
"the",
"names",
"of",
"inputs",
"for",
"this",
"layer",
"into",
"shape",
"tuples",
"."
] | python | test | 32.368421 |
spyder-ide/spyder | spyder/plugins/editor/widgets/editor.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/editor.py#L254-L259 | def refresh(self):
"""Remove editors that are not longer open."""
self._update_id_list()
for _id in self.history[:]:
if _id not in self.id_list:
self.history.remove(_id) | [
"def",
"refresh",
"(",
"self",
")",
":",
"self",
".",
"_update_id_list",
"(",
")",
"for",
"_id",
"in",
"self",
".",
"history",
"[",
":",
"]",
":",
"if",
"_id",
"not",
"in",
"self",
".",
"id_list",
":",
"self",
".",
"history",
".",
"remove",
"(",
"_id",
")"
] | Remove editors that are no longer open. | [
"Remove",
"editors",
"that",
"are",
"not",
"longer",
"open",
"."
] | python | train | 36.833333 |
marvin-ai/marvin-python-toolbox | marvin_python_toolbox/common/data_source_provider.py | https://github.com/marvin-ai/marvin-python-toolbox/blob/7c95cb2f9698b989150ab94c1285f3a9eaaba423/marvin_python_toolbox/common/data_source_provider.py#L25-L43 | def get_spark_session(enable_hive=False, app_name='marvin-engine', configs=[]):
"""Return a Spark Session object"""
# Prepare spark context to be used
import findspark
findspark.init()
from pyspark.sql import SparkSession
    # prepare spark session to be returned
spark = SparkSession.builder
spark = spark.appName(app_name)
spark = spark.enableHiveSupport() if enable_hive else spark
# if has configs
for config in configs:
spark = spark.config(config)
return spark.getOrCreate() | [
"def",
"get_spark_session",
"(",
"enable_hive",
"=",
"False",
",",
"app_name",
"=",
"'marvin-engine'",
",",
"configs",
"=",
"[",
"]",
")",
":",
"# Prepare spark context to be used",
"import",
"findspark",
"findspark",
".",
"init",
"(",
")",
"from",
"pyspark",
".",
"sql",
"import",
"SparkSession",
"# prepare spark sesseion to be returned",
"spark",
"=",
"SparkSession",
".",
"builder",
"spark",
"=",
"spark",
".",
"appName",
"(",
"app_name",
")",
"spark",
"=",
"spark",
".",
"enableHiveSupport",
"(",
")",
"if",
"enable_hive",
"else",
"spark",
"# if has configs",
"for",
"config",
"in",
"configs",
":",
"spark",
"=",
"spark",
".",
"config",
"(",
"config",
")",
"return",
"spark",
".",
"getOrCreate",
"(",
")"
] | Return a Spark Session object | [
"Return",
"a",
"Spark",
"Session",
"object"
] | python | train | 27.421053 |
defunkt/pystache | pystache/locator.py | https://github.com/defunkt/pystache/blob/17a5dfdcd56eb76af731d141de395a7632a905b8/pystache/locator.py#L139-L152 | def find_name(self, template_name, search_dirs):
"""
Return the path to a template with the given name.
Arguments:
template_name: the name of the template.
search_dirs: the list of directories in which to search.
"""
file_name = self.make_file_name(template_name)
return self._find_path_required(search_dirs, file_name) | [
"def",
"find_name",
"(",
"self",
",",
"template_name",
",",
"search_dirs",
")",
":",
"file_name",
"=",
"self",
".",
"make_file_name",
"(",
"template_name",
")",
"return",
"self",
".",
"_find_path_required",
"(",
"search_dirs",
",",
"file_name",
")"
] | Return the path to a template with the given name.
Arguments:
template_name: the name of the template.
search_dirs: the list of directories in which to search. | [
"Return",
"the",
"path",
"to",
"a",
"template",
"with",
"the",
"given",
"name",
"."
] | python | train | 27.071429 |
cirruscluster/cirruscluster | cirruscluster/ext/ansible/runner/action_plugins/script.py | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/action_plugins/script.py#L31-L65 | def run(self, conn, tmp, module_name, module_args, inject):
''' handler for file transfer operations '''
tokens = shlex.split(module_args)
source = tokens[0]
# FIXME: error handling
args = " ".join(tokens[1:])
source = utils.template(self.runner.basedir, source, inject)
source = utils.path_dwim(self.runner.basedir, source)
# transfer the file to a remote tmp location
source = source.replace('\x00','') # why does this happen here?
args = args.replace('\x00','') # why does this happen here?
tmp_src = os.path.join(tmp, os.path.basename(source))
tmp_src = tmp_src.replace('\x00', '')
conn.put_file(source, tmp_src)
# fix file permissions when the copy is done as a different user
if self.runner.sudo and self.runner.sudo_user != 'root':
prepcmd = 'chmod a+rx %s' % tmp_src
else:
prepcmd = 'chmod +x %s' % tmp_src
# add preparation steps to one ssh roundtrip executing the script
module_args = prepcmd + '; ' + tmp_src + ' ' + args
handler = utils.plugins.action_loader.get('raw', self.runner)
result = handler.run(conn, tmp, 'raw', module_args, inject)
# clean up after
if tmp.find("tmp") != -1 and C.DEFAULT_KEEP_REMOTE_FILES != '1':
self.runner._low_level_exec_command(conn, 'rm -rf %s >/dev/null 2>&1' % tmp, tmp)
return result | [
"def",
"run",
"(",
"self",
",",
"conn",
",",
"tmp",
",",
"module_name",
",",
"module_args",
",",
"inject",
")",
":",
"tokens",
"=",
"shlex",
".",
"split",
"(",
"module_args",
")",
"source",
"=",
"tokens",
"[",
"0",
"]",
"# FIXME: error handling",
"args",
"=",
"\" \"",
".",
"join",
"(",
"tokens",
"[",
"1",
":",
"]",
")",
"source",
"=",
"utils",
".",
"template",
"(",
"self",
".",
"runner",
".",
"basedir",
",",
"source",
",",
"inject",
")",
"source",
"=",
"utils",
".",
"path_dwim",
"(",
"self",
".",
"runner",
".",
"basedir",
",",
"source",
")",
"# transfer the file to a remote tmp location",
"source",
"=",
"source",
".",
"replace",
"(",
"'\\x00'",
",",
"''",
")",
"# why does this happen here?",
"args",
"=",
"args",
".",
"replace",
"(",
"'\\x00'",
",",
"''",
")",
"# why does this happen here?",
"tmp_src",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp",
",",
"os",
".",
"path",
".",
"basename",
"(",
"source",
")",
")",
"tmp_src",
"=",
"tmp_src",
".",
"replace",
"(",
"'\\x00'",
",",
"''",
")",
"conn",
".",
"put_file",
"(",
"source",
",",
"tmp_src",
")",
"# fix file permissions when the copy is done as a different user",
"if",
"self",
".",
"runner",
".",
"sudo",
"and",
"self",
".",
"runner",
".",
"sudo_user",
"!=",
"'root'",
":",
"prepcmd",
"=",
"'chmod a+rx %s'",
"%",
"tmp_src",
"else",
":",
"prepcmd",
"=",
"'chmod +x %s'",
"%",
"tmp_src",
"# add preparation steps to one ssh roundtrip executing the script",
"module_args",
"=",
"prepcmd",
"+",
"'; '",
"+",
"tmp_src",
"+",
"' '",
"+",
"args",
"handler",
"=",
"utils",
".",
"plugins",
".",
"action_loader",
".",
"get",
"(",
"'raw'",
",",
"self",
".",
"runner",
")",
"result",
"=",
"handler",
".",
"run",
"(",
"conn",
",",
"tmp",
",",
"'raw'",
",",
"module_args",
",",
"inject",
")",
"# clean up after",
"if",
"tmp",
".",
"find",
"(",
"\"tmp\"",
")",
"!=",
"-",
"1",
"and",
"C",
".",
"DEFAULT_KEEP_REMOTE_FILES",
"!=",
"'1'",
":",
"self",
".",
"runner",
".",
"_low_level_exec_command",
"(",
"conn",
",",
"'rm -rf %s >/dev/null 2>&1'",
"%",
"tmp",
",",
"tmp",
")",
"return",
"result"
] | handler for file transfer operations | [
"handler",
"for",
"file",
"transfer",
"operations"
] | python | train | 41.085714 |
mdgoldberg/sportsref | sportsref/nfl/pbp.py | https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/pbp.py#L22-L52 | def expand_details(df, detailCol='detail'):
"""Expands the details column of the given dataframe and returns the
resulting DataFrame.
:df: The input DataFrame.
:detailCol: The detail column name.
:returns: Returns DataFrame with new columns from pbp parsing.
"""
df = copy.deepcopy(df)
df['detail'] = df[detailCol]
dicts = [sportsref.nfl.pbp.parse_play_details(detail) for detail in df['detail'].values]
# clean up unmatched details
cols = {c for d in dicts if d for c in d.keys()}
blankEntry = {c: np.nan for c in cols}
newDicts = [d if d else blankEntry for d in dicts]
# get details DataFrame and merge it with original to create main DataFrame
details = pd.DataFrame(newDicts)
df = pd.merge(df, details, left_index=True, right_index=True)
# add isError column
errors = [i for i, d in enumerate(dicts) if d is None]
df['isError'] = False
df.loc[errors, 'isError'] = True
# fill in some NaN's necessary for _clean_features
df.loc[0, 'qtr_time_remain'] = '15:00'
df.qtr_time_remain.fillna(method='bfill', inplace=True)
df.qtr_time_remain.fillna(
pd.Series(np.where(df.quarter == 4, '0:00', '15:00')), inplace=True
)
# use _clean_features to clean up and add columns
new_df = df.apply(_clean_features, axis=1)
return new_df | [
"def",
"expand_details",
"(",
"df",
",",
"detailCol",
"=",
"'detail'",
")",
":",
"df",
"=",
"copy",
".",
"deepcopy",
"(",
"df",
")",
"df",
"[",
"'detail'",
"]",
"=",
"df",
"[",
"detailCol",
"]",
"dicts",
"=",
"[",
"sportsref",
".",
"nfl",
".",
"pbp",
".",
"parse_play_details",
"(",
"detail",
")",
"for",
"detail",
"in",
"df",
"[",
"'detail'",
"]",
".",
"values",
"]",
"# clean up unmatched details",
"cols",
"=",
"{",
"c",
"for",
"d",
"in",
"dicts",
"if",
"d",
"for",
"c",
"in",
"d",
".",
"keys",
"(",
")",
"}",
"blankEntry",
"=",
"{",
"c",
":",
"np",
".",
"nan",
"for",
"c",
"in",
"cols",
"}",
"newDicts",
"=",
"[",
"d",
"if",
"d",
"else",
"blankEntry",
"for",
"d",
"in",
"dicts",
"]",
"# get details DataFrame and merge it with original to create main DataFrame",
"details",
"=",
"pd",
".",
"DataFrame",
"(",
"newDicts",
")",
"df",
"=",
"pd",
".",
"merge",
"(",
"df",
",",
"details",
",",
"left_index",
"=",
"True",
",",
"right_index",
"=",
"True",
")",
"# add isError column",
"errors",
"=",
"[",
"i",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"dicts",
")",
"if",
"d",
"is",
"None",
"]",
"df",
"[",
"'isError'",
"]",
"=",
"False",
"df",
".",
"loc",
"[",
"errors",
",",
"'isError'",
"]",
"=",
"True",
"# fill in some NaN's necessary for _clean_features",
"df",
".",
"loc",
"[",
"0",
",",
"'qtr_time_remain'",
"]",
"=",
"'15:00'",
"df",
".",
"qtr_time_remain",
".",
"fillna",
"(",
"method",
"=",
"'bfill'",
",",
"inplace",
"=",
"True",
")",
"df",
".",
"qtr_time_remain",
".",
"fillna",
"(",
"pd",
".",
"Series",
"(",
"np",
".",
"where",
"(",
"df",
".",
"quarter",
"==",
"4",
",",
"'0:00'",
",",
"'15:00'",
")",
")",
",",
"inplace",
"=",
"True",
")",
"# use _clean_features to clean up and add columns",
"new_df",
"=",
"df",
".",
"apply",
"(",
"_clean_features",
",",
"axis",
"=",
"1",
")",
"return",
"new_df"
] | Expands the details column of the given dataframe and returns the
resulting DataFrame.
:df: The input DataFrame.
:detailCol: The detail column name.
:returns: Returns DataFrame with new columns from pbp parsing. | [
"Expands",
"the",
"details",
"column",
"of",
"the",
"given",
"dataframe",
"and",
"returns",
"the",
"resulting",
"DataFrame",
"."
] | python | test | 42.387097 |
sdispater/pendulum | pendulum/datetime.py | https://github.com/sdispater/pendulum/blob/94d28b0d3cb524ae02361bd1ed7ea03e2e655e4e/pendulum/datetime.py#L674-L723 | def subtract(
self,
years=0,
months=0,
weeks=0,
days=0,
hours=0,
minutes=0,
seconds=0,
microseconds=0,
):
"""
Remove duration from the instance.
:param years: The number of years
:type years: int
:param months: The number of months
:type months: int
:param weeks: The number of weeks
:type weeks: int
:param days: The number of days
:type days: int
:param hours: The number of hours
:type hours: int
:param minutes: The number of minutes
:type minutes: int
:param seconds: The number of seconds
:type seconds: int
:param microseconds: The number of microseconds
:type microseconds: int
:rtype: DateTime
"""
return self.add(
years=-years,
months=-months,
weeks=-weeks,
days=-days,
hours=-hours,
minutes=-minutes,
seconds=-seconds,
microseconds=-microseconds,
) | [
"def",
"subtract",
"(",
"self",
",",
"years",
"=",
"0",
",",
"months",
"=",
"0",
",",
"weeks",
"=",
"0",
",",
"days",
"=",
"0",
",",
"hours",
"=",
"0",
",",
"minutes",
"=",
"0",
",",
"seconds",
"=",
"0",
",",
"microseconds",
"=",
"0",
",",
")",
":",
"return",
"self",
".",
"add",
"(",
"years",
"=",
"-",
"years",
",",
"months",
"=",
"-",
"months",
",",
"weeks",
"=",
"-",
"weeks",
",",
"days",
"=",
"-",
"days",
",",
"hours",
"=",
"-",
"hours",
",",
"minutes",
"=",
"-",
"minutes",
",",
"seconds",
"=",
"-",
"seconds",
",",
"microseconds",
"=",
"-",
"microseconds",
",",
")"
] | Remove duration from the instance.
:param years: The number of years
:type years: int
:param months: The number of months
:type months: int
:param weeks: The number of weeks
:type weeks: int
:param days: The number of days
:type days: int
:param hours: The number of hours
:type hours: int
:param minutes: The number of minutes
:type minutes: int
:param seconds: The number of seconds
:type seconds: int
:param microseconds: The number of microseconds
:type microseconds: int
:rtype: DateTime | [
"Remove",
"duration",
"from",
"the",
"instance",
"."
] | python | train | 21.34 |
dubsmash/config-reader | config_reader/reader.py | https://github.com/dubsmash/config-reader/blob/0c76bb96925a44945e2f79c8cd1b57354d4e4562/config_reader/reader.py#L73-L80 | def get_float(self, key, optional=False):
"""
Tries to fetch a variable from the config and expects it to be strictly a float
:param key: Variable to look for
:param optional: Whether to raise ConfigKeyNotFoundError if key was not found
:return: float
"""
return self._get_typed_value(key, float, lambda x: float(x), optional) | [
"def",
"get_float",
"(",
"self",
",",
"key",
",",
"optional",
"=",
"False",
")",
":",
"return",
"self",
".",
"_get_typed_value",
"(",
"key",
",",
"float",
",",
"lambda",
"x",
":",
"float",
"(",
"x",
")",
",",
"optional",
")"
] | Tries to fetch a variable from the config and expects it to be strictly a float
:param key: Variable to look for
:param optional: Whether to raise ConfigKeyNotFoundError if key was not found
:return: float | [
"Tries",
"to",
"fetch",
"a",
"variable",
"from",
"the",
"config",
"and",
"expects",
"it",
"to",
"be",
"strictly",
"a",
"float",
":",
"param",
"key",
":",
"Variable",
"to",
"look",
"for",
":",
"param",
"optional",
":",
"Whether",
"to",
"raise",
"ConfigKeyNotFoundError",
"if",
"key",
"was",
"not",
"found",
":",
"return",
":",
"float"
] | python | train | 46.875 |
quiltdata/quilt | compiler/quilt/tools/store.py | https://github.com/quiltdata/quilt/blob/651853e7e89a8af86e0ff26167e752efa5878c12/compiler/quilt/tools/store.py#L546-L570 | def save_package_contents(self, root, team, owner, pkgname):
"""
Saves the in-memory contents to a file in the local
package repository.
"""
assert isinstance(root, RootNode)
instance_hash = hash_contents(root)
pkg_path = self.package_path(team, owner, pkgname)
if not os.path.isdir(pkg_path):
os.makedirs(pkg_path)
os.mkdir(os.path.join(pkg_path, self.CONTENTS_DIR))
os.mkdir(os.path.join(pkg_path, self.TAGS_DIR))
os.mkdir(os.path.join(pkg_path, self.VERSIONS_DIR))
dest = os.path.join(pkg_path, self.CONTENTS_DIR, instance_hash)
with open(dest, 'w') as contents_file:
json.dump(root, contents_file, default=encode_node, indent=2, sort_keys=True)
tag_dir = os.path.join(pkg_path, self.TAGS_DIR)
if not os.path.isdir(tag_dir):
os.mkdir(tag_dir)
latest_tag = os.path.join(pkg_path, self.TAGS_DIR, self.LATEST)
with open (latest_tag, 'w') as tagfile:
tagfile.write("{hsh}".format(hsh=instance_hash)) | [
"def",
"save_package_contents",
"(",
"self",
",",
"root",
",",
"team",
",",
"owner",
",",
"pkgname",
")",
":",
"assert",
"isinstance",
"(",
"root",
",",
"RootNode",
")",
"instance_hash",
"=",
"hash_contents",
"(",
"root",
")",
"pkg_path",
"=",
"self",
".",
"package_path",
"(",
"team",
",",
"owner",
",",
"pkgname",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"pkg_path",
")",
":",
"os",
".",
"makedirs",
"(",
"pkg_path",
")",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pkg_path",
",",
"self",
".",
"CONTENTS_DIR",
")",
")",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pkg_path",
",",
"self",
".",
"TAGS_DIR",
")",
")",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pkg_path",
",",
"self",
".",
"VERSIONS_DIR",
")",
")",
"dest",
"=",
"os",
".",
"path",
".",
"join",
"(",
"pkg_path",
",",
"self",
".",
"CONTENTS_DIR",
",",
"instance_hash",
")",
"with",
"open",
"(",
"dest",
",",
"'w'",
")",
"as",
"contents_file",
":",
"json",
".",
"dump",
"(",
"root",
",",
"contents_file",
",",
"default",
"=",
"encode_node",
",",
"indent",
"=",
"2",
",",
"sort_keys",
"=",
"True",
")",
"tag_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"pkg_path",
",",
"self",
".",
"TAGS_DIR",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"tag_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"tag_dir",
")",
"latest_tag",
"=",
"os",
".",
"path",
".",
"join",
"(",
"pkg_path",
",",
"self",
".",
"TAGS_DIR",
",",
"self",
".",
"LATEST",
")",
"with",
"open",
"(",
"latest_tag",
",",
"'w'",
")",
"as",
"tagfile",
":",
"tagfile",
".",
"write",
"(",
"\"{hsh}\"",
".",
"format",
"(",
"hsh",
"=",
"instance_hash",
")",
")"
] | Saves the in-memory contents to a file in the local
package repository. | [
"Saves",
"the",
"in",
"-",
"memory",
"contents",
"to",
"a",
"file",
"in",
"the",
"local",
"package",
"repository",
"."
] | python | train | 42.92 |
wbond/asn1crypto | dev/coverage.py | https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/dev/coverage.py#L395-L440 | def _gitignore(root):
"""
Parses a .gitignore file and returns patterns to match dirs and files.
Only basic gitignore patterns are supported. Pattern negation, ** wildcards
and anchored patterns are not currently implemented.
:param root:
A unicode string of the path to the git repository
:return:
A 2-element tuple:
- 0: a list of unicode strings to match against dirs
- 1: a list of unicode strings to match against dirs and files
"""
gitignore_path = os.path.join(root, '.gitignore')
dir_patterns = ['.git']
file_patterns = []
if not os.path.exists(gitignore_path):
return (dir_patterns, file_patterns)
with open(gitignore_path, 'r', encoding='utf-8') as f:
for line in f.readlines():
line = line.strip()
if not line:
continue
if line.startswith('#'):
continue
if '**' in line:
raise NotImplementedError('gitignore ** wildcards are not implemented')
if line.startswith('!'):
raise NotImplementedError('gitignore pattern negation is not implemented')
if line.startswith('/'):
raise NotImplementedError('gitignore anchored patterns are not implemented')
if line.startswith('\\#'):
line = '#' + line[2:]
if line.startswith('\\!'):
line = '!' + line[2:]
if line.endswith('/'):
dir_patterns.append(line[:-1])
else:
file_patterns.append(line)
return (dir_patterns, file_patterns) | [
"def",
"_gitignore",
"(",
"root",
")",
":",
"gitignore_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"'.gitignore'",
")",
"dir_patterns",
"=",
"[",
"'.git'",
"]",
"file_patterns",
"=",
"[",
"]",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"gitignore_path",
")",
":",
"return",
"(",
"dir_patterns",
",",
"file_patterns",
")",
"with",
"open",
"(",
"gitignore_path",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"not",
"line",
":",
"continue",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"continue",
"if",
"'**'",
"in",
"line",
":",
"raise",
"NotImplementedError",
"(",
"'gitignore ** wildcards are not implemented'",
")",
"if",
"line",
".",
"startswith",
"(",
"'!'",
")",
":",
"raise",
"NotImplementedError",
"(",
"'gitignore pattern negation is not implemented'",
")",
"if",
"line",
".",
"startswith",
"(",
"'/'",
")",
":",
"raise",
"NotImplementedError",
"(",
"'gitignore anchored patterns are not implemented'",
")",
"if",
"line",
".",
"startswith",
"(",
"'\\\\#'",
")",
":",
"line",
"=",
"'#'",
"+",
"line",
"[",
"2",
":",
"]",
"if",
"line",
".",
"startswith",
"(",
"'\\\\!'",
")",
":",
"line",
"=",
"'!'",
"+",
"line",
"[",
"2",
":",
"]",
"if",
"line",
".",
"endswith",
"(",
"'/'",
")",
":",
"dir_patterns",
".",
"append",
"(",
"line",
"[",
":",
"-",
"1",
"]",
")",
"else",
":",
"file_patterns",
".",
"append",
"(",
"line",
")",
"return",
"(",
"dir_patterns",
",",
"file_patterns",
")"
] | Parses a .gitignore file and returns patterns to match dirs and files.
Only basic gitignore patterns are supported. Pattern negation, ** wildcards
and anchored patterns are not currently implemented.
:param root:
A unicode string of the path to the git repository
:return:
A 2-element tuple:
- 0: a list of unicode strings to match against dirs
- 1: a list of unicode strings to match against dirs and files | [
"Parses",
"a",
".",
"gitignore",
"file",
"and",
"returns",
"patterns",
"to",
"match",
"dirs",
"and",
"files",
".",
"Only",
"basic",
"gitignore",
"patterns",
"are",
"supported",
".",
"Pattern",
"negation",
"**",
"wildcards",
"and",
"anchored",
"patterns",
"are",
"not",
"currently",
"implemented",
"."
] | python | train | 34.891304 |
pandas-dev/pandas | pandas/plotting/_converter.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_converter.py#L1002-L1012 | def _get_default_locs(self, vmin, vmax):
"Returns the default locations of ticks."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
locator = self.plot_obj.date_axis_info
if self.isminor:
return np.compress(locator['min'], locator['val'])
return np.compress(locator['maj'], locator['val']) | [
"def",
"_get_default_locs",
"(",
"self",
",",
"vmin",
",",
"vmax",
")",
":",
"if",
"self",
".",
"plot_obj",
".",
"date_axis_info",
"is",
"None",
":",
"self",
".",
"plot_obj",
".",
"date_axis_info",
"=",
"self",
".",
"finder",
"(",
"vmin",
",",
"vmax",
",",
"self",
".",
"freq",
")",
"locator",
"=",
"self",
".",
"plot_obj",
".",
"date_axis_info",
"if",
"self",
".",
"isminor",
":",
"return",
"np",
".",
"compress",
"(",
"locator",
"[",
"'min'",
"]",
",",
"locator",
"[",
"'val'",
"]",
")",
"return",
"np",
".",
"compress",
"(",
"locator",
"[",
"'maj'",
"]",
",",
"locator",
"[",
"'val'",
"]",
")"
] | Returns the default locations of ticks. | [
"Returns",
"the",
"default",
"locations",
"of",
"ticks",
"."
] | python | train | 36.727273 |
HdrHistogram/HdrHistogram_py | hdrh/histogram.py | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/histogram.py#L550-L572 | def decode(encoded_histogram, b64_wrap=True):
'''Decode an encoded histogram and return a new histogram instance that
has been initialized with the decoded content
Return:
a new histogram instance representing the decoded content
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
payload = hdr_payload.payload
histogram = HdrHistogram(payload.lowest_trackable_value,
payload.highest_trackable_value,
payload.significant_figures,
hdr_payload=hdr_payload)
return histogram | [
"def",
"decode",
"(",
"encoded_histogram",
",",
"b64_wrap",
"=",
"True",
")",
":",
"hdr_payload",
"=",
"HdrHistogramEncoder",
".",
"decode",
"(",
"encoded_histogram",
",",
"b64_wrap",
")",
"payload",
"=",
"hdr_payload",
".",
"payload",
"histogram",
"=",
"HdrHistogram",
"(",
"payload",
".",
"lowest_trackable_value",
",",
"payload",
".",
"highest_trackable_value",
",",
"payload",
".",
"significant_figures",
",",
"hdr_payload",
"=",
"hdr_payload",
")",
"return",
"histogram"
] | Decode an encoded histogram and return a new histogram instance that
has been initialized with the decoded content
Return:
a new histogram instance representing the decoded content
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error | [
"Decode",
"an",
"encoded",
"histogram",
"and",
"return",
"a",
"new",
"histogram",
"instance",
"that",
"has",
"been",
"initialized",
"with",
"the",
"decoded",
"content",
"Return",
":",
"a",
"new",
"histogram",
"instance",
"representing",
"the",
"decoded",
"content",
"Exception",
":",
"TypeError",
"in",
"case",
"of",
"base64",
"decode",
"error",
"HdrCookieException",
":",
"the",
"main",
"header",
"has",
"an",
"invalid",
"cookie",
"the",
"compressed",
"payload",
"header",
"has",
"an",
"invalid",
"cookie",
"HdrLengthException",
":",
"the",
"decompressed",
"size",
"is",
"too",
"small",
"for",
"the",
"HdrPayload",
"structure",
"or",
"is",
"not",
"aligned",
"or",
"is",
"too",
"large",
"for",
"the",
"passed",
"payload",
"class",
"zlib",
".",
"error",
":",
"in",
"case",
"of",
"zlib",
"decompression",
"error"
] | python | train | 49.565217 |
gem/oq-engine | openquake/commands/from_shapefile.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commands/from_shapefile.py#L24-L34 | def from_shapefile(output, input_shp_files, validate):
"""
Convert multiple ESRI Shapefile(s) into a single NRML source model file.
"""
input_parser = shapefileparser.ShapefileParser()
source_model = input_parser.read(input_shp_files[0], validate)
for f in input_shp_files[1:]:
source_model.sources.extend(input_parser.read(f, validate).sources)
if not output:
output = os.path.splitext(input_shp_files[0])[0]
shapefileparser.SourceModelParser().write(output + '.xml', source_model) | [
"def",
"from_shapefile",
"(",
"output",
",",
"input_shp_files",
",",
"validate",
")",
":",
"input_parser",
"=",
"shapefileparser",
".",
"ShapefileParser",
"(",
")",
"source_model",
"=",
"input_parser",
".",
"read",
"(",
"input_shp_files",
"[",
"0",
"]",
",",
"validate",
")",
"for",
"f",
"in",
"input_shp_files",
"[",
"1",
":",
"]",
":",
"source_model",
".",
"sources",
".",
"extend",
"(",
"input_parser",
".",
"read",
"(",
"f",
",",
"validate",
")",
".",
"sources",
")",
"if",
"not",
"output",
":",
"output",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"input_shp_files",
"[",
"0",
"]",
")",
"[",
"0",
"]",
"shapefileparser",
".",
"SourceModelParser",
"(",
")",
".",
"write",
"(",
"output",
"+",
"'.xml'",
",",
"source_model",
")"
] | Convert multiple ESRI Shapefile(s) into a single NRML source model file. | [
"Convert",
"multiple",
"ESRI",
"Shapefile",
"(",
"s",
")",
"into",
"a",
"single",
"NRML",
"source",
"model",
"file",
"."
] | python | train | 47.272727 |
zxylvlp/PingPHP | pingphp/grammar.py | https://github.com/zxylvlp/PingPHP/blob/2e9a5f1ef4b5b13310e3f8ff350fa91032357bc5/pingphp/grammar.py#L854-L862 | def p_InSwitchDefList(p):
'''
InSwitchDefList : InSwitchDef
| InSwitchDefList InSwitchDef
'''
if len(p) <= 2:
p[0] = InSwitchDefList(None, p[1])
else:
p[0] = InSwitchDefList(p[1], p[2]) | [
"def",
"p_InSwitchDefList",
"(",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"<=",
"2",
":",
"p",
"[",
"0",
"]",
"=",
"InSwitchDefList",
"(",
"None",
",",
"p",
"[",
"1",
"]",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"InSwitchDefList",
"(",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"2",
"]",
")"
] | InSwitchDefList : InSwitchDef
| InSwitchDefList InSwitchDef | [
"InSwitchDefList",
":",
"InSwitchDef",
"|",
"InSwitchDefList",
"InSwitchDef"
] | python | train | 25.777778 |
slackapi/python-slackclient | slack/web/client.py | https://github.com/slackapi/python-slackclient/blob/901341c0284fd81e6d2719d6a0502308760d83e4/slack/web/client.py#L1050-L1053 | def reminders_list(self, **kwargs) -> SlackResponse:
"""Lists all reminders created by or for a given user."""
self._validate_xoxp_token()
return self.api_call("reminders.list", http_verb="GET", params=kwargs) | [
"def",
"reminders_list",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
"->",
"SlackResponse",
":",
"self",
".",
"_validate_xoxp_token",
"(",
")",
"return",
"self",
".",
"api_call",
"(",
"\"reminders.list\"",
",",
"http_verb",
"=",
"\"GET\"",
",",
"params",
"=",
"kwargs",
")"
] | Lists all reminders created by or for a given user. | [
"Lists",
"all",
"reminders",
"created",
"by",
"or",
"for",
"a",
"given",
"user",
"."
] | python | train | 57.5 |
gitpython-developers/GitPython | git/__init__.py | https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/__init__.py#L19-L27 | def _init_externals():
"""Initialize external projects by putting them into the path"""
if __version__ == 'git':
sys.path.insert(0, osp.join(osp.dirname(__file__), 'ext', 'gitdb'))
try:
import gitdb
except ImportError:
raise ImportError("'gitdb' could not be found in your PYTHONPATH") | [
"def",
"_init_externals",
"(",
")",
":",
"if",
"__version__",
"==",
"'git'",
":",
"sys",
".",
"path",
".",
"insert",
"(",
"0",
",",
"osp",
".",
"join",
"(",
"osp",
".",
"dirname",
"(",
"__file__",
")",
",",
"'ext'",
",",
"'gitdb'",
")",
")",
"try",
":",
"import",
"gitdb",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"'gitdb' could not be found in your PYTHONPATH\"",
")"
] | Initialize external projects by putting them into the path | [
"Initialize",
"external",
"projects",
"by",
"putting",
"them",
"into",
"the",
"path"
] | python | train | 35.333333 |
saltstack/salt | salt/engines/docker_events.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/docker_events.py#L42-L105 | def start(docker_url='unix://var/run/docker.sock',
timeout=CLIENT_TIMEOUT,
tag='salt/engines/docker_events',
filters=None):
'''
Scan for Docker events and fire events
Example Config
.. code-block:: yaml
engines:
- docker_events:
docker_url: unix://var/run/docker.sock
filters:
event:
- start
- stop
- die
- oom
The config above sets up engines to listen
for events from the Docker daemon and publish
them to the Salt event bus.
For filter reference, see https://docs.docker.com/engine/reference/commandline/events/
'''
if __opts__.get('__role') == 'master':
fire_master = salt.utils.event.get_master_event(
__opts__,
__opts__['sock_dir']).fire_event
else:
fire_master = None
def fire(tag, msg):
'''
How to fire the event
'''
if fire_master:
fire_master(msg, tag)
else:
__salt__['event.send'](tag, msg)
try:
# docker-py 2.0 renamed this client attribute
client = docker.APIClient(base_url=docker_url, timeout=timeout)
except AttributeError:
client = docker.Client(base_url=docker_url, timeout=timeout)
try:
events = client.events(filters=filters)
for event in events:
data = salt.utils.json.loads(event.decode(__salt_system_encoding__, errors='replace'))
# https://github.com/docker/cli/blob/master/cli/command/system/events.go#L109
# https://github.com/docker/engine-api/blob/master/types/events/events.go
# Each output includes the event type, actor id, name and action.
            # status field can be omitted
if data['Action']:
fire('{0}/{1}'.format(tag, data['Action']), data)
else:
fire('{0}/{1}'.format(tag, data['status']), data)
except Exception:
traceback.print_exc() | [
"def",
"start",
"(",
"docker_url",
"=",
"'unix://var/run/docker.sock'",
",",
"timeout",
"=",
"CLIENT_TIMEOUT",
",",
"tag",
"=",
"'salt/engines/docker_events'",
",",
"filters",
"=",
"None",
")",
":",
"if",
"__opts__",
".",
"get",
"(",
"'__role'",
")",
"==",
"'master'",
":",
"fire_master",
"=",
"salt",
".",
"utils",
".",
"event",
".",
"get_master_event",
"(",
"__opts__",
",",
"__opts__",
"[",
"'sock_dir'",
"]",
")",
".",
"fire_event",
"else",
":",
"fire_master",
"=",
"None",
"def",
"fire",
"(",
"tag",
",",
"msg",
")",
":",
"'''\n How to fire the event\n '''",
"if",
"fire_master",
":",
"fire_master",
"(",
"msg",
",",
"tag",
")",
"else",
":",
"__salt__",
"[",
"'event.send'",
"]",
"(",
"tag",
",",
"msg",
")",
"try",
":",
"# docker-py 2.0 renamed this client attribute",
"client",
"=",
"docker",
".",
"APIClient",
"(",
"base_url",
"=",
"docker_url",
",",
"timeout",
"=",
"timeout",
")",
"except",
"AttributeError",
":",
"client",
"=",
"docker",
".",
"Client",
"(",
"base_url",
"=",
"docker_url",
",",
"timeout",
"=",
"timeout",
")",
"try",
":",
"events",
"=",
"client",
".",
"events",
"(",
"filters",
"=",
"filters",
")",
"for",
"event",
"in",
"events",
":",
"data",
"=",
"salt",
".",
"utils",
".",
"json",
".",
"loads",
"(",
"event",
".",
"decode",
"(",
"__salt_system_encoding__",
",",
"errors",
"=",
"'replace'",
")",
")",
"# https://github.com/docker/cli/blob/master/cli/command/system/events.go#L109",
"# https://github.com/docker/engine-api/blob/master/types/events/events.go",
"# Each output includes the event type, actor id, name and action.",
"# status field can be ommited",
"if",
"data",
"[",
"'Action'",
"]",
":",
"fire",
"(",
"'{0}/{1}'",
".",
"format",
"(",
"tag",
",",
"data",
"[",
"'Action'",
"]",
")",
",",
"data",
")",
"else",
":",
"fire",
"(",
"'{0}/{1}'",
".",
"format",
"(",
"tag",
",",
"data",
"[",
"'status'",
"]",
")",
",",
"data",
")",
"except",
"Exception",
":",
"traceback",
".",
"print_exc",
"(",
")"
] | Scan for Docker events and fire events
Example Config
.. code-block:: yaml
engines:
- docker_events:
docker_url: unix://var/run/docker.sock
filters:
event:
- start
- stop
- die
- oom
The config above sets up engines to listen
for events from the Docker daemon and publish
them to the Salt event bus.
For filter reference, see https://docs.docker.com/engine/reference/commandline/events/ | [
"Scan",
"for",
"Docker",
"events",
"and",
"fire",
"events"
] | python | train | 31.25 |
LuqueDaniel/pybooru | pybooru/api_danbooru.py | https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L431-L438 | def favorite_remove(self, post_id):
"""Remove a post from favorites (Requires login).
Parameters:
post_id (int): Where post_id is the post id.
"""
return self._get('favorites/{0}.json'.format(post_id), method='DELETE',
auth=True) | [
"def",
"favorite_remove",
"(",
"self",
",",
"post_id",
")",
":",
"return",
"self",
".",
"_get",
"(",
"'favorites/{0}.json'",
".",
"format",
"(",
"post_id",
")",
",",
"method",
"=",
"'DELETE'",
",",
"auth",
"=",
"True",
")"
] | Remove a post from favorites (Requires login).
Parameters:
post_id (int): Where post_id is the post id. | [
"Remove",
"a",
"post",
"from",
"favorites",
"(",
"Requires",
"login",
")",
"."
] | python | train | 36.5 |
kytos/python-openflow | pyof/v0x04/controller2switch/common.py | https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/v0x04/controller2switch/common.py#L95-L104 | def find_class(self):
"""Return a class related with this type."""
if self.value <= 1:
return InstructionsProperty
elif self.value <= 3:
return NextTablesProperty
elif self.value <= 7:
return ActionsProperty
return OxmProperty | [
"def",
"find_class",
"(",
"self",
")",
":",
"if",
"self",
".",
"value",
"<=",
"1",
":",
"return",
"InstructionsProperty",
"elif",
"self",
".",
"value",
"<=",
"3",
":",
"return",
"NextTablesProperty",
"elif",
"self",
".",
"value",
"<=",
"7",
":",
"return",
"ActionsProperty",
"return",
"OxmProperty"
] | Return a class related to this type. | [
"Return",
"a",
"class",
"related",
"with",
"this",
"type",
"."
] | python | train | 29.4 |
libyal/dtfabric | dtfabric/reader.py | https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/reader.py#L490-L510 | def _ReadFloatingPointDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads a floating-point data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
FloatingPointDefinition: floating-point data type definition.
"""
return self._ReadFixedSizeDataTypeDefinition(
definitions_registry, definition_values,
data_types.FloatingPointDefinition, definition_name,
self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE,
is_member=is_member, supported_size_values=(4, 8)) | [
"def",
"_ReadFloatingPointDataTypeDefinition",
"(",
"self",
",",
"definitions_registry",
",",
"definition_values",
",",
"definition_name",
",",
"is_member",
"=",
"False",
")",
":",
"return",
"self",
".",
"_ReadFixedSizeDataTypeDefinition",
"(",
"definitions_registry",
",",
"definition_values",
",",
"data_types",
".",
"FloatingPointDefinition",
",",
"definition_name",
",",
"self",
".",
"_SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE",
",",
"is_member",
"=",
"is_member",
",",
"supported_size_values",
"=",
"(",
"4",
",",
"8",
")",
")"
] | Reads a floating-point data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
FloatingPointDefinition: floating-point data type definition. | [
"Reads",
"a",
"floating",
"-",
"point",
"data",
"type",
"definition",
"."
] | python | train | 41.52381 |
brechtm/rinohtype | src/rinoh/backend/pdf/xobject/purepng.py | https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/backend/pdf/xobject/purepng.py#L1092-L1100 | def set_rendering_intent(self, rendering_intent):
"""Set rendering intent variant for sRGB chunk"""
if rendering_intent not in (None,
PERCEPTUAL,
RELATIVE_COLORIMETRIC,
SATURATION,
ABSOLUTE_COLORIMETRIC):
raise FormatError('Unknown redering intent')
self.rendering_intent = rendering_intent | [
"def",
"set_rendering_intent",
"(",
"self",
",",
"rendering_intent",
")",
":",
"if",
"rendering_intent",
"not",
"in",
"(",
"None",
",",
"PERCEPTUAL",
",",
"RELATIVE_COLORIMETRIC",
",",
"SATURATION",
",",
"ABSOLUTE_COLORIMETRIC",
")",
":",
"raise",
"FormatError",
"(",
"'Unknown redering intent'",
")",
"self",
".",
"rendering_intent",
"=",
"rendering_intent"
] | Set rendering intent variant for sRGB chunk | [
"Set",
"rendering",
"intent",
"variant",
"for",
"sRGB",
"chunk"
] | python | train | 51.333333 |
tetframework/Tonnikala | tonnikala/loader.py | https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/loader.py#L253-L281 | def load(self, name):
"""
        If not yet in the cache, load the named template and compile it,
placing it into the cache.
If in cache, return the cached template.
"""
if self.reload:
self._maybe_purge_cache()
template = self.cache.get(name)
if template:
return template
path = self.resolve(name)
if not path:
raise OSError(errno.ENOENT, "File not found: %s" % name)
with codecs.open(path, 'r', encoding='UTF-8') as f:
contents = f.read()
mtime = os.fstat(f.fileno()).st_mtime
template = self.load_string(contents, filename=path)
template.mtime = mtime
template.path = path
self.cache[name] = template
return template | [
"def",
"load",
"(",
"self",
",",
"name",
")",
":",
"if",
"self",
".",
"reload",
":",
"self",
".",
"_maybe_purge_cache",
"(",
")",
"template",
"=",
"self",
".",
"cache",
".",
"get",
"(",
"name",
")",
"if",
"template",
":",
"return",
"template",
"path",
"=",
"self",
".",
"resolve",
"(",
"name",
")",
"if",
"not",
"path",
":",
"raise",
"OSError",
"(",
"errno",
".",
"ENOENT",
",",
"\"File not found: %s\"",
"%",
"name",
")",
"with",
"codecs",
".",
"open",
"(",
"path",
",",
"'r'",
",",
"encoding",
"=",
"'UTF-8'",
")",
"as",
"f",
":",
"contents",
"=",
"f",
".",
"read",
"(",
")",
"mtime",
"=",
"os",
".",
"fstat",
"(",
"f",
".",
"fileno",
"(",
")",
")",
".",
"st_mtime",
"template",
"=",
"self",
".",
"load_string",
"(",
"contents",
",",
"filename",
"=",
"path",
")",
"template",
".",
"mtime",
"=",
"mtime",
"template",
".",
"path",
"=",
"path",
"self",
".",
"cache",
"[",
"name",
"]",
"=",
"template",
"return",
"template"
] | If not yet in the cache, load the named template and compile it,
placing it into the cache.
If in cache, return the cached template. | [
"If",
"not",
"yet",
"in",
"the",
"cache",
"load",
"the",
"named",
"template",
"and",
"compiles",
"it",
"placing",
"it",
"into",
"the",
"cache",
"."
] | python | train | 26.896552 |
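The Tonnikala `load` row above combines cache lookup, path resolution, and mtime bookkeeping. Here is a hypothetical, self-contained sketch of that caching pattern, with `resolve` and `load_string` stubbed out and a plain dict standing in for a compiled template; it is not the Tonnikala `Loader` itself.

```python
# Hypothetical sketch of the cache-then-load pattern shown above.
import codecs
import errno
import os


class CachingLoader:
    def __init__(self, search_path):
        self.search_path = search_path
        self.cache = {}

    def resolve(self, name):
        # Stubbed resolver: a single search directory.
        path = os.path.join(self.search_path, name)
        return path if os.path.exists(path) else None

    def load_string(self, contents, filename):
        # Stand-in for template compilation.
        return {'source': contents, 'filename': filename}

    def load(self, name):
        template = self.cache.get(name)
        if template:
            return template
        path = self.resolve(name)
        if not path:
            raise OSError(errno.ENOENT, "File not found: %s" % name)
        with codecs.open(path, 'r', encoding='UTF-8') as f:
            contents = f.read()
            mtime = os.fstat(f.fileno()).st_mtime
        template = self.load_string(contents, filename=path)
        template['mtime'] = mtime
        self.cache[name] = template
        return template


# Usage (assuming a 'templates' directory exists):
#   loader = CachingLoader('templates')
#   page = loader.load('index.html')
```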
angr/claripy | claripy/balancer.py | https://github.com/angr/claripy/blob/4ed61924880af1ea8fb778047d896ec0156412a6/claripy/balancer.py#L548-L584 | def _handle_comparison(self, truism):
"""
Handles all comparisons.
"""
# print("COMP:", truism)
is_lt, is_equal, is_unsigned = self.comparison_info[truism.op]
size = len(truism.args[0])
int_max = 2**size-1 if is_unsigned else 2**(size-1)-1
int_min = -2**(size-1)
left_min = self._min(truism.args[0], signed=not is_unsigned)
left_max = self._max(truism.args[0], signed=not is_unsigned)
right_min = self._min(truism.args[1], signed=not is_unsigned)
right_max = self._max(truism.args[1], signed=not is_unsigned)
bound_max = right_max if is_equal else (right_max-1 if is_lt else right_max+1)
bound_min = right_min if is_equal else (right_min-1 if is_lt else right_min+1)
if is_lt and bound_max < int_min:
# if the bound max is negative and we're unsigned less than, we're fucked
raise ClaripyBalancerUnsatError()
elif not is_lt and bound_min > int_max:
# if the bound min is too big, we're fucked
raise ClaripyBalancerUnsatError()
current_min = int_min
current_max = int_max
if is_lt:
current_max = min(int_max, left_max, bound_max)
self._add_upper_bound(truism.args[0], current_max)
else:
current_min = max(int_min, left_min, bound_min)
self._add_lower_bound(truism.args[0], current_min) | [
"def",
"_handle_comparison",
"(",
"self",
",",
"truism",
")",
":",
"# print(\"COMP:\", truism)",
"is_lt",
",",
"is_equal",
",",
"is_unsigned",
"=",
"self",
".",
"comparison_info",
"[",
"truism",
".",
"op",
"]",
"size",
"=",
"len",
"(",
"truism",
".",
"args",
"[",
"0",
"]",
")",
"int_max",
"=",
"2",
"**",
"size",
"-",
"1",
"if",
"is_unsigned",
"else",
"2",
"**",
"(",
"size",
"-",
"1",
")",
"-",
"1",
"int_min",
"=",
"-",
"2",
"**",
"(",
"size",
"-",
"1",
")",
"left_min",
"=",
"self",
".",
"_min",
"(",
"truism",
".",
"args",
"[",
"0",
"]",
",",
"signed",
"=",
"not",
"is_unsigned",
")",
"left_max",
"=",
"self",
".",
"_max",
"(",
"truism",
".",
"args",
"[",
"0",
"]",
",",
"signed",
"=",
"not",
"is_unsigned",
")",
"right_min",
"=",
"self",
".",
"_min",
"(",
"truism",
".",
"args",
"[",
"1",
"]",
",",
"signed",
"=",
"not",
"is_unsigned",
")",
"right_max",
"=",
"self",
".",
"_max",
"(",
"truism",
".",
"args",
"[",
"1",
"]",
",",
"signed",
"=",
"not",
"is_unsigned",
")",
"bound_max",
"=",
"right_max",
"if",
"is_equal",
"else",
"(",
"right_max",
"-",
"1",
"if",
"is_lt",
"else",
"right_max",
"+",
"1",
")",
"bound_min",
"=",
"right_min",
"if",
"is_equal",
"else",
"(",
"right_min",
"-",
"1",
"if",
"is_lt",
"else",
"right_min",
"+",
"1",
")",
"if",
"is_lt",
"and",
"bound_max",
"<",
"int_min",
":",
"# if the bound max is negative and we're unsigned less than, we're fucked",
"raise",
"ClaripyBalancerUnsatError",
"(",
")",
"elif",
"not",
"is_lt",
"and",
"bound_min",
">",
"int_max",
":",
"# if the bound min is too big, we're fucked",
"raise",
"ClaripyBalancerUnsatError",
"(",
")",
"current_min",
"=",
"int_min",
"current_max",
"=",
"int_max",
"if",
"is_lt",
":",
"current_max",
"=",
"min",
"(",
"int_max",
",",
"left_max",
",",
"bound_max",
")",
"self",
".",
"_add_upper_bound",
"(",
"truism",
".",
"args",
"[",
"0",
"]",
",",
"current_max",
")",
"else",
":",
"current_min",
"=",
"max",
"(",
"int_min",
",",
"left_min",
",",
"bound_min",
")",
"self",
".",
"_add_lower_bound",
"(",
"truism",
".",
"args",
"[",
"0",
"]",
",",
"current_min",
")"
] | Handles all comparisons. | [
"Handles",
"all",
"comparisons",
"."
] | python | train | 38.162162 |
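The claripy `_handle_comparison` row above derives numeric bounds from the comparison operator's direction, strictness, and signedness. The standalone sketch below reproduces only that bound arithmetic (no claripy ASTs), and it keeps the snippet's unconditional signed `int_min` rather than correcting it.

```python
# Standalone sketch of the bound bookkeeping used in the row above.
def comparison_bounds(size, is_lt, is_equal, is_unsigned, right_min, right_max):
    """Return (int_min, int_max, bound_min, bound_max) for a comparison."""
    int_max = 2**size - 1 if is_unsigned else 2**(size - 1) - 1
    int_min = -2**(size - 1)  # mirrors the snippet: signed minimum either way
    # Strict comparisons shift the usable bound by one.
    bound_max = right_max if is_equal else (right_max - 1 if is_lt else right_max + 1)
    bound_min = right_min if is_equal else (right_min - 1 if is_lt else right_min + 1)
    return int_min, int_max, bound_min, bound_max


# x <= 100 over an unsigned 8-bit value: upper bound is 100.
print(comparison_bounds(8, True, True, True, 0, 100))   # (-128, 255, 0, 100)
# x < 100 (strict): the largest satisfying value is 99.
print(comparison_bounds(8, True, False, True, 0, 100))  # (-128, 255, -1, 99)
```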
ambitioninc/python-logentries-api | logentries_api/resources.py | https://github.com/ambitioninc/python-logentries-api/blob/77ff1a7a2995d7ea2725b74e34c0f880f4ee23bc/logentries_api/resources.py#L362-L376 | def list(self):
"""
Get all current hooks
:return: All hooks
:rtype: list of dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries
"""
return self._post(
request=ApiActions.LIST.value,
uri=ApiUri.HOOKS.value,
).get('hooks') | [
"def",
"list",
"(",
"self",
")",
":",
"return",
"self",
".",
"_post",
"(",
"request",
"=",
"ApiActions",
".",
"LIST",
".",
"value",
",",
"uri",
"=",
"ApiUri",
".",
"HOOKS",
".",
"value",
",",
")",
".",
"get",
"(",
"'hooks'",
")"
] | Get all current hooks
:return: All hooks
:rtype: list of dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries | [
"Get",
"all",
"current",
"hooks"
] | python | test | 27.066667 |
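The logentries_api `list` row above is a thin wrapper that posts a "list" action and returns the `hooks` key of the JSON response. A hypothetical sketch of that wrapper pattern follows; the endpoint path, payload shape, and account-key field are illustrative assumptions, not the real Logentries API.

```python
# Hypothetical wrapper sketch -- endpoint and payload shape are assumed.
import json
from urllib import request


class HooksClient:
    def __init__(self, base_url, account_key):
        self.base_url = base_url
        self.account_key = account_key

    def _post(self, request_action, uri):
        # POST a small JSON action payload and decode the JSON response.
        payload = json.dumps({'request': request_action,
                              'acl': self.account_key}).encode('utf-8')
        req = request.Request('%s/%s' % (self.base_url, uri), data=payload,
                              headers={'Content-Type': 'application/json'})
        with request.urlopen(req) as response:
            return json.loads(response.read().decode('utf-8'))

    def list(self):
        """Returns all hooks from the service's response."""
        return self._post(request_action='list', uri='hooks').get('hooks')
```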