Dataset schema (column name: type, observed range):

- hexsha: string, length 40
- repo: string, length 5–121
- path: string, length 4–227
- license: sequence
- language: string, 1 class
- identifier: string, length 1–107
- return_type: string, length 2–237, nullable (⌀)
- original_string: string, length 75–13.4k
- original_docstring: string, length 13–12.9k
- docstring: string, length 13–2.57k
- docstring_tokens: sequence
- code: string, length 23–1.88k
- code_tokens: sequence
- short_docstring: string, length 1–1.32k
- short_docstring_tokens: sequence
- comment: sequence
- parameters: list
- docstring_params: dict
- code_with_imports: string, length 23–1.88k
- idxs: int64, 0–611k
- cluster: int64, 0–1.02k

hexsha | repo | path | license | language | identifier | return_type | original_string | original_docstring | docstring | docstring_tokens | code | code_tokens | short_docstring | short_docstring_tokens | comment | parameters | docstring_params | code_with_imports | idxs | cluster
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
7540540e177a568c94169af73b1747f4da9025d7 | akersten/cute-kitchen | src/router/annotations.py | [
"MIT"
] | Python | route_login_required | <not_specific> | def route_login_required():
"""
Decorates a function to indicate the requirement for a logged-in session.
:return: The decorated function.
"""
def decorator(fn):
fn.route_login_required = True
return fn
return decorator |
Decorates a function to indicate the requirement for a logged-in session.
:return: The decorated function.
| Decorates a function to indicate the requirement for a logged-in session. | [
"Decorates",
"a",
"function",
"to",
"indicate",
"the",
"requirement",
"for",
"a",
"logged",
"-",
"in",
"session",
"."
] | def route_login_required():
def decorator(fn):
fn.route_login_required = True
return fn
return decorator | [
"def",
"route_login_required",
"(",
")",
":",
"def",
"decorator",
"(",
"fn",
")",
":",
"fn",
".",
"route_login_required",
"=",
"True",
"return",
"fn",
"return",
"decorator"
] | Decorates a function to indicate the requirement for a logged-in session. | [
"Decorates",
"a",
"function",
"to",
"indicate",
"the",
"requirement",
"for",
"a",
"logged",
"-",
"in",
"session",
"."
] | [
"\"\"\"\n Decorates a function to indicate the requirement for a logged-in session.\n :return: The decorated function.\n \"\"\""
] | [] | {
"returns": [
{
"docstring": "The decorated function.",
"docstring_tokens": [
"The",
"decorated",
"function",
"."
],
"type": null
}
],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | def route_login_required():
def decorator(fn):
fn.route_login_required = True
return fn
return decorator | 0 | 56 |
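
A minimal usage sketch for the record above; the handler name is made up, and note the decorator factory is called with empty parentheses:

```python
def route_login_required():
    def decorator(fn):
        fn.route_login_required = True
        return fn
    return decorator

@route_login_required()  # factory takes no arguments, so it must be called
def dashboard():
    return "kitchen stats"

# A router can later dispatch on the marker attribute the decorator sets:
assert getattr(dashboard, "route_login_required", False)
```
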
c5b891ac420cbeffb65019d562bc382e7bf6367b | MacFarlaneBro/compose-mode | compose_mode/generate.py | [
"MIT"
] | Python | fix_restart | <not_specific> | def fix_restart(restart_config):
""" Fix output of docker-compose.
docker-compose's "show config" mechanism--the internals of which we use to
merge configs--doesn't actually return valid configurations for the
"restart" property as they convert it to an internal representation which
they then forget to convert back to the yaml format. We do that by hand
here.
"""
try:
mrc = restart_config['MaximumRetryCount']
except TypeError:
name = restart_config
else:
name = restart_config['Name']
if name in ['always', 'unless-stopped', 'no']:
return name
else:
return '{}:{}'.format(name, mrc) | Fix output of docker-compose.
docker-compose's "show config" mechanism--the internals of which we use to
merge configs--doesn't actually return valid configurations for the
"restart" property as they convert it to an internal representation which
they then forget to convert back to the yaml format. We do that by hand
here.
| Fix output of docker-compose. | [
"Fix",
"output",
"of",
"docker",
"-",
"compose",
"."
] | def fix_restart(restart_config):
try:
mrc = restart_config['MaximumRetryCount']
except TypeError:
name = restart_config
else:
name = restart_config['Name']
if name in ['always', 'unless-stopped', 'no']:
return name
else:
return '{}:{}'.format(name, mrc) | [
"def",
"fix_restart",
"(",
"restart_config",
")",
":",
"try",
":",
"mrc",
"=",
"restart_config",
"[",
"'MaximumRetryCount'",
"]",
"except",
"TypeError",
":",
"name",
"=",
"restart_config",
"else",
":",
"name",
"=",
"restart_config",
"[",
"'Name'",
"]",
"if",
"name",
"in",
"[",
"'always'",
",",
"'unless-stopped'",
",",
"'no'",
"]",
":",
"return",
"name",
"else",
":",
"return",
"'{}:{}'",
".",
"format",
"(",
"name",
",",
"mrc",
")"
] | Fix output of docker-compose. | [
"Fix",
"output",
"of",
"docker",
"-",
"compose",
"."
] | [
"\"\"\" Fix output of docker-compose.\n\n docker-compose's \"show config\" mechanism--the internals of which we use to\n merge configs--doesn't actually return valid configurations for the\n \"restart\" property as they convert it to an internal representation which\n they then forget to convert back to the yaml format. We do that by hand\n here.\n \"\"\""
] | [
{
"param": "restart_config",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "restart_config",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def fix_restart(restart_config):
try:
mrc = restart_config['MaximumRetryCount']
except TypeError:
name = restart_config
else:
name = restart_config['Name']
if name in ['always', 'unless-stopped', 'no']:
return name
else:
return '{}:{}'.format(name, mrc) | 1 | 921 |
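
A usage sketch covering both input shapes the function handles, docker-compose's internal dict form and the plain-string passthrough (values made up; `fix_restart` from the record above is assumed in scope):

```python
print(fix_restart({"Name": "on-failure", "MaximumRetryCount": 5}))  # -> on-failure:5
print(fix_restart("always"))                                        # -> always
```
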
65ef5aa7f6bb80020bfccdca5945fcdda9118ad3 | netceteragroup/da4e | assembler.py | [
"MIT"
] | Python | _read_file_and_join_lines | <not_specific> | def _read_file_and_join_lines(file_name):
'''
Reads all contents of a file and appends each line into one ',' separated string.
Leading and trailing whitespace is removed. Lines having a '#' character as first non-
whitespace character are considered as comment and are therefore ignored in the output.
@param fileName: the file to read
'''
def _normalizeString(line):
return line.replace('\n', '').strip()
def _isComment(line):
return not line.startswith("#")
def _isEmptyLine(line):
return line != ""
with open(file_name, 'r') as file:
return ','.join(filter(_isEmptyLine, filter(_isComment, map(_normalizeString, file.readlines())))) |
Reads all contents of a file and appends each line into one ',' separated string.
Leading and trailing whitespace is removed. Lines having a '#' character as first non-
whitespace character are considered as comment and are therefore ignored in the output.
@param fileName: the file to read
| Reads all contents of a file and appends each line into one ',' separated string.
Leading and trailing whitespace is removed. Lines having a '#' character as first non
whitespace character are considered as comment and are therefore ignored in the output. | [
"Reads",
"all",
"contents",
"of",
"a",
"file",
"and",
"appends",
"each",
"line",
"into",
"one",
"'",
"'",
"separated",
"string",
".",
"Leading",
"and",
"trailing",
"whitespace",
"is",
"removed",
".",
"Lines",
"having",
"a",
"'",
"#",
"'",
"character",
"as",
"first",
"non",
"whitespace",
"character",
"are",
"considered",
"as",
"comment",
"and",
"are",
"therefore",
"ignored",
"in",
"the",
"output",
"."
] | def _read_file_and_join_lines(file_name):
def _normalizeString(line):
return line.replace('\n', '').strip()
def _isComment(line):
return not line.startswith("#")
def _isEmptyLine(line):
return line != ""
with open(file_name, 'r') as file:
return ','.join(filter(_isEmptyLine, filter(_isComment, map(_normalizeString, file.readlines())))) | [
"def",
"_read_file_and_join_lines",
"(",
"file_name",
")",
":",
"def",
"_normalizeString",
"(",
"line",
")",
":",
"return",
"line",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
".",
"strip",
"(",
")",
"def",
"_isComment",
"(",
"line",
")",
":",
"return",
"not",
"line",
".",
"startswith",
"(",
"\"#\"",
")",
"def",
"_isEmptyLine",
"(",
"line",
")",
":",
"return",
"line",
"!=",
"\"\"",
"with",
"open",
"(",
"file_name",
",",
"'r'",
")",
"as",
"file",
":",
"return",
"','",
".",
"join",
"(",
"filter",
"(",
"_isEmptyLine",
",",
"filter",
"(",
"_isComment",
",",
"map",
"(",
"_normalizeString",
",",
"file",
".",
"readlines",
"(",
")",
")",
")",
")",
")"
] | Reads all contents of a file and appends each line into one ',' separated string. | [
"Reads",
"all",
"contents",
"of",
"a",
"file",
"and",
"appends",
"each",
"line",
"into",
"one",
"'",
"'",
"separated",
"string",
"."
] | [
"'''\n Reads all contents of a file and appends each line into one ',' separated string. \n Leading and trailing whitespace is removed. Lines having a '#' character as first non-\n whitespace character are considered as comment and are therefore ignored in the output. \n @param fileName: the file to read\n '''"
] | [
{
"param": "file_name",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "file_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "fileName",
"type": null,
"docstring": "the file to read",
"docstring_tokens": [
"the",
"file",
"to",
"read"
],
"default": null,
"is_optional": false
}
],
"others": []
} | def _read_file_and_join_lines(file_name):
def _normalizeString(line):
return line.replace('\n', '').strip()
def _isComment(line):
return not line.startswith("#")
def _isEmptyLine(line):
return line != ""
with open(file_name, 'r') as file:
return ','.join(filter(_isEmptyLine, filter(_isComment, map(_normalizeString, file.readlines())))) | 2 | 312 |
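
A quick check of the comment and whitespace handling (`_read_file_and_join_lines` from the record above is assumed in scope; the file name is made up):

```python
with open("deps.txt", "w") as f:
    f.write("  alpha \n# a comment\n\nbeta\n")

# Comment and blank lines are dropped, surrounding whitespace stripped:
print(_read_file_and_join_lines("deps.txt"))  # -> alpha,beta
```
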
4ba9d06164e542a8b6d024e64d49a8773a1ff650 | WolfgangFahl/pyOnlineSpreadSheetEditing | onlinespreadsheet/tablequery.py | [
"Apache-2.0"
] | Python | match | <not_specific> | def match(pattern:str,string:str):
'''
re match search for the given pattern with ignore case
'''
return re.search(pattern=pattern, string=string, flags=re.IGNORECASE) |
re match search for the given pattern with ignore case
| re match search for the given pattern with ignore case | [
"re",
"match",
"search",
"for",
"the",
"given",
"pattern",
"with",
"ignore",
"case"
] | def match(pattern:str,string:str):
return re.search(pattern=pattern, string=string, flags=re.IGNORECASE) | [
"def",
"match",
"(",
"pattern",
":",
"str",
",",
"string",
":",
"str",
")",
":",
"return",
"re",
".",
"search",
"(",
"pattern",
"=",
"pattern",
",",
"string",
"=",
"string",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")"
] | re match search for the given pattern with ignore case | [
"re",
"match",
"search",
"for",
"the",
"given",
"pattern",
"with",
"ignore",
"case"
] | [
"'''\n re match search for the given pattern with ignore case\n '''"
] | [
{
"param": "pattern",
"type": "str"
},
{
"param": "string",
"type": "str"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "pattern",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "string",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import re
def match(pattern:str,string:str):
return re.search(pattern=pattern, string=string, flags=re.IGNORECASE) | 3 | 994 |
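
A self-contained usage sketch showing the case-insensitive search behaviour:

```python
import re

def match(pattern: str, string: str):
    return re.search(pattern=pattern, string=string, flags=re.IGNORECASE)

print(bool(match("wiki", "WikiData")))   # True: the search ignores case
print(bool(match("csv", "sheet.xlsx")))  # False: re.search returns None
```
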
ed05c8d76b4e4beff6fca1cee61e2aadf3732abe | tomerten/PyElegantSDDS | pyelegantsdds/elegantrun.py | [
"MIT"
] | Python | write_parallel_run_script | null | def write_parallel_run_script(sif):
"""
Method to generate parallel elegant run
script.
Parameters:
-----------
sif: str
path to singularity container
"""
bashstrlist = [
"#!/bin/bash",
"pele={}".format(sif),
'cmd="bash temp_run_pelegant.sh"',
"",
"$pele $cmd $1",
]
bashstr = "\n".join(bashstrlist)
# write to file
with open("run_pelegant.sh", "w") as f:
f.write(bashstr) |
Method to generate parallel elegant run
script.
Parameters:
-----------
sif: str
path to singularity container
| Method to generate parallel elegant run
script.
str
path to singularity container | [
"Method",
"to",
"generate",
"parallel",
"elegant",
"run",
"script",
".",
"str",
"path",
"to",
"singularity",
"container"
] | def write_parallel_run_script(sif):
bashstrlist = [
"#!/bin/bash",
"pele={}".format(sif),
'cmd="bash temp_run_pelegant.sh"',
"",
"$pele $cmd $1",
]
bashstr = "\n".join(bashstrlist)
with open("run_pelegant.sh", "w") as f:
f.write(bashstr) | [
"def",
"write_parallel_run_script",
"(",
"sif",
")",
":",
"bashstrlist",
"=",
"[",
"\"#!/bin/bash\"",
",",
"\"pele={}\"",
".",
"format",
"(",
"sif",
")",
",",
"'cmd=\"bash temp_run_pelegant.sh\"'",
",",
"\"\"",
",",
"\"$pele $cmd $1\"",
",",
"]",
"bashstr",
"=",
"\"\\n\"",
".",
"join",
"(",
"bashstrlist",
")",
"with",
"open",
"(",
"\"run_pelegant.sh\"",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"bashstr",
")"
] | Method to generate parallel elegant run
script. | [
"Method",
"to",
"generate",
"parallel",
"elegant",
"run",
"script",
"."
] | [
"\"\"\"\n Method to generate parallel elegant run\n script.\n\n Parameters:\n -----------\n sif: str\n path to singularity container\n\n \"\"\"",
"# write to file"
] | [
{
"param": "sif",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "sif",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def write_parallel_run_script(sif):
bashstrlist = [
"#!/bin/bash",
"pele={}".format(sif),
'cmd="bash temp_run_pelegant.sh"',
"",
"$pele $cmd $1",
]
bashstr = "\n".join(bashstrlist)
with open("run_pelegant.sh", "w") as f:
f.write(bashstr) | 5 | 673 |
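
A usage sketch for the record above; the container path is hypothetical, and `write_parallel_run_script` is assumed in scope:

```python
write_parallel_run_script("/containers/pelegant.sif")  # made-up .sif path
with open("run_pelegant.sh") as f:
    print(f.read())  # a bash wrapper that forwards $1 into the container
```
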
bf09424b7fd16830410dec98a364876163fae903 | erikagardini/InferringMusicAndVisualArtStyleEvolution | python/utilities.py | [
"MIT"
] | Python | find_min_max_indexes | <not_specific> | def find_min_max_indexes(dates):
"""
Get the index of the min value and the max value (centroids are not considered)
Args:
[ndarray int] dates: dates matrix
Returns:
[float] index_min: index of the min value
[int] index_max: index of the max value
"""
min = dates[0]
index_min = 0
max = dates[0]
index_max = 0
for i in range(0, dates.shape[0]):
if dates[i] < min and dates[i] != -1:
min = dates[i]
index_min = i
if dates[i] > max and dates[i] != -1:
max = dates[i]
index_max = i
return index_min, index_max |
Get the index of the min value and the max value (centroids are not considered)
Args:
[ndarray int] dates: dates matrix
Returns:
[float] index_min: index of the min value
[int] index_max: index of the max value
| Get the index of the min value and the max value (centroids are not considered) | [
"Get",
"the",
"index",
"of",
"the",
"min",
"value",
"and",
"the",
"max",
"value",
"(",
"centroids",
"are",
"not",
"considered",
")"
] | def find_min_max_indexes(dates):
min = dates[0]
index_min = 0
max = dates[0]
index_max = 0
for i in range(0, dates.shape[0]):
if dates[i] < min and dates[i] != -1:
min = dates[i]
index_min = i
if dates[i] > max and dates[i] != -1:
max = dates[i]
index_max = i
return index_min, index_max | [
"def",
"find_min_max_indexes",
"(",
"dates",
")",
":",
"min",
"=",
"dates",
"[",
"0",
"]",
"index_min",
"=",
"0",
"max",
"=",
"dates",
"[",
"0",
"]",
"index_max",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"dates",
".",
"shape",
"[",
"0",
"]",
")",
":",
"if",
"dates",
"[",
"i",
"]",
"<",
"min",
"and",
"dates",
"[",
"i",
"]",
"!=",
"-",
"1",
":",
"min",
"=",
"dates",
"[",
"i",
"]",
"index_min",
"=",
"i",
"if",
"dates",
"[",
"i",
"]",
">",
"max",
"and",
"dates",
"[",
"i",
"]",
"!=",
"-",
"1",
":",
"max",
"=",
"dates",
"[",
"i",
"]",
"index_max",
"=",
"i",
"return",
"index_min",
",",
"index_max"
] | Get the index of the min value and the max value (centroids are not considered) | [
"Get",
"the",
"index",
"of",
"the",
"min",
"value",
"and",
"the",
"max",
"value",
"(",
"centroids",
"are",
"not",
"considered",
")"
] | [
"\"\"\"\n Get the index of the min value and the max value (centroids are not considered)\n Args:\n [ndarray int] dates: dates matrix\n\n Returns:\n [float] index_min: index of the min value\n [int] index_max: index of the max value\n \"\"\""
] | [
{
"param": "dates",
"type": null
}
] | {
"returns": [
{
"docstring": "[float] index_min: index of the min value\n[int] index_max: index of the max value",
"docstring_tokens": [
"[",
"float",
"]",
"index_min",
":",
"index",
"of",
"the",
"min",
"value",
"[",
"int",
"]",
"index_max",
":",
"index",
"of",
"the",
"max",
"value"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "dates",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "[ndarray int] dates",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"others": []
} | def find_min_max_indexes(dates):
min = dates[0]
index_min = 0
max = dates[0]
index_max = 0
for i in range(0, dates.shape[0]):
if dates[i] < min and dates[i] != -1:
min = dates[i]
index_min = i
if dates[i] > max and dates[i] != -1:
max = dates[i]
index_max = i
return index_min, index_max | 6 | 721 |
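
A usage sketch with made-up dates; the function indexes `dates.shape[0]`, so a NumPy array is assumed, and -1 entries (missing dates) are skipped:

```python
import numpy as np

dates = np.array([1910, -1, 1875, 1960, -1])
# With find_min_max_indexes from the record above in scope:
print(find_min_max_indexes(dates))  # -> (2, 3): 1875 is earliest, 1960 latest
```
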
8eae126dbf8a8baeab1e2e976d363b1bcf02863a | muvr/muvr-ml | sensorcnn/dataset/utils.py | [
"BSD-3-Clause"
] | Python | csv_file_iterator | null | def csv_file_iterator(root_directory):
"""Returns a generator (iterator) of absolute file paths for CSV files in a given directory"""
for root_path, _, files in os.walk(root_directory, followlinks=True):
for f in files:
if f.endswith("csv"):
yield os.path.join(root_path, f) | Returns a generator (iterator) of absolute file paths for CSV files in a given directory | Returns a generator (iterator) of absolute file paths for CSV files in a given directory | [
"Returns",
"a",
"generator",
"(",
"iterator",
")",
"of",
"absolute",
"file",
"paths",
"for",
"CSV",
"files",
"in",
"a",
"given",
"directory"
] | def csv_file_iterator(root_directory):
for root_path, _, files in os.walk(root_directory, followlinks=True):
for f in files:
if f.endswith("csv"):
yield os.path.join(root_path, f) | [
"def",
"csv_file_iterator",
"(",
"root_directory",
")",
":",
"for",
"root_path",
",",
"_",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"root_directory",
",",
"followlinks",
"=",
"True",
")",
":",
"for",
"f",
"in",
"files",
":",
"if",
"f",
".",
"endswith",
"(",
"\"csv\"",
")",
":",
"yield",
"os",
".",
"path",
".",
"join",
"(",
"root_path",
",",
"f",
")"
] | Returns a generator (iterator) of absolute file paths for CSV files in a given directory | [
"Returns",
"a",
"generator",
"(",
"iterator",
")",
"of",
"absolute",
"file",
"paths",
"for",
"CSV",
"files",
"in",
"a",
"given",
"directory"
] | [
"\"\"\"Returns a generator (iterator) of absolute file paths for CSV files in a given directory\"\"\""
] | [
{
"param": "root_directory",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "root_directory",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import os
def csv_file_iterator(root_directory):
for root_path, _, files in os.walk(root_directory, followlinks=True):
for f in files:
if f.endswith("csv"):
yield os.path.join(root_path, f) | 7 | 671 |
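
A usage sketch with a hypothetical directory; note that despite the docstring, the yielded paths are absolute only if `root_directory` itself is absolute, since they are built with `os.path.join` from it:

```python
# With csv_file_iterator from the record above in scope:
for path in csv_file_iterator("data/sessions"):  # made-up directory
    print(path)  # every *.csv under the tree, paths joined from root_directory
```
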
4c8f7cefbccbd3770a96057fbf07a756c0ce334d | intrlocutr/nightcoreify | nightcorei.py | [
"0BSD"
] | Python | create_tags | list | def create_tags(tags: list) -> list:
"""Prepares tags for a new upload. Keeps as many old tags as possible while adding a "nightcore" tag."""
to_add = 'nightcore'
# The total number of characters in YouTube video tags can't exceed 400.
# We're adding the "nightcore" tag, so we'll only keep this many characters of the original tags.
target_len = 400 - len(to_add)
new_tags = []
length = 0
# Keep tags up until they can no longer fit within our target.
for tag in tags:
length += len(tag)
if length < target_len:
new_tags.append(tag)
else:
break
new_tags.append(to_add)
return new_tags | Prepares tags for a new upload. Keeps as many old tags as possible while adding a "nightcore" tag. | Prepares tags for a new upload. Keeps as many old tags as possible while adding a "nightcore" tag. | [
"Prepares",
"tags",
"for",
"a",
"new",
"upload",
".",
"Keeps",
"as",
"many",
"old",
"tags",
"as",
"possible",
"while",
"adding",
"a",
"\"",
"nightcore",
"\"",
"tag",
"."
] | def create_tags(tags: list) -> list:
to_add = 'nightcore'
target_len = 400 - len(to_add)
new_tags = []
length = 0
for tag in tags:
length += len(tag)
if length < target_len:
new_tags.append(tag)
else:
break
new_tags.append(to_add)
return new_tags | [
"def",
"create_tags",
"(",
"tags",
":",
"list",
")",
"->",
"list",
":",
"to_add",
"=",
"'nightcore'",
"target_len",
"=",
"400",
"-",
"len",
"(",
"to_add",
")",
"new_tags",
"=",
"[",
"]",
"length",
"=",
"0",
"for",
"tag",
"in",
"tags",
":",
"length",
"+=",
"len",
"(",
"tag",
")",
"if",
"length",
"<",
"target_len",
":",
"new_tags",
".",
"append",
"(",
"tag",
")",
"else",
":",
"break",
"new_tags",
".",
"append",
"(",
"to_add",
")",
"return",
"new_tags"
] | Prepares tags for a new upload. | [
"Prepares",
"tags",
"for",
"a",
"new",
"upload",
"."
] | [
"\"\"\"Prepares tags for a new upload. Keeps as many old tags as possible while adding a \"nightcore\" tag.\"\"\"",
"# The total number of characters in YouTube video tags can't exceed 400.",
"# We're adding the \"nightcore\" tag, so we'll only keep this many characters of the original tags.",
"# Keep tags up until they can no longer fit within our target."
] | [
{
"param": "tags",
"type": "list"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "tags",
"type": "list",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def create_tags(tags: list) -> list:
to_add = 'nightcore'
target_len = 400 - len(to_add)
new_tags = []
length = 0
for tag in tags:
length += len(tag)
if length < target_len:
new_tags.append(tag)
else:
break
new_tags.append(to_add)
return new_tags | 8 | 208 |
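
A quick check with made-up tags (`create_tags` from the record above assumed in scope); the 400-character budget counts only the tag text itself:

```python
old = ["remix", "edm", "sped up"]
print(create_tags(old))  # -> ['remix', 'edm', 'sped up', 'nightcore']
```
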
903fc63fede8ca3f8a8dc0b5f4ca496131e1a105 | JustKowalski/sound_separation | datasets/fuss/reverberate_and_mix.py | [
"Apache-2.0"
] | Python | write_item_dict | null | def write_item_dict(item_dict, item_file, separate=False):
"""Write tab separated source/rir lists in files for train/validate/eval."""
if not separate:
with open(item_file, 'w') as f:
for subfolder in item_dict:
for example in item_dict[subfolder]:
line = '\t'.join([example] + item_dict[subfolder][example])
f.write(line + '\n')
else:
for subfolder in item_dict:
item_base, item_ext = item_file.split('.')
item_file_sub = item_base + '_' + subfolder + '.' + item_ext
with open(item_file_sub, 'w') as f:
for example in item_dict[subfolder]:
line = '\t'.join([example] + item_dict[subfolder][example])
f.write(line + '\n') | Write tab separated source/rir lists in files for train/validate/eval. | Write tab separated source/rir lists in files for train/validate/eval. | [
"Write",
"tab",
"separated",
"source",
"/",
"rir",
"lists",
"in",
"files",
"for",
"train",
"/",
"validate",
"/",
"eval",
"."
] | def write_item_dict(item_dict, item_file, separate=False):
if not separate:
with open(item_file, 'w') as f:
for subfolder in item_dict:
for example in item_dict[subfolder]:
line = '\t'.join([example] + item_dict[subfolder][example])
f.write(line + '\n')
else:
for subfolder in item_dict:
item_base, item_ext = item_file.split('.')
item_file_sub = item_base + '_' + subfolder + '.' + item_ext
with open(item_file_sub, 'w') as f:
for example in item_dict[subfolder]:
line = '\t'.join([example] + item_dict[subfolder][example])
f.write(line + '\n') | [
"def",
"write_item_dict",
"(",
"item_dict",
",",
"item_file",
",",
"separate",
"=",
"False",
")",
":",
"if",
"not",
"separate",
":",
"with",
"open",
"(",
"item_file",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"subfolder",
"in",
"item_dict",
":",
"for",
"example",
"in",
"item_dict",
"[",
"subfolder",
"]",
":",
"line",
"=",
"'\\t'",
".",
"join",
"(",
"[",
"example",
"]",
"+",
"item_dict",
"[",
"subfolder",
"]",
"[",
"example",
"]",
")",
"f",
".",
"write",
"(",
"line",
"+",
"'\\n'",
")",
"else",
":",
"for",
"subfolder",
"in",
"item_dict",
":",
"item_base",
",",
"item_ext",
"=",
"item_file",
".",
"split",
"(",
"'.'",
")",
"item_file_sub",
"=",
"item_base",
"+",
"'_'",
"+",
"subfolder",
"+",
"'.'",
"+",
"item_ext",
"with",
"open",
"(",
"item_file_sub",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"example",
"in",
"item_dict",
"[",
"subfolder",
"]",
":",
"line",
"=",
"'\\t'",
".",
"join",
"(",
"[",
"example",
"]",
"+",
"item_dict",
"[",
"subfolder",
"]",
"[",
"example",
"]",
")",
"f",
".",
"write",
"(",
"line",
"+",
"'\\n'",
")"
] | Write tab separated source/rir lists in files for train/validate/eval. | [
"Write",
"tab",
"separated",
"source",
"/",
"rir",
"lists",
"in",
"files",
"for",
"train",
"/",
"validate",
"/",
"eval",
"."
] | [
"\"\"\"Write tab separated source/rir lists in files for train/validate/eval.\"\"\""
] | [
{
"param": "item_dict",
"type": null
},
{
"param": "item_file",
"type": null
},
{
"param": "separate",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "item_dict",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "item_file",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "separate",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def write_item_dict(item_dict, item_file, separate=False):
if not separate:
with open(item_file, 'w') as f:
for subfolder in item_dict:
for example in item_dict[subfolder]:
line = '\t'.join([example] + item_dict[subfolder][example])
f.write(line + '\n')
else:
for subfolder in item_dict:
item_base, item_ext = item_file.split('.')
item_file_sub = item_base + '_' + subfolder + '.' + item_ext
with open(item_file_sub, 'w') as f:
for example in item_dict[subfolder]:
line = '\t'.join([example] + item_dict[subfolder][example])
f.write(line + '\n') | 9 | 677 |
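
A usage sketch with a minimal item dict (all names made up; `write_item_dict` from the record above assumed in scope):

```python
items = {"train": {"ex1": ["src1.wav", "rir1.wav"]}}
write_item_dict(items, "mix_info.txt")                 # one combined file
write_item_dict(items, "mix_info.txt", separate=True)  # writes mix_info_train.txt
```
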
8f34f83cd4a60a8d8e72e31e4fde3b9e819b893f | BAMWelDX/weldx | weldx/tests/test_time.py | [
"BSD-3-Clause"
] | Python | _transform_array | <not_specific> | def _transform_array(data, is_array, is_scalar):
"""Transform an array into a scalar, single value array or return in unmodified."""
if not is_array:
return data[0]
if is_scalar:
return [data[0]]
return data | Transform an array into a scalar, single value array or return in unmodified. | Transform an array into a scalar, single value array or return in unmodified. | [
"Transform",
"an",
"array",
"into",
"a",
"scalar",
"single",
"value",
"array",
"or",
"return",
"in",
"unmodified",
"."
] | def _transform_array(data, is_array, is_scalar):
if not is_array:
return data[0]
if is_scalar:
return [data[0]]
return data | [
"def",
"_transform_array",
"(",
"data",
",",
"is_array",
",",
"is_scalar",
")",
":",
"if",
"not",
"is_array",
":",
"return",
"data",
"[",
"0",
"]",
"if",
"is_scalar",
":",
"return",
"[",
"data",
"[",
"0",
"]",
"]",
"return",
"data"
] | Transform an array into a scalar, single value array or return in unmodified. | [
"Transform",
"an",
"array",
"into",
"a",
"scalar",
"single",
"value",
"array",
"or",
"return",
"in",
"unmodified",
"."
] | [
"\"\"\"Transform an array into a scalar, single value array or return in unmodified.\"\"\""
] | [
{
"param": "data",
"type": null
},
{
"param": "is_array",
"type": null
},
{
"param": "is_scalar",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "is_array",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "is_scalar",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _transform_array(data, is_array, is_scalar):
if not is_array:
return data[0]
if is_scalar:
return [data[0]]
return data | 10 | 524 |
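
A quick check of the three branches (`_transform_array` from the record above assumed in scope):

```python
print(_transform_array([42], is_array=False, is_scalar=True))    # -> 42
print(_transform_array([42, 7], is_array=True, is_scalar=True))  # -> [42]
print(_transform_array([42, 7], is_array=True, is_scalar=False)) # -> [42, 7]
```
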
cc94e3893f2982011fc08d2790d839d5a009c4f5 | EmersonAires/Introducao_a_ciencia_da_computacao_com_Python | Exercicios_Resolvidos/Parte 2/Semana 1/exercicio_12_1.py | [
"MIT"
] | Python | extrair_coluna | <not_specific> | def extrair_coluna(b_mat, col):
'''retorna uma lista com os elementos da coluna indicada'''
coluna_b_mat = []
for linha in b_mat:
elemento = linha[col]
coluna_b_mat.append(elemento)
return coluna_b_mat | retorna uma lista com os elementos da coluna indicada | retorna uma lista com os elementos da coluna indicada | [
"retorna",
"uma",
"lista",
"com",
"os",
"elementos",
"da",
"coluna",
"indicada"
] | def extrair_coluna(b_mat, col):
coluna_b_mat = []
for linha in b_mat:
elemento = linha[col]
coluna_b_mat.append(elemento)
return coluna_b_mat | [
"def",
"extrair_coluna",
"(",
"b_mat",
",",
"col",
")",
":",
"coluna_b_mat",
"=",
"[",
"]",
"for",
"linha",
"in",
"b_mat",
":",
"elemento",
"=",
"linha",
"[",
"col",
"]",
"coluna_b_mat",
".",
"append",
"(",
"elemento",
")",
"return",
"coluna_b_mat"
] | retorna uma lista com os elementos da coluna indicada | [
"retorna",
"uma",
"lista",
"com",
"os",
"elementos",
"da",
"coluna",
"indicada"
] | [
"'''retorna uma lista com os elementos da coluna indicada'''"
] | [
{
"param": "b_mat",
"type": null
},
{
"param": "col",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "b_mat",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "col",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def extrair_coluna(b_mat, col):
coluna_b_mat = []
for linha in b_mat:
elemento = linha[col]
coluna_b_mat.append(elemento)
return coluna_b_mat | 11 | 667 |
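
The docstring is Portuguese for "returns a list with the elements of the indicated column". A usage sketch (`extrair_coluna` from the record above assumed in scope):

```python
matriz = [[1, 2, 3],
          [4, 5, 6]]
print(extrair_coluna(matriz, 1))  # -> [2, 5]
```
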
5bb08e1a378e746bf276fdb1704d41f5409d7d65 | trisongz/file_io | fileio/core/py_utils.py | [
"MIT"
] | Python | temporary_assignment | null | def temporary_assignment(obj, attr, value):
"""Temporarily assign obj.attr to value."""
original = getattr(obj, attr)
setattr(obj, attr, value)
try:
yield
finally:
setattr(obj, attr, original) | Temporarily assign obj.attr to value. | Temporarily assign obj.attr to value. | [
"Temporarily",
"assign",
"obj",
".",
"attr",
"to",
"value",
"."
] | def temporary_assignment(obj, attr, value):
original = getattr(obj, attr)
setattr(obj, attr, value)
try:
yield
finally:
setattr(obj, attr, original) | [
"def",
"temporary_assignment",
"(",
"obj",
",",
"attr",
",",
"value",
")",
":",
"original",
"=",
"getattr",
"(",
"obj",
",",
"attr",
")",
"setattr",
"(",
"obj",
",",
"attr",
",",
"value",
")",
"try",
":",
"yield",
"finally",
":",
"setattr",
"(",
"obj",
",",
"attr",
",",
"original",
")"
] | Temporarily assign obj.attr to value. | [
"Temporarily",
"assign",
"obj",
".",
"attr",
"to",
"value",
"."
] | [
"\"\"\"Temporarily assign obj.attr to value.\"\"\""
] | [
{
"param": "obj",
"type": null
},
{
"param": "attr",
"type": null
},
{
"param": "value",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "obj",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "attr",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "value",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def temporary_assignment(obj, attr, value):
original = getattr(obj, attr)
setattr(obj, attr, value)
try:
yield
finally:
setattr(obj, attr, original) | 12 | 748 |
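
As extracted, this is a bare generator function; in its original context it is presumably wrapped with `contextlib.contextmanager`, a decorator the extraction appears to have stripped (an assumption, not part of the record). A self-contained sketch under that assumption:

```python
from contextlib import contextmanager

@contextmanager  # assumed decorator; the record shows only the function body
def temporary_assignment(obj, attr, value):
    original = getattr(obj, attr)
    setattr(obj, attr, value)
    try:
        yield
    finally:
        setattr(obj, attr, original)

class Config:
    verbose = False

with temporary_assignment(Config, "verbose", True):
    print(Config.verbose)  # True inside the block
print(Config.verbose)      # restored to False afterwards
```
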
63484325eaa962f8f517d4993495bc314d2325ed | alchem0x2A/vasp-interactive-test | examples/ex11_k8s_minimal.py | [
"MIT"
] | Python | _thread_calculate | <not_specific> | def _thread_calculate(atoms, energy):
"""A threaded version of atoms.get_potential_energy. Energy is a one-member list
ideas taken from https://wiki.fysik.dtu.dk/ase/_modules/ase/neb.html#NEB
"""
energy[0] = atoms.get_potential_energy()
return | A threaded version of atoms.get_potential_energy. Energy is a one-member list
ideas taken from https://wiki.fysik.dtu.dk/ase/_modules/ase/neb.html#NEB
| A threaded version of atoms.get_potential_energy. | [
"A",
"threaded",
"version",
"of",
"atoms",
".",
"get_potential_energy",
"."
] | def _thread_calculate(atoms, energy):
energy[0] = atoms.get_potential_energy()
return | [
"def",
"_thread_calculate",
"(",
"atoms",
",",
"energy",
")",
":",
"energy",
"[",
"0",
"]",
"=",
"atoms",
".",
"get_potential_energy",
"(",
")",
"return"
] | A threaded version of atoms.get_potential_energy. | [
"A",
"threaded",
"version",
"of",
"atoms",
".",
"get_potential_energy",
"."
] | [
"\"\"\"A threaded version of atoms.get_potential_energy. Energy is a one-member list\n ideas taken from https://wiki.fysik.dtu.dk/ase/_modules/ase/neb.html#NEB\n \"\"\""
] | [
{
"param": "atoms",
"type": null
},
{
"param": "energy",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "atoms",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "energy",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _thread_calculate(atoms, energy):
energy[0] = atoms.get_potential_energy()
return | 13 | 315 |
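
A threaded-call sketch for the record above; `atoms` stands in for an ASE Atoms object with a calculator attached (an assumption, not part of the record):

```python
import threading

# With _thread_calculate from the record above in scope:
energy = [None]  # one-member list acts as the thread's output slot
t = threading.Thread(target=_thread_calculate, args=(atoms, energy))
t.start()
t.join()
print(energy[0])  # the potential energy computed in the worker thread
```
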
e71a7da7f51510edc97fee6d78a7582b46d166d7 | seung-lab/mapbuffer | mapbuffer/mapbuffer.py | [
"BSD-3-Clause"
] | Python | eytzinger_sort | <not_specific> | def eytzinger_sort(inpt, output, i = 0, k = 1):
"""
Takes an ascendingly sorted input and
an equal sized output buffer into which to
rewrite the input in eytzinger order.
Modified from:
https://algorithmica.org/en/eytzinger
"""
if k <= len(inpt):
i = eytzinger_sort(inpt, output, i, 2 * k)
output[k - 1] = inpt[i]
i += 1
i = eytzinger_sort(inpt, output,i, 2 * k + 1)
return i |
Takes an ascendingly sorted input and
an equal sized output buffer into which to
rewrite the input in eytzinger order.
Modified from:
https://algorithmica.org/en/eytzinger
| Takes an ascendingly sorted input and
an equal sized output buffer into which to
rewrite the input in eytzinger order.
| [
"Takes",
"an",
"ascendingly",
"sorted",
"input",
"and",
"an",
"equal",
"sized",
"output",
"buffer",
"into",
"which",
"to",
"rewrite",
"the",
"input",
"in",
"eytzinger",
"order",
"."
] | def eytzinger_sort(inpt, output, i = 0, k = 1):
if k <= len(inpt):
i = eytzinger_sort(inpt, output, i, 2 * k)
output[k - 1] = inpt[i]
i += 1
i = eytzinger_sort(inpt, output,i, 2 * k + 1)
return i | [
"def",
"eytzinger_sort",
"(",
"inpt",
",",
"output",
",",
"i",
"=",
"0",
",",
"k",
"=",
"1",
")",
":",
"if",
"k",
"<=",
"len",
"(",
"inpt",
")",
":",
"i",
"=",
"eytzinger_sort",
"(",
"inpt",
",",
"output",
",",
"i",
",",
"2",
"*",
"k",
")",
"output",
"[",
"k",
"-",
"1",
"]",
"=",
"inpt",
"[",
"i",
"]",
"i",
"+=",
"1",
"i",
"=",
"eytzinger_sort",
"(",
"inpt",
",",
"output",
",",
"i",
",",
"2",
"*",
"k",
"+",
"1",
")",
"return",
"i"
] | Takes an ascendingly sorted input and
an equal sized output buffer into which to
rewrite the input in eytzinger order. | [
"Takes",
"an",
"ascendingly",
"sorted",
"input",
"and",
"an",
"equal",
"sized",
"output",
"buffer",
"into",
"which",
"to",
"rewrite",
"the",
"input",
"in",
"eytzinger",
"order",
"."
] | [
"\"\"\"\n Takes an ascendingly sorted input and \n an equal sized output buffer into which to \n rewrite the input in eytzinger order.\n\n Modified from:\n https://algorithmica.org/en/eytzinger\n \"\"\""
] | [
{
"param": "inpt",
"type": null
},
{
"param": "output",
"type": null
},
{
"param": "i",
"type": null
},
{
"param": "k",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "inpt",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "output",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "i",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "k",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def eytzinger_sort(inpt, output, i = 0, k = 1):
if k <= len(inpt):
i = eytzinger_sort(inpt, output, i, 2 * k)
output[k - 1] = inpt[i]
i += 1
i = eytzinger_sort(inpt, output,i, 2 * k + 1)
return i | 14 | 993 |
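
A usage sketch (`eytzinger_sort` from the record above assumed in scope); the input must already be sorted ascending and the output buffer must have the same length:

```python
inpt = [1, 2, 3, 4, 5, 6, 7]
output = [None] * len(inpt)
eytzinger_sort(inpt, output)
print(output)  # -> [4, 2, 6, 1, 3, 5, 7], a BFS layout of the search tree
```
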
b2984037c6a8ed370b786cafaa04ec91d894c19a | GHzytp/PyLorentz | GUI/PyLorentz_GUI.py | [
"BSD-3-Clause"
] | Python | change_visibility | None | def change_visibility(window: sg.Window, elem_val_list: List[Tuple[sg.Element, Any]]) -> None:
""" Take a list of element keys and change
visibility of the element.
Args:
window : The element representing the main GUI window.
elem_val_list : The list of elements with values whose
state is to be changed.
Returns:
None
"""
for elem_key, val in elem_val_list:
window[elem_key].Update(visible=val) | Take a list of element keys and change
visibility of the element.
Args:
window : The element representing the main GUI window.
elem_val_list : The list of elements with values whose
state is to be changed.
Returns:
None
| Take a list of element keys and change
visibility of the element. | [
"Take",
"a",
"list",
"of",
"element",
"keys",
"and",
"change",
"visibility",
"of",
"the",
"element",
"."
] | def change_visibility(window: sg.Window, elem_val_list: List[Tuple[sg.Element, Any]]) -> None:
for elem_key, val in elem_val_list:
window[elem_key].Update(visible=val) | [
"def",
"change_visibility",
"(",
"window",
":",
"sg",
".",
"Window",
",",
"elem_val_list",
":",
"List",
"[",
"Tuple",
"[",
"sg",
".",
"Element",
",",
"Any",
"]",
"]",
")",
"->",
"None",
":",
"for",
"elem_key",
",",
"val",
"in",
"elem_val_list",
":",
"window",
"[",
"elem_key",
"]",
".",
"Update",
"(",
"visible",
"=",
"val",
")"
] | Take a list of element keys and change
visibility of the element. | [
"Take",
"a",
"list",
"of",
"element",
"keys",
"and",
"change",
"visibility",
"of",
"the",
"element",
"."
] | [
"\"\"\" Take a list of element keys and change\n visibility of the element.\n\n Args:\n window : The element representing the main GUI window.\n elem_val_list : The list of elements with values whose\n state is to be changed.\n\n Returns:\n None\n \"\"\""
] | [
{
"param": "window",
"type": "sg.Window"
},
{
"param": "elem_val_list",
"type": "List[Tuple[sg.Element, Any]]"
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "window",
"type": "sg.Window",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "elem_val_list",
"type": "List[Tuple[sg.Element, Any]]",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "window ",
"type": null,
"docstring": "The element representing the main GUI window.",
"docstring_tokens": [
"The",
"element",
"representing",
"the",
"main",
"GUI",
"window",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "elem_val_list ",
"type": null,
"docstring": "The list of elements with values whose\nstate is to be changed.",
"docstring_tokens": [
"The",
"list",
"of",
"elements",
"with",
"values",
"whose",
"state",
"is",
"to",
"be",
"changed",
"."
],
"default": null,
"is_optional": null
}
],
"others": []
} | def change_visibility(window: sg.Window, elem_val_list: List[Tuple[sg.Element, Any]]) -> None:
for elem_key, val in elem_val_list:
window[elem_key].Update(visible=val) | 15 | 239 |
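
A usage sketch; `window` stands in for an open PySimpleGUI window and the element keys are hypothetical (`change_visibility` from the record above assumed in scope):

```python
# Show the graph element and hide the help text in one call:
change_visibility(window, [("__graph__", True), ("__help_text__", False)])
```
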
61d3a295b3c1cedde8c1eab628b02734cf1c0cd5 | BensonGathu/neighborhood | virtual/lib/python3.8/site-packages/PIL/ImageChops.py | [
"Unlicense"
] | Python | multiply | <not_specific> | def multiply(image1, image2):
"""
Superimposes two images on top of each other.
If you multiply an image with a solid black image, the result is black. If
you multiply with a solid white image, the image is unaffected.
.. code-block:: python
out = image1 * image2 / MAX
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_multiply(image2.im)) |
Superimposes two images on top of each other.
If you multiply an image with a solid black image, the result is black. If
you multiply with a solid white image, the image is unaffected.
.. code-block:: python
out = image1 * image2 / MAX
:rtype: :py:class:`~PIL.Image.Image`
| Superimposes two images on top of each other.
If you multiply an image with a solid black image, the result is black. If
you multiply with a solid white image, the image is unaffected.
code-block:: python
out = image1 * image2 / MAX | [
"Superimposes",
"two",
"images",
"on",
"top",
"of",
"each",
"other",
".",
"If",
"you",
"multiply",
"an",
"image",
"with",
"a",
"solid",
"black",
"image",
"the",
"result",
"is",
"black",
".",
"If",
"you",
"multiply",
"with",
"a",
"solid",
"white",
"image",
"the",
"image",
"is",
"unaffected",
".",
"code",
"-",
"block",
"::",
"python",
"out",
"=",
"image1",
"*",
"image2",
"/",
"MAX"
] | def multiply(image1, image2):
image1.load()
image2.load()
return image1._new(image1.im.chop_multiply(image2.im)) | [
"def",
"multiply",
"(",
"image1",
",",
"image2",
")",
":",
"image1",
".",
"load",
"(",
")",
"image2",
".",
"load",
"(",
")",
"return",
"image1",
".",
"_new",
"(",
"image1",
".",
"im",
".",
"chop_multiply",
"(",
"image2",
".",
"im",
")",
")"
] | Superimposes two images on top of each other. | [
"Superimposes",
"two",
"images",
"on",
"top",
"of",
"each",
"other",
"."
] | [
"\"\"\"\n Superimposes two images on top of each other.\n\n If you multiply an image with a solid black image, the result is black. If\n you multiply with a solid white image, the image is unaffected.\n\n .. code-block:: python\n\n out = image1 * image2 / MAX\n\n :rtype: :py:class:`~PIL.Image.Image`\n \"\"\""
] | [
{
"param": "image1",
"type": null
},
{
"param": "image2",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": ":py:class:`~PIL.Image.Image`"
}
],
"raises": [],
"params": [
{
"identifier": "image1",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "image2",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def multiply(image1, image2):
image1.load()
image2.load()
return image1._new(image1.im.chop_multiply(image2.im)) | 16 | 804 |
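
A self-contained check of the "solid white leaves the image unaffected" property, using Pillow's public `ImageChops.multiply`:

```python
from PIL import Image, ImageChops

a = Image.new("L", (4, 4), 128)
b = Image.new("L", (4, 4), 255)  # solid white: 128 * 255 / 255 == 128
out = ImageChops.multiply(a, b)
print(out.getpixel((0, 0)))  # -> 128
```
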
2d8da20bf2fb315a41e314223d05cc62ea9dcae5 | DeepanshS/mrsimulator-ui | app/sims/method/__init__.py | [
"BSD-3-Clause"
] | Python | generate_sidepanel | <not_specific> | def generate_sidepanel(method, index):
"""Generate scrollable side panel listing for methods"""
title = html.B(f"Method {index}", className="")
# method name
name = html.Div(method["name"], className="")
# method channel(s)
channels = ", ".join(method["channels"])
channels = html.Div(f"Channel: {channels}")
# n dimensions
n_dim = len(method["spectral_dimensions"])
n_dim = html.Div(f"Dimensions: {n_dim}")
a_tag = html.A([title, name, channels, n_dim])
# The H6(index) only shows for smaller screen sizes.
return html.Li(
[html.H6(index), html.Div(a_tag)],
# draggable="true",
className="list-group-item",
# id={"type": "select-method-index", "index": index},
) | Generate scrollable side panel listing for methods | Generate scrollable side panel listing for methods | [
"Generate",
"scrollable",
"side",
"panel",
"listing",
"for",
"methods"
] | def generate_sidepanel(method, index):
title = html.B(f"Method {index}", className="")
name = html.Div(method["name"], className="")
channels = ", ".join(method["channels"])
channels = html.Div(f"Channel: {channels}")
n_dim = len(method["spectral_dimensions"])
n_dim = html.Div(f"Dimensions: {n_dim}")
a_tag = html.A([title, name, channels, n_dim])
return html.Li(
[html.H6(index), html.Div(a_tag)],
className="list-group-item",
) | [
"def",
"generate_sidepanel",
"(",
"method",
",",
"index",
")",
":",
"title",
"=",
"html",
".",
"B",
"(",
"f\"Method {index}\"",
",",
"className",
"=",
"\"\"",
")",
"name",
"=",
"html",
".",
"Div",
"(",
"method",
"[",
"\"name\"",
"]",
",",
"className",
"=",
"\"\"",
")",
"channels",
"=",
"\", \"",
".",
"join",
"(",
"method",
"[",
"\"channels\"",
"]",
")",
"channels",
"=",
"html",
".",
"Div",
"(",
"f\"Channel: {channels}\"",
")",
"n_dim",
"=",
"len",
"(",
"method",
"[",
"\"spectral_dimensions\"",
"]",
")",
"n_dim",
"=",
"html",
".",
"Div",
"(",
"f\"Dimensions: {n_dim}\"",
")",
"a_tag",
"=",
"html",
".",
"A",
"(",
"[",
"title",
",",
"name",
",",
"channels",
",",
"n_dim",
"]",
")",
"return",
"html",
".",
"Li",
"(",
"[",
"html",
".",
"H6",
"(",
"index",
")",
",",
"html",
".",
"Div",
"(",
"a_tag",
")",
"]",
",",
"className",
"=",
"\"list-group-item\"",
",",
")"
] | Generate scrollable side panel listing for methods | [
"Generate",
"scrollable",
"side",
"panel",
"listing",
"for",
"methods"
] | [
"\"\"\"Generate scrollable side panel listing for methods\"\"\"",
"# method name",
"# method channel(s)",
"# n dimensions",
"# The H6(index) only shows for smaller screen sizes.",
"# draggable=\"true\",",
"# id={\"type\": \"select-method-index\", \"index\": index},"
] | [
{
"param": "method",
"type": null
},
{
"param": "index",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "method",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "index",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
import dash_html_components as html  # Dash components; the stdlib html module has no Div/B/Li
def generate_sidepanel(method, index):
title = html.B(f"Method {index}", className="")
name = html.Div(method["name"], className="")
channels = ", ".join(method["channels"])
channels = html.Div(f"Channel: {channels}")
n_dim = len(method["spectral_dimensions"])
n_dim = html.Div(f"Dimensions: {n_dim}")
a_tag = html.A([title, name, channels, n_dim])
return html.Li(
[html.H6(index), html.Div(a_tag)],
className="list-group-item",
) | 18 | 196 |
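
A usage sketch with hypothetical method metadata; it assumes Dash's html components are importable and `generate_sidepanel` from the record above is in scope:

```python
method = {"name": "BlochDecaySpectrum",   # made-up values
          "channels": ["1H"],
          "spectral_dimensions": [{}]}
item = generate_sidepanel(method, 0)  # returns a dash html.Li list-group item
```
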
6b1bbc653af085e7c5303ff686cc116c3b15f540 | QuantumJack/GNN | lib/net_gen.py | [
"Apache-2.0"
] | Python | __generate_nets | <not_specific> | def __generate_nets(gnw_path, main_netfile_path, net_size, num_nets_per_size, outdir_path):
""" Generate random network modules extracted from main_net_filename
Args:
gnw_path: path to GeneNetWeaver ".jar" file.
main_netfile_path: filepath for the main network used for module extraction.
net_size: size of the extracted modules.
num_nets_per_size: number of modules to be extracted for each size.
outdir_path: directory to store extracted network modules.
"""
if not os.path.exists(outdir_path):
os.mkdir(outdir_path)
command = ("java -jar {!s} --extract -c data/settings.txt --input-net {!s}"
" --input-net-format=0 --random-seed --greedy-selection --subnet-size={!s}"
" --num-subnets={!s} --output-net-format=0 --output-path={!s}"
).format(gnw_path, main_netfile_path, net_size, num_nets_per_size, outdir_path)
subprocess.run([command], shell=True)
file_prefix = os.path.basename(main_netfile_path)[:-4]
generated_files = glob.glob("{!s}/{!s}*.tsv".format(outdir_path, file_prefix))
return generated_files | Generate random network modules extracted from main_net_filename
Args:
gnw_path: path to GeneNetWeaver ".jar" file.
main_netfile_path: filepath for the main network used for module extraction.
net_size: size of the extracted modules.
num_nets_per_size: number of modules to be extracted for each size.
outdir_path: directory to store extracted network modules.
| Generate random network modules extracted from main_net_filename | [
"Generate",
"random",
"network",
"modules",
"extracted",
"from",
"main_net_filename"
] | def __generate_nets(gnw_path, main_netfile_path, net_size, num_nets_per_size, outdir_path):
if not os.path.exists(outdir_path):
os.mkdir(outdir_path)
command = ("java -jar {!s} --extract -c data/settings.txt --input-net {!s}"
" --input-net-format=0 --random-seed --greedy-selection --subnet-size={!s}"
" --num-subnets={!s} --output-net-format=0 --output-path={!s}"
).format(gnw_path, main_netfile_path, net_size, num_nets_per_size, outdir_path)
subprocess.run([command], shell=True)
file_prefix = os.path.basename(main_netfile_path)[:-4]
generated_files = glob.glob("{!s}/{!s}*.tsv".format(outdir_path, file_prefix))
return generated_files | [
"def",
"__generate_nets",
"(",
"gnw_path",
",",
"main_netfile_path",
",",
"net_size",
",",
"num_nets_per_size",
",",
"outdir_path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"outdir_path",
")",
":",
"os",
".",
"mkdir",
"(",
"outdir_path",
")",
"command",
"=",
"(",
"\"java -jar {!s} --extract -c data/settings.txt --input-net {!s}\"",
"\" --input-net-format=0 --random-seed --greedy-selection --subnet-size={!s}\"",
"\" --num-subnets={!s} --output-net-format=0 --output-path={!s}\"",
")",
".",
"format",
"(",
"gnw_path",
",",
"main_netfile_path",
",",
"net_size",
",",
"num_nets_per_size",
",",
"outdir_path",
")",
"subprocess",
".",
"run",
"(",
"[",
"command",
"]",
",",
"shell",
"=",
"True",
")",
"file_prefix",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"main_netfile_path",
")",
"[",
":",
"-",
"4",
"]",
"generated_files",
"=",
"glob",
".",
"glob",
"(",
"\"{!s}/{!s}*.tsv\"",
".",
"format",
"(",
"outdir_path",
",",
"file_prefix",
")",
")",
"return",
"generated_files"
] | Generate random network modules extracted from main_net_filename | [
"Generate",
"random",
"network",
"modules",
"extracted",
"from",
"main_net_filename"
] | [
"\"\"\" Generate random network modules extracted from main_net_filename\n Args:\n gnw_path: path to GeneNetWeaver \".jar\" file.\n main_netfile_path: filepath for the main network used for module extraction.\n net_size: size of the extracted modules.\n num_nets_per_size: number of modules to be extracted for each size.\n outdir_path: directory to store extracted network modules.\n \"\"\""
] | [
{
"param": "gnw_path",
"type": null
},
{
"param": "main_netfile_path",
"type": null
},
{
"param": "net_size",
"type": null
},
{
"param": "num_nets_per_size",
"type": null
},
{
"param": "outdir_path",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "gnw_path",
"type": null,
"docstring": "path to GeneNetWeaver \".jar\" file.",
"docstring_tokens": [
"path",
"to",
"GeneNetWeaver",
"\"",
".",
"jar",
"\"",
"file",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "main_netfile_path",
"type": null,
"docstring": "filepath for the main network used for module extraction.",
"docstring_tokens": [
"filepath",
"for",
"the",
"main",
"network",
"used",
"for",
"module",
"extraction",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "net_size",
"type": null,
"docstring": "size of the extracted modules.",
"docstring_tokens": [
"size",
"of",
"the",
"extracted",
"modules",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "num_nets_per_size",
"type": null,
"docstring": "number of modules to be extracted for each size.",
"docstring_tokens": [
"number",
"of",
"modules",
"to",
"be",
"extracted",
"for",
"each",
"size",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "outdir_path",
"type": null,
"docstring": "directory to store extracted network modules.",
"docstring_tokens": [
"directory",
"to",
"store",
"extracted",
"network",
"modules",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import subprocess
import os
import glob
def __generate_nets(gnw_path, main_netfile_path, net_size, num_nets_per_size, outdir_path):
if not os.path.exists(outdir_path):
os.mkdir(outdir_path)
command = ("java -jar {!s} --extract -c data/settings.txt --input-net {!s}"
" --input-net-format=0 --random-seed --greedy-selection --subnet-size={!s}"
" --num-subnets={!s} --output-net-format=0 --output-path={!s}"
).format(gnw_path, main_netfile_path, net_size, num_nets_per_size, outdir_path)
subprocess.run([command], shell=True)
file_prefix = os.path.basename(main_netfile_path)[:-4]
generated_files = glob.glob("{!s}/{!s}*.tsv".format(outdir_path, file_prefix))
return generated_files | 20 | 539 |
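
A usage sketch with made-up paths and sizes (`__generate_nets` and its imports from the record above assumed in scope; GNW itself must be installed for the subprocess call to succeed):

```python
files = __generate_nets(
    gnw_path="tools/gnw-3.1.jar",               # hypothetical paths
    main_netfile_path="data/ecoli_full_net.tsv",
    net_size=50,
    num_nets_per_size=10,
    outdir_path="out/modules",
)
print(files)  # paths matching out/modules/ecoli_full_net*.tsv; names depend on GNW
```
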
5258c905652f8e689e7f69238996a5b2f5d95f0d | clbarnes/pyboolnet | boolnet/misc.py | [
"MIT"
] | Python | prod | <not_specific> | def prod(numbers):
"""
Find the product of a sequence
:param numbers: Sequence of numbers
:return: Their product
"""
ret = 1
for number in numbers:
ret *= number
return ret |
Find the product of a sequence
:param numbers: Sequence of numbers
:return: Their product
| Find the product of a sequence | [
"Find",
"the",
"product",
"of",
"a",
"sequence"
] | def prod(numbers):
ret = 1
for number in numbers:
ret *= number
return ret | [
"def",
"prod",
"(",
"numbers",
")",
":",
"ret",
"=",
"1",
"for",
"number",
"in",
"numbers",
":",
"ret",
"*=",
"number",
"return",
"ret"
] | Find the product of a sequence | [
"Find",
"the",
"product",
"of",
"a",
"sequence"
] | [
"\"\"\"\n Find the product of a sequence\n\n :param numbers: Sequence of numbers\n :return: Their product\n \"\"\""
] | [
{
"param": "numbers",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "numbers",
"type": null,
"docstring": "Sequence of numbers",
"docstring_tokens": [
"Sequence",
"of",
"numbers"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def prod(numbers):
ret = 1
for number in numbers:
ret *= number
return ret | 23 | 413 |
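
A self-contained check, including the empty-sequence edge case:

```python
def prod(numbers):
    ret = 1
    for number in numbers:
        ret *= number
    return ret

print(prod([2, 3, 4]))  # -> 24
print(prod([]))         # -> 1, the empty product
```
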
664e3c76f043a3063bee9e66192f6daf1dad61e8 | sullivanmj/pywemo | pywemo/ouimeaux_device/api/rules_db.py | [
"MIT"
] | Python | _unpack_db | <not_specific> | def _unpack_db(content, db_file):
"""Unpack the sqlite database from a .zip file content."""
zip_contents = io.BytesIO(content)
with zipfile.ZipFile(zip_contents) as zip_file:
inner_file_name = zip_file.namelist()[0]
with zip_file.open(inner_file_name) as zipped_db_file:
db_file.write(zipped_db_file.read())
db_file.flush()
return inner_file_name
raise RuntimeError("Could not find database within zip file") | Unpack the sqlite database from a .zip file content. | Unpack the sqlite database from a .zip file content. | [
"Unpack",
"the",
"sqlite",
"database",
"from",
"a",
".",
"zip",
"file",
"content",
"."
] | def _unpack_db(content, db_file):
zip_contents = io.BytesIO(content)
with zipfile.ZipFile(zip_contents) as zip_file:
inner_file_name = zip_file.namelist()[0]
with zip_file.open(inner_file_name) as zipped_db_file:
db_file.write(zipped_db_file.read())
db_file.flush()
return inner_file_name
raise RuntimeError("Could not find database within zip file") | [
"def",
"_unpack_db",
"(",
"content",
",",
"db_file",
")",
":",
"zip_contents",
"=",
"io",
".",
"BytesIO",
"(",
"content",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"zip_contents",
")",
"as",
"zip_file",
":",
"inner_file_name",
"=",
"zip_file",
".",
"namelist",
"(",
")",
"[",
"0",
"]",
"with",
"zip_file",
".",
"open",
"(",
"inner_file_name",
")",
"as",
"zipped_db_file",
":",
"db_file",
".",
"write",
"(",
"zipped_db_file",
".",
"read",
"(",
")",
")",
"db_file",
".",
"flush",
"(",
")",
"return",
"inner_file_name",
"raise",
"RuntimeError",
"(",
"\"Could not find database within zip file\"",
")"
] | Unpack the sqlite database from a .zip file content. | [
"Unpack",
"the",
"sqlite",
"database",
"from",
"a",
".",
"zip",
"file",
"content",
"."
] | [
"\"\"\"Unpack the sqlite database from a .zip file content.\"\"\""
] | [
{
"param": "content",
"type": null
},
{
"param": "db_file",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "content",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "db_file",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import zipfile
import io
def _unpack_db(content, db_file):
zip_contents = io.BytesIO(content)
with zipfile.ZipFile(zip_contents) as zip_file:
inner_file_name = zip_file.namelist()[0]
with zip_file.open(inner_file_name) as zipped_db_file:
db_file.write(zipped_db_file.read())
db_file.flush()
return inner_file_name
raise RuntimeError("Could not find database within zip file") | 24 | 619 |
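
A usage sketch (the zip path is made up; `_unpack_db` and its `io`/`zipfile` imports are assumed in scope). Note that the trailing `raise` is unreachable as written: the `return` sits inside both `with` blocks, and an empty archive would raise `IndexError` from `namelist()[0]` instead.

```python
import tempfile

with open("rules.zip", "rb") as f, tempfile.NamedTemporaryFile() as db_file:
    inner_name = _unpack_db(f.read(), db_file)  # db_file now holds the sqlite db
    print(inner_name)
```
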
ae5462bd222a5c5bd4c72139814f3a36c73c6486 | 4con/hello-gn | build/android/apksize.py | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | Python | print_human_readable_size_info | null | def print_human_readable_size_info(apk):
"""Prints size information in human readable format.
Args:
apk: ApkSizeInfo object
"""
files = apk.processed_files
logging.critical('Stats for files as they exist within the apk:')
for ext in files:
logging.critical(' %-8s %s bytes in %s files', ext,
files[ext]['compressed_bytes'], files[ext]['number'])
logging.critical('--------------------------------------')
logging.critical(
'All Files: %s bytes in %s files', apk.compressed_size, apk.total_files)
logging.critical('APK Size: %s', apk.apk_size)
logging.critical('APK overhead: %s', apk.apk_size - apk.compressed_size)
logging.critical('--------------------------------------')
logging.critical('Stats for files when extracted from the apk:')
for ext in files:
logging.critical(' %-8s %s bytes in %s files', ext,
files[ext]['uncompressed_bytes'], files[ext]['number'])
logging.critical('--------------------------------------')
logging.critical(
'All Files: %s bytes in %s files', apk.uncompressed_size, apk.total_files) | Prints size information in human readable format.
Args:
apk: ApkSizeInfo object
| Prints size information in human readable format. | [
"Prints",
"size",
"information",
"in",
"human",
"readable",
"format",
"."
] | def print_human_readable_size_info(apk):
files = apk.processed_files
logging.critical('Stats for files as they exist within the apk:')
for ext in files:
logging.critical(' %-8s %s bytes in %s files', ext,
files[ext]['compressed_bytes'], files[ext]['number'])
logging.critical('--------------------------------------')
logging.critical(
'All Files: %s bytes in %s files', apk.compressed_size, apk.total_files)
logging.critical('APK Size: %s', apk.apk_size)
logging.critical('APK overhead: %s', apk.apk_size - apk.compressed_size)
logging.critical('--------------------------------------')
logging.critical('Stats for files when extracted from the apk:')
for ext in files:
logging.critical(' %-8s %s bytes in %s files', ext,
files[ext]['uncompressed_bytes'], files[ext]['number'])
logging.critical('--------------------------------------')
logging.critical(
'All Files: %s bytes in %s files', apk.uncompressed_size, apk.total_files) | [
"def",
"print_human_readable_size_info",
"(",
"apk",
")",
":",
"files",
"=",
"apk",
".",
"processed_files",
"logging",
".",
"critical",
"(",
"'Stats for files as they exist within the apk:'",
")",
"for",
"ext",
"in",
"files",
":",
"logging",
".",
"critical",
"(",
"' %-8s %s bytes in %s files'",
",",
"ext",
",",
"files",
"[",
"ext",
"]",
"[",
"'compressed_bytes'",
"]",
",",
"files",
"[",
"ext",
"]",
"[",
"'number'",
"]",
")",
"logging",
".",
"critical",
"(",
"'--------------------------------------'",
")",
"logging",
".",
"critical",
"(",
"'All Files: %s bytes in %s files'",
",",
"apk",
".",
"compressed_size",
",",
"apk",
".",
"total_files",
")",
"logging",
".",
"critical",
"(",
"'APK Size: %s'",
",",
"apk",
".",
"apk_size",
")",
"logging",
".",
"critical",
"(",
"'APK overhead: %s'",
",",
"apk",
".",
"apk_size",
"-",
"apk",
".",
"compressed_size",
")",
"logging",
".",
"critical",
"(",
"'--------------------------------------'",
")",
"logging",
".",
"critical",
"(",
"'Stats for files when extracted from the apk:'",
")",
"for",
"ext",
"in",
"files",
":",
"logging",
".",
"critical",
"(",
"' %-8s %s bytes in %s files'",
",",
"ext",
",",
"files",
"[",
"ext",
"]",
"[",
"'uncompressed_bytes'",
"]",
",",
"files",
"[",
"ext",
"]",
"[",
"'number'",
"]",
")",
"logging",
".",
"critical",
"(",
"'--------------------------------------'",
")",
"logging",
".",
"critical",
"(",
"'All Files: %s bytes in %s files'",
",",
"apk",
".",
"uncompressed_size",
",",
"apk",
".",
"total_files",
")"
] | Prints size information in human readable format. | [
"Prints",
"size",
"information",
"in",
"human",
"readable",
"format",
"."
] | [
"\"\"\"Prints size information in human readable format.\n\n Args:\n apk: ApkSizeInfo object\n \"\"\""
] | [
{
"param": "apk",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "apk",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import logging
def print_human_readable_size_info(apk):
files = apk.processed_files
logging.critical('Stats for files as they exist within the apk:')
for ext in files:
logging.critical(' %-8s %s bytes in %s files', ext,
files[ext]['compressed_bytes'], files[ext]['number'])
logging.critical('--------------------------------------')
logging.critical(
'All Files: %s bytes in %s files', apk.compressed_size, apk.total_files)
logging.critical('APK Size: %s', apk.apk_size)
logging.critical('APK overhead: %s', apk.apk_size - apk.compressed_size)
logging.critical('--------------------------------------')
logging.critical('Stats for files when extracted from the apk:')
for ext in files:
logging.critical(' %-8s %s bytes in %s files', ext,
files[ext]['uncompressed_bytes'], files[ext]['number'])
logging.critical('--------------------------------------')
logging.critical(
'All Files: %s bytes in %s files', apk.uncompressed_size, apk.total_files) | 25 | 803 |
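Usage sketch for `print_human_readable_size_info` above. The `ApkSizeInfo` type is not part of the record, so a stand-in carrying only the attributes the function reads is assumed here; all sizes are made up.

from types import SimpleNamespace

apk = SimpleNamespace(
    processed_files={".dex": {"compressed_bytes": 1000,
                              "uncompressed_bytes": 2500, "number": 2}},
    compressed_size=1000, uncompressed_size=2500,
    total_files=2, apk_size=1200,
)
print_human_readable_size_info(apk)  # logs both compressed and extracted stats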
cfb9c7e09aba8ed937f808b341f34b7eee06405b | finex-dev/finex | finex/ratios.py | [
"BSD-3-Clause"
] | Python | roce | <not_specific> | def roce(net_income, preferred_dividends, average_common_equity):
"""Computes return on common equity.
Parameters
----------
net_income : int or float
Net income
preferred_dividends : int or float
Preferred dividends
average_common_equity : int or float
Average common equity
Returns
-------
out : int or float
Return on common equity
"""
return (net_income - preferred_dividends) / average_common_equity | Computes return on common equity.
Parameters
----------
net_income : int or float
Net income
preferred_dividends : int or float
Preferred dividends
average_common_equity : int or float
Average common equity
Returns
-------
out : int or float
Return on common equity
| Computes return on common equity.
Parameters
net_income : int or float
Net income
preferred_dividends : int or float
Preferred dividends
average_common_equity : int or float
Average common equity
Returns
out : int or float
Return on common equity | [
"Computes",
"return",
"on",
"common",
"equity",
".",
"Parameters",
"net_income",
":",
"int",
"or",
"float",
"Net",
"income",
"preferred_dividends",
":",
"int",
"or",
"float",
"Preferred",
"dividends",
"average_common_equity",
":",
"int",
"or",
"float",
"Average",
"common",
"equity",
"Returns",
"out",
":",
"int",
"or",
"float",
"Return",
"on",
"common",
"equity"
] | def roce(net_income, preferred_dividends, average_common_equity):
return (net_income - preferred_dividends) / average_common_equity | [
"def",
"roce",
"(",
"net_income",
",",
"preferred_dividends",
",",
"average_common_equity",
")",
":",
"return",
"(",
"net_income",
"-",
"preferred_dividends",
")",
"/",
"average_common_equity"
] | Computes return on common equity. | [
"Computes",
"return",
"on",
"common",
"equity",
"."
] | [
"\"\"\"Computes return on common equity.\n\n Parameters\n ----------\n net_income : int or float\n Net income\n preferred_dividends : int or float\n Preferred dividends\n average_common_equity : int or float\n Average common equity\n\n Returns\n -------\n out : int or float\n Return on common equity\n \"\"\""
] | [
{
"param": "net_income",
"type": null
},
{
"param": "preferred_dividends",
"type": null
},
{
"param": "average_common_equity",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "net_income",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "preferred_dividends",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "average_common_equity",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def roce(net_income, preferred_dividends, average_common_equity):
return (net_income - preferred_dividends) / average_common_equity | 26 | 306 |
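Worked example for `roce` above, with illustrative figures:

# net income 120, preferred dividends 20, average common equity 500
print(roce(120, 20, 500))  # -> 0.2, i.e. a 20% return on common equity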
177f8f7c5c114b75e46eb141489e8ed3694972a9 | guesswhohaha/learntools | learntools/core/asserts.py | [
"Apache-2.0"
] | Python | assert_has_columns | null | def assert_has_columns(df, cols, name="dataframe", strict=False):
"""Assert that the given dataframe contains columns with the given names.
If strict is True, then assert it has *only* those columns.
"""
for col in cols:
assert col in df.columns, "Expected {} to have column `{}`".format(
name, col
)
if strict:
for col in df.columns:
msg = "Unexpected column in {}: `{}`".format(name, col)
assert col in cols, msg | Assert that the given dataframe contains columns with the given names.
If strict is True, then assert it has *only* those columns.
| Assert that the given dataframe contains columns with the given names.
If strict is True, then assert it has *only* those columns. | [
"Assert",
"that",
"the",
"given",
"dataframe",
"contains",
"columns",
"with",
"the",
"given",
"names",
".",
"If",
"strict",
"is",
"True",
"then",
"assert",
"it",
"has",
"*",
"only",
"*",
"those",
"columns",
"."
] | def assert_has_columns(df, cols, name="dataframe", strict=False):
for col in cols:
assert col in df.columns, "Expected {} to have column `{}`".format(
name, col
)
if strict:
for col in df.columns:
msg = "Unexpected column in {}: `{}`".format(name, col)
assert col in cols, msg | [
"def",
"assert_has_columns",
"(",
"df",
",",
"cols",
",",
"name",
"=",
"\"dataframe\"",
",",
"strict",
"=",
"False",
")",
":",
"for",
"col",
"in",
"cols",
":",
"assert",
"col",
"in",
"df",
".",
"columns",
",",
"\"Expected {} to have column `{}`\"",
".",
"format",
"(",
"name",
",",
"col",
")",
"if",
"strict",
":",
"for",
"col",
"in",
"df",
".",
"columns",
":",
"msg",
"=",
"\"Unexpected column in {}: `{}`\"",
".",
"format",
"(",
"name",
",",
"col",
")",
"assert",
"col",
"in",
"cols",
",",
"msg"
] | Assert that the given dataframe contains columns with the given names. | [
"Assert",
"that",
"the",
"given",
"dataframe",
"contains",
"columns",
"with",
"the",
"given",
"names",
"."
] | [
"\"\"\"Assert that the given dataframe contains columns with the given names.\n If strict is True, then assert it has *only* those columns.\n \"\"\""
] | [
{
"param": "df",
"type": null
},
{
"param": "cols",
"type": null
},
{
"param": "name",
"type": null
},
{
"param": "strict",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "df",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "cols",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "strict",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def assert_has_columns(df, cols, name="dataframe", strict=False):
for col in cols:
assert col in df.columns, "Expected {} to have column `{}`".format(
name, col
)
if strict:
for col in df.columns:
msg = "Unexpected column in {}: `{}`".format(name, col)
assert col in cols, msg | 27 | 419 |
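Usage sketch for `assert_has_columns` above. Only a `.columns` attribute is consulted, so a simple stand-in replaces a real pandas DataFrame here.

from types import SimpleNamespace

df = SimpleNamespace(columns=["id", "price"])
assert_has_columns(df, ["id"])                        # passes
assert_has_columns(df, ["id", "price"], strict=True)  # passes
try:
    assert_has_columns(df, ["id"], strict=True)       # `price` is unexpected
except AssertionError as err:
    print(err)  # -> Unexpected column in dataframe: `price`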
bb9bc874cfb0cf7a29aef5ba2b188b6dea437065 | maurogaravello/Pirati | plot.py | [
"MIT"
] | Python | check_negative | <not_specific> | def check_negative(value):
""" check if a value is negative"""
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s is an invalid entry" % value)
return ivalue | check if a value is negative | check if a value is negative | [
"check",
"if",
"a",
"value",
"is",
"negative"
] | def check_negative(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s is an invalid entry" % value)
return ivalue | [
"def",
"check_negative",
"(",
"value",
")",
":",
"ivalue",
"=",
"int",
"(",
"value",
")",
"if",
"ivalue",
"<=",
"0",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"\"%s is an invalid entry\"",
"%",
"value",
")",
"return",
"ivalue"
] | check if a value is negative | [
"check",
"if",
"a",
"value",
"is",
"negative"
] | [
"\"\"\" check if a value is negative\"\"\""
] | [
{
"param": "value",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "value",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import argparse
def check_negative(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s is an invalid entry" % value)
return ivalue | 28 | 856 |
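Usage sketch for `check_negative` above; the flag name is illustrative. Despite the docstring, the validator rejects zero as well as negatives, i.e. it enforces a positive integer.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-n", type=check_negative)
print(parser.parse_args(["-n", "5"]).n)  # -> 5
# parser.parse_args(["-n", "0"]) exits with "0 is an invalid entry"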
642f0f99428865d2e704e0a3535fd3e4cd3f9e98 | larsOhne/pvdn | pvdn/detection/train.py | [
"CC0-1.0"
] | Python | print_end_summary | null | def print_end_summary(epochs: int, output_dir: str, best_epochs: dict,
best_metrics: dict):
"""
Outputs the final summary to the terminal.
:param epochs: number of epochs the model has been trained
:type epochs: int
:param output_dir: directory where the results are written to
:type output_dir: str
:param best_epochs: dictionary with metrics as keys, and epoch number of
the model that has the best performance in it as value
:type best_epochs: dict
:param best_metrics: dictionary storing the best result for each metric
:type best_metrics: dict
"""
print(f"\nTraining completed successfully for {epochs} epochs.\n"
f"Logs written to {output_dir}.")
best_str = [f"{k}: {v:4f} (epoch {best_epochs[k]})" for k, v in
best_metrics.items()]
print(f"-------------- Best --------------")
for s in best_str:
print(s) |
Outputs the final summary to the terminal.
:param epochs: number of epochs the model has been trained
:type epochs: int
:param output_dir: directory where the results are written to
:type output_dir: str
:param best_epochs: dictionary with metrics as keys, and epoch number of
the model that has the best performance in it as value
:type best_epochs: dict
:param best_metrics: dictionary storing the best result for each metric
:type best_metrics: dict
| Outputs the final summary to the terminal. | [
"Outputs",
"the",
"final",
"summary",
"to",
"the",
"terminal",
"."
] | def print_end_summary(epochs: int, output_dir: str, best_epochs: dict,
best_metrics: dict):
print(f"\nTraining completed successfully for {epochs} epochs.\n"
f"Logs written to {output_dir}.")
best_str = [f"{k}: {v:4f} (epoch {best_epochs[k]})" for k, v in
best_metrics.items()]
print(f"-------------- Best --------------")
for s in best_str:
print(s) | [
"def",
"print_end_summary",
"(",
"epochs",
":",
"int",
",",
"output_dir",
":",
"str",
",",
"best_epochs",
":",
"dict",
",",
"best_metrics",
":",
"dict",
")",
":",
"print",
"(",
"f\"\\nTraining completed successfully for {epochs} epochs.\\n\"",
"f\"Logs written to {output_dir}.\"",
")",
"best_str",
"=",
"[",
"f\"{k}: {v:4f} (epoch {best_epochs[k]})\"",
"for",
"k",
",",
"v",
"in",
"best_metrics",
".",
"items",
"(",
")",
"]",
"print",
"(",
"f\"-------------- Best --------------\"",
")",
"for",
"s",
"in",
"best_str",
":",
"print",
"(",
"s",
")"
] | Outputs the final summary to the terminal. | [
"Outputs",
"the",
"final",
"summary",
"to",
"the",
"terminal",
"."
] | [
"\"\"\"\n Outputs the final summary to the terminal.\n :param epochs: number of epochs the model has been trained\n :type epochs: int\n :param output_dir: directory where the results are written to\n :type output_dir: str\n :param best_epochs: dictionary with metrics as keys, and epoch number of\n the model that has the best performance in it as value\n :type best_epochs: dict\n :param best_metrics: dictionary storing the best result for each metric\n :type best_metrics: dict\n \"\"\""
] | [
{
"param": "epochs",
"type": "int"
},
{
"param": "output_dir",
"type": "str"
},
{
"param": "best_epochs",
"type": "dict"
},
{
"param": "best_metrics",
"type": "dict"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "epochs",
"type": "int",
"docstring": "number of epochs the model has been trained",
"docstring_tokens": [
"number",
"of",
"epochs",
"the",
"model",
"has",
"been",
"trained"
],
"default": null,
"is_optional": null
},
{
"identifier": "output_dir",
"type": "str",
"docstring": "directory where the results are written to",
"docstring_tokens": [
"directory",
"where",
"the",
"results",
"are",
"written",
"to"
],
"default": null,
"is_optional": null
},
{
"identifier": "best_epochs",
"type": "dict",
"docstring": "dictionary with metrics as keys, and epoch number of\nthe model that has the best performance in it as value",
"docstring_tokens": [
"dictionary",
"with",
"metrics",
"as",
"keys",
"and",
"epoch",
"number",
"of",
"the",
"model",
"that",
"has",
"the",
"best",
"performance",
"in",
"it",
"as",
"value"
],
"default": null,
"is_optional": null
},
{
"identifier": "best_metrics",
"type": "dict",
"docstring": "dictionary storing the best result for each metric",
"docstring_tokens": [
"dictionary",
"storing",
"the",
"best",
"result",
"for",
"each",
"metric"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def print_end_summary(epochs: int, output_dir: str, best_epochs: dict,
best_metrics: dict):
print(f"\nTraining completed successfully for {epochs} epochs.\n"
f"Logs written to {output_dir}.")
best_str = [f"{k}: {v:4f} (epoch {best_epochs[k]})" for k, v in
best_metrics.items()]
print(f"-------------- Best --------------")
for s in best_str:
print(s) | 29 | 22 |
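Usage sketch for `print_end_summary` above, with made-up metric names and values:

print_end_summary(
    epochs=10,
    output_dir="runs/exp1",  # illustrative path
    best_epochs={"f1": 7, "precision": 9},
    best_metrics={"f1": 0.8123, "precision": 0.9001},
)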
8a51d4283261aa0674abb481617734ad2aaf10f1 | j19sch/pytest-logfest | pytest_logfest/plugin.py | [
"MIT"
] | Python | fxt_function_logger | null | def fxt_function_logger(request, module_logger, session_filememoryhandler):
"""
Yields a logger, child of the module logger and named the name of the function.
Adds records for test started, setup error, test fail, and test ended.
"""
logger = module_logger.getChild(request.node.name)
logger.info("TEST STARTED")
yield logger
try:
if request.node.rep_setup.failed:
logger.warning("SETUP ERROR")
except AttributeError:
pass
try:
if request.node.rep_call.failed:
logger.warning("TEST FAIL")
except AttributeError:
pass
logger.info("TEST ENDED\n")
session_filememoryhandler.flush_with_filter_on_info() |
Yields a logger, child of the module logger and named the name of the function.
Adds records for test started, setup error, test fail, and test ended.
| Yields a logger, child of the module logger and named the name of the function.
Adds records for test started, setup error, test fail, and test ended. | [
"Yields",
"a",
"logger",
"child",
"of",
"the",
"module",
"logger",
"and",
"named",
"the",
"name",
"of",
"the",
"function",
".",
"Adds",
"records",
"for",
"test",
"started",
"setup",
"error",
"test",
"fail",
"and",
"test",
"ended",
"."
] | def fxt_function_logger(request, module_logger, session_filememoryhandler):
logger = module_logger.getChild(request.node.name)
logger.info("TEST STARTED")
yield logger
try:
if request.node.rep_setup.failed:
logger.warning("SETUP ERROR")
except AttributeError:
pass
try:
if request.node.rep_call.failed:
logger.warning("TEST FAIL")
except AttributeError:
pass
logger.info("TEST ENDED\n")
session_filememoryhandler.flush_with_filter_on_info() | [
"def",
"fxt_function_logger",
"(",
"request",
",",
"module_logger",
",",
"session_filememoryhandler",
")",
":",
"logger",
"=",
"module_logger",
".",
"getChild",
"(",
"request",
".",
"node",
".",
"name",
")",
"logger",
".",
"info",
"(",
"\"TEST STARTED\"",
")",
"yield",
"logger",
"try",
":",
"if",
"request",
".",
"node",
".",
"rep_setup",
".",
"failed",
":",
"logger",
".",
"warning",
"(",
"\"SETUP ERROR\"",
")",
"except",
"AttributeError",
":",
"pass",
"try",
":",
"if",
"request",
".",
"node",
".",
"rep_call",
".",
"failed",
":",
"logger",
".",
"warning",
"(",
"\"TEST FAIL\"",
")",
"except",
"AttributeError",
":",
"pass",
"logger",
".",
"info",
"(",
"\"TEST ENDED\\n\"",
")",
"session_filememoryhandler",
".",
"flush_with_filter_on_info",
"(",
")"
] | Yields a logger, child of the module logger and named the name of the function. | [
"Yields",
"a",
"logger",
"child",
"of",
"the",
"module",
"logger",
"and",
"named",
"the",
"name",
"of",
"the",
"function",
"."
] | [
"\"\"\"\n Yields a logger, child of the module logger and named the name of the function.\n Adds records for test started, setup error, test fail, and test ended.\n\n \"\"\""
] | [
{
"param": "request",
"type": null
},
{
"param": "module_logger",
"type": null
},
{
"param": "session_filememoryhandler",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "request",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "module_logger",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "session_filememoryhandler",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def fxt_function_logger(request, module_logger, session_filememoryhandler):
logger = module_logger.getChild(request.node.name)
logger.info("TEST STARTED")
yield logger
try:
if request.node.rep_setup.failed:
logger.warning("SETUP ERROR")
except AttributeError:
pass
try:
if request.node.rep_call.failed:
logger.warning("TEST FAIL")
except AttributeError:
pass
logger.info("TEST ENDED\n")
session_filememoryhandler.flush_with_filter_on_info() | 30 | 301 |
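Outside of pytest, the yield-style fixture generator above can be driven by hand. The stub `request` and handler objects below are assumptions matching only the attributes the record touches; the missing `rep_setup`/`rep_call` attributes simply exercise the AttributeError branches.

import logging
from types import SimpleNamespace

logging.basicConfig(level=logging.INFO)
request = SimpleNamespace(node=SimpleNamespace(name="test_example"))
handler = SimpleNamespace(flush_with_filter_on_info=lambda: None)

gen = fxt_function_logger(request, logging.getLogger("module"), handler)
logger = next(gen)         # setup phase: logs "TEST STARTED"
logger.info("inside the test body")
next(gen, None)            # teardown: logs "TEST ENDED" and flushes the handler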
4d978bbc81c4e00039d1224dd4790f6483e5af9b | abugler/SMLFinalProject | runners/utils.py | [
"MIT"
] | Python | disp_script | null | def disp_script(spec):
"""
Displays the arguments for a script in a readable fashion in
logging.
Args:
spec (dict): Dictionary containing script parameters.
"""
logging.info(
f"\n"
f" Running {spec['script']} with args:\n"
f" config: {spec['config']}\n"
f" run_in: {spec['run_in']}\n"
f" num_gpus: {spec['num_gpus']}\n"
f" blocking: {spec['blocking']}\n"
) |
Displays the arguments for a script in a readable fashion in
logging.
Args:
spec (dict): Dictionary containing script parameters.
| Displays the arguments for a script in a readable fashion in
logging. | [
"Displays",
"the",
"arguments",
"for",
"a",
"script",
"in",
"a",
"readable",
"fashion",
"in",
"logging",
"."
] | def disp_script(spec):
logging.info(
f"\n"
f" Running {spec['script']} with args:\n"
f" config: {spec['config']}\n"
f" run_in: {spec['run_in']}\n"
f" num_gpus: {spec['num_gpus']}\n"
f" blocking: {spec['blocking']}\n"
) | [
"def",
"disp_script",
"(",
"spec",
")",
":",
"logging",
".",
"info",
"(",
"f\"\\n\"",
"f\" Running {spec['script']} with args:\\n\"",
"f\" config: {spec['config']}\\n\"",
"f\" run_in: {spec['run_in']}\\n\"",
"f\" num_gpus: {spec['num_gpus']}\\n\"",
"f\" blocking: {spec['blocking']}\\n\"",
")"
] | Displays the arguments for a script in a readable fashion in
logging. | [
"Displays",
"the",
"arguments",
"for",
"a",
"script",
"in",
"a",
"readable",
"fashion",
"in",
"logging",
"."
] | [
"\"\"\"\n Displays the arguments for a script in a readable fashion in\n logging.\n \n Args:\n spec (dict): Dictionary containing script parameters.\n \"\"\""
] | [
{
"param": "spec",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "spec",
"type": null,
"docstring": "Dictionary containing script parameters.",
"docstring_tokens": [
"Dictionary",
"containing",
"script",
"parameters",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | import logging
def disp_script(spec):
logging.info(
f"\n"
f" Running {spec['script']} with args:\n"
f" config: {spec['config']}\n"
f" run_in: {spec['run_in']}\n"
f" num_gpus: {spec['num_gpus']}\n"
f" blocking: {spec['blocking']}\n"
) | 31 | 1017 |
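Usage sketch for `disp_script` above, with an illustrative spec dict:

import logging

logging.basicConfig(level=logging.INFO)
spec = {"script": "train.py", "config": "exp.yml",
        "run_in": "conda-env", "num_gpus": 1, "blocking": True}
disp_script(spec)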
bcc04efb1ef7d3258b74f184ec0c0258e4e8c5fa | elyase/altair | altair/expr/core.py | [
"BSD-3-Clause"
] | Python | _js_repr | <not_specific> | def _js_repr(val):
"""Return a javascript-safe string representation of val"""
if val is True:
return 'true'
elif val is False:
return 'false'
elif val is None:
return 'null'
else:
return repr(val) | Return a javascript-safe string representation of val | Return a javascript-safe string representation of val | [
"Return",
"a",
"javascript",
"-",
"safe",
"string",
"representation",
"of",
"val"
] | def _js_repr(val):
if val is True:
return 'true'
elif val is False:
return 'false'
elif val is None:
return 'null'
else:
return repr(val) | [
"def",
"_js_repr",
"(",
"val",
")",
":",
"if",
"val",
"is",
"True",
":",
"return",
"'true'",
"elif",
"val",
"is",
"False",
":",
"return",
"'false'",
"elif",
"val",
"is",
"None",
":",
"return",
"'null'",
"else",
":",
"return",
"repr",
"(",
"val",
")"
] | Return a javascript-safe string representation of val | [
"Return",
"a",
"javascript",
"-",
"safe",
"string",
"representation",
"of",
"val"
] | [
"\"\"\"Return a javascript-safe string representation of val\"\"\""
] | [
{
"param": "val",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "val",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _js_repr(val):
if val is True:
return 'true'
elif val is False:
return 'false'
elif val is None:
return 'null'
else:
return repr(val) | 32 | 126 |
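Worked example for `_js_repr` above:

print(_js_repr(True))     # -> true
print(_js_repr(None))     # -> null
print(_js_repr("field"))  # -> 'field' (falls back to Python repr)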
892fd63e452f2cae6fe22559ccb6584141ee4b92 | Bielorusse/Erdorbit_random | script.py | [
"MIT"
] | Python | multiplyArrayByScalar | <not_specific> | def multiplyArrayByScalar(inputArray, scalar):
'''
Multiplying each component of a 2D array by a scalar
- Inputs:
inputArray input array
scal input scalar
- Outputs:
outputArray output array
'''
outputArray = []
for j in range(len(inputArray)):
outputArray.append(inputArray[j])
for k in range(len(inputArray[j])):
outputArray[j][k] = inputArray[j][k] * scalar
return outputArray |
Multiplying each component of a 2D array by a scalar
- Inputs:
inputArray input array
scal input scalar
- Outputs:
outputArray output array
| Multiplying each component of a 2D array by a scalar
Inputs:
inputArray input array
scal input scalar
Outputs:
outputArray output array | [
"Multiplying",
"each",
"component",
"of",
"a",
"2D",
"array",
"by",
"a",
"scalar",
"Inputs",
":",
"inputArray",
"input",
"array",
"scal",
"input",
"scalar",
"Outputs",
":",
"outputArray",
"output",
"array"
] | def multiplyArrayByScalar(inputArray, scalar):
outputArray = []
for j in range(len(inputArray)):
outputArray.append(inputArray[j])
for k in range(len(inputArray[j])):
outputArray[j][k] = inputArray[j][k] * scalar
return outputArray | [
"def",
"multiplyArrayByScalar",
"(",
"inputArray",
",",
"scalar",
")",
":",
"outputArray",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"inputArray",
")",
")",
":",
"outputArray",
".",
"append",
"(",
"inputArray",
"[",
"j",
"]",
")",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"inputArray",
"[",
"j",
"]",
")",
")",
":",
"outputArray",
"[",
"j",
"]",
"[",
"k",
"]",
"=",
"inputArray",
"[",
"j",
"]",
"[",
"k",
"]",
"*",
"scalar",
"return",
"outputArray"
] | Multiplying each component of a 2D array by a scalar
Inputs:
inputArray input array
scal input scalar
Outputs:
outputArray output array | [
"Multiplying",
"each",
"component",
"of",
"a",
"2D",
"array",
"by",
"a",
"scalar",
"Inputs",
":",
"inputArray",
"input",
"array",
"scal",
"input",
"scalar",
"Outputs",
":",
"outputArray",
"output",
"array"
] | [
"'''\n\tMultiplying each component of a 2D array by a scalar\n \t- Inputs:\n inputArray input array\n scal input scalar\n \t- Outputs:\n \t\t outputArray output array\n '''"
] | [
{
"param": "inputArray",
"type": null
},
{
"param": "scalar",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "inputArray",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "scalar",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def multiplyArrayByScalar(inputArray, scalar):
outputArray = []
for j in range(len(inputArray)):
outputArray.append(inputArray[j])
for k in range(len(inputArray[j])):
outputArray[j][k] = inputArray[j][k] * scalar
return outputArray | 33 | 1004 |
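Usage sketch for `multiplyArrayByScalar` above. Note that the implementation appends the input rows by reference and then mutates them, so the input array is modified in place as well:

a = [[1, 2], [3, 4]]
b = multiplyArrayByScalar(a, 10)
print(b)  # -> [[10, 20], [30, 40]]
print(a)  # -> [[10, 20], [30, 40]] too: rows are shared, not copied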
950f789d70c9ff60ddf38726d7b8992d103f289c | google-research/pyreach | pyreach/common/base/debug.py | [
"Apache-2.0"
] | Python | debug | None | def debug(msg: str) -> None:
"""Write debug message to stderr.
Debug message consists file name, line number and function name
of the calling routine.
Args:
msg: a custom message.
"""
parent_frame = inspect.stack()[1]
file = parent_frame[1]
line = parent_frame[2]
func = parent_frame[3]
sys.stderr.write("{}:{}-{}: {}\n".format(file, line, func, msg))
sys.stderr.flush() | Write debug message to stderr.
Debug message consists file name, line number and function name
of the calling routine.
Args:
msg: a custom message.
| Write debug message to stderr.
Debug message consists file name, line number and function name
of the calling routine. | [
"Write",
"debug",
"message",
"to",
"stderr",
".",
"Debug",
"message",
"consists",
"file",
"name",
"line",
"number",
"and",
"function",
"name",
"of",
"the",
"calling",
"routine",
"."
] | def debug(msg: str) -> None:
parent_frame = inspect.stack()[1]
file = parent_frame[1]
line = parent_frame[2]
func = parent_frame[3]
sys.stderr.write("{}:{}-{}: {}\n".format(file, line, func, msg))
sys.stderr.flush() | [
"def",
"debug",
"(",
"msg",
":",
"str",
")",
"->",
"None",
":",
"parent_frame",
"=",
"inspect",
".",
"stack",
"(",
")",
"[",
"1",
"]",
"file",
"=",
"parent_frame",
"[",
"1",
"]",
"line",
"=",
"parent_frame",
"[",
"2",
"]",
"func",
"=",
"parent_frame",
"[",
"3",
"]",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"{}:{}-{}: {}\\n\"",
".",
"format",
"(",
"file",
",",
"line",
",",
"func",
",",
"msg",
")",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")"
] | Write debug message to stderr. | [
"Write",
"debug",
"message",
"to",
"stderr",
"."
] | [
"\"\"\"Write debug message to stderr.\n\n Debug message consists file name, line number and function name\n of the calling routine.\n\n Args:\n msg: a custom message.\n \"\"\""
] | [
{
"param": "msg",
"type": "str"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "msg",
"type": "str",
"docstring": "a custom message.",
"docstring_tokens": [
"a",
"custom",
"message",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import inspect
import sys
def debug(msg: str) -> None:
parent_frame = inspect.stack()[1]
file = parent_frame[1]
line = parent_frame[2]
func = parent_frame[3]
sys.stderr.write("{}:{}-{}: {}\n".format(file, line, func, msg))
sys.stderr.flush() | 34 | 504 |
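Usage sketch for `debug` above; the message is illustrative.

def compute():
    debug("checkpoint reached")  # stderr: <file>:<line>-compute: checkpoint reached

compute()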
9a6a097e06c1ab31ad5a3c4d60545ff85a3c54d4 | cookbrite/ebs-deploy | ebs_deploy/__init__.py | [
"MIT"
] | Python | override_scaling | <not_specific> | def override_scaling(option_settings, min_size, max_size):
""" takes the merged option_settings and injects custom min/max autoscaling sizes """
match_namespace = "aws:autoscaling:asg"
match_keys = {"MinSize": min_size, "MaxSize": max_size}
copied_option_settings = []
for (namespace, key, value) in option_settings:
new_option = (namespace, key, value)
if match_namespace == namespace and key in match_keys:
new_option = (namespace, key, match_keys[key])
copied_option_settings.append(new_option)
return copied_option_settings | takes the merged option_settings and injects custom min/max autoscaling sizes | takes the merged option_settings and injects custom min/max autoscaling sizes | [
"takes",
"the",
"merged",
"option_settings",
"and",
"injects",
"custom",
"min",
"/",
"max",
"autoscaling",
"sizes"
] | def override_scaling(option_settings, min_size, max_size):
match_namespace = "aws:autoscaling:asg"
match_keys = {"MinSize": min_size, "MaxSize": max_size}
copied_option_settings = []
for (namespace, key, value) in option_settings:
new_option = (namespace, key, value)
if match_namespace == namespace and key in match_keys:
new_option = (namespace, key, match_keys[key])
copied_option_settings.append(new_option)
return copied_option_settings | [
"def",
"override_scaling",
"(",
"option_settings",
",",
"min_size",
",",
"max_size",
")",
":",
"match_namespace",
"=",
"\"aws:autoscaling:asg\"",
"match_keys",
"=",
"{",
"\"MinSize\"",
":",
"min_size",
",",
"\"MaxSize\"",
":",
"max_size",
"}",
"copied_option_settings",
"=",
"[",
"]",
"for",
"(",
"namespace",
",",
"key",
",",
"value",
")",
"in",
"option_settings",
":",
"new_option",
"=",
"(",
"namespace",
",",
"key",
",",
"value",
")",
"if",
"match_namespace",
"==",
"namespace",
"and",
"key",
"in",
"match_keys",
":",
"new_option",
"=",
"(",
"namespace",
",",
"key",
",",
"match_keys",
"[",
"key",
"]",
")",
"copied_option_settings",
".",
"append",
"(",
"new_option",
")",
"return",
"copied_option_settings"
] | takes the merged option_settings and injects custom min/max autoscaling sizes | [
"takes",
"the",
"merged",
"option_settings",
"and",
"injects",
"custom",
"min",
"/",
"max",
"autoscaling",
"sizes"
] | [
"\"\"\" takes the merged option_settings and injects custom min/max autoscaling sizes \"\"\""
] | [
{
"param": "option_settings",
"type": null
},
{
"param": "min_size",
"type": null
},
{
"param": "max_size",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "option_settings",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "min_size",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "max_size",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def override_scaling(option_settings, min_size, max_size):
match_namespace = "aws:autoscaling:asg"
match_keys = {"MinSize": min_size, "MaxSize": max_size}
copied_option_settings = []
for (namespace, key, value) in option_settings:
new_option = (namespace, key, value)
if match_namespace == namespace and key in match_keys:
new_option = (namespace, key, match_keys[key])
copied_option_settings.append(new_option)
return copied_option_settings | 35 | 744 |
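Usage sketch for `override_scaling` above; the namespaces and values mimic the Elastic Beanstalk option format the function expects.

settings = [
    ("aws:autoscaling:asg", "MinSize", "1"),
    ("aws:autoscaling:asg", "MaxSize", "4"),
    ("aws:elasticbeanstalk:application", "Application Healthcheck URL", "/"),
]
print(override_scaling(settings, min_size="2", max_size="8"))
# MinSize/MaxSize are swapped in; the unrelated option passes through unchanged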
440b71cde91480a4acc2ad27be6b5c2c2c2604ee | iamholger/professor | pyext/professor2/ipol.py | [
"MIT"
] | Python | mk_ipolinputs | <not_specific> | def mk_ipolinputs(params):
"""
Make sorted run name and parameter name & value lists, suitable for passing to prof.Ipol
params is a dict (actually, prefer OrderedDict) of run_names -> param_vals,
as returned from read_rundata
"""
runs = sorted(params.keys())
if not runs:
return runs, [], [[]]
paramnames = params[runs[0]].keys()
paramslist = [[params[run][pn] for pn in paramnames] for run in runs]
return runs, paramnames, paramslist |
Make sorted run name and parameter name & value lists, suitable for passing to prof.Ipol
params is a dict (actually, prefer OrderedDict) of run_names -> param_vals,
as returned from read_rundata
| Make sorted run name and parameter name & value lists, suitable for passing to prof.Ipol
params is a dict (actually, prefer OrderedDict) of run_names -> param_vals,
as returned from read_rundata | [
"Make",
"sorted",
"run",
"name",
"and",
"parameter",
"name",
"&",
"value",
"lists",
"suitable",
"for",
"passing",
"to",
"prof",
".",
"Ipol",
"params",
"is",
"a",
"dict",
"(",
"actually",
"prefer",
"OrderedDict",
")",
"of",
"run_names",
"-",
">",
"param_vals",
"as",
"returned",
"from",
"read_rundata"
] | def mk_ipolinputs(params):
runs = sorted(params.keys())
if not runs:
return runs, [], [[]]
paramnames = params[runs[0]].keys()
paramslist = [[params[run][pn] for pn in paramnames] for run in runs]
return runs, paramnames, paramslist | [
"def",
"mk_ipolinputs",
"(",
"params",
")",
":",
"runs",
"=",
"sorted",
"(",
"params",
".",
"keys",
"(",
")",
")",
"if",
"not",
"runs",
":",
"return",
"runs",
",",
"[",
"]",
",",
"[",
"[",
"]",
"]",
"paramnames",
"=",
"params",
"[",
"runs",
"[",
"0",
"]",
"]",
".",
"keys",
"(",
")",
"paramslist",
"=",
"[",
"[",
"params",
"[",
"run",
"]",
"[",
"pn",
"]",
"for",
"pn",
"in",
"paramnames",
"]",
"for",
"run",
"in",
"runs",
"]",
"return",
"runs",
",",
"paramnames",
",",
"paramslist"
] | Make sorted run name and parameter name & value lists, suitable for passing to prof.Ipol
params is a dict (actually, prefer OrderedDict) of run_names -> param_vals,
as returned from read_rundata | [
"Make",
"sorted",
"run",
"name",
"and",
"parameter",
"name",
"&",
"value",
"lists",
"suitable",
"for",
"passing",
"to",
"prof",
".",
"Ipol",
"params",
"is",
"a",
"dict",
"(",
"actually",
"prefer",
"OrderedDict",
")",
"of",
"run_names",
"-",
">",
"param_vals",
"as",
"returned",
"from",
"read_rundata"
] | [
"\"\"\"\n Make sorted run name and parameter name & value lists, suitable for passing to prof.Ipol\n\n params is a dict (actually, prefer OrderedDict) of run_names -> param_vals,\n as returned from read_rundata\n \"\"\""
] | [
{
"param": "params",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "params",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def mk_ipolinputs(params):
runs = sorted(params.keys())
if not runs:
return runs, [], [[]]
paramnames = params[runs[0]].keys()
paramslist = [[params[run][pn] for pn in paramnames] for run in runs]
return runs, paramnames, paramslist | 36 | 703 |
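Usage sketch for `mk_ipolinputs` above, with made-up run names and parameters:

from collections import OrderedDict

params = OrderedDict([
    ("run_B", OrderedDict([("mass", 1.0), ("width", 0.1)])),
    ("run_A", OrderedDict([("mass", 2.0), ("width", 0.2)])),
])
runs, names, values = mk_ipolinputs(params)
print(runs)         # -> ['run_A', 'run_B'] (sorted)
print(list(names))  # -> ['mass', 'width']
print(values)       # -> [[2.0, 0.2], [1.0, 0.1]]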
6ba8475b25a768b98f6094c5b04ba522fbead45a | Hyperparticle/lct-master | charles-university/statistical-nlp/assignment-3/tag.py | [
"MIT"
] | Python | open_text | <not_specific> | def open_text(filename):
"""Reads a text line by line, applies light preprocessing, and returns an array of words and tags"""
with open(filename, encoding='iso-8859-2') as f:
content = f.readlines()
preprocess = lambda word: tuple(word.strip().rsplit('/', 1))
return [preprocess(word) for word in content] | Reads a text line by line, applies light preprocessing, and returns an array of words and tags | Reads a text line by line, applies light preprocessing, and returns an array of words and tags | [
"Reads",
"a",
"text",
"line",
"by",
"line",
"applies",
"light",
"preprocessing",
"and",
"returns",
"an",
"array",
"of",
"words",
"and",
"tags"
] | def open_text(filename):
with open(filename, encoding='iso-8859-2') as f:
content = f.readlines()
preprocess = lambda word: tuple(word.strip().rsplit('/', 1))
return [preprocess(word) for word in content] | [
"def",
"open_text",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"encoding",
"=",
"'iso-8859-2'",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"readlines",
"(",
")",
"preprocess",
"=",
"lambda",
"word",
":",
"tuple",
"(",
"word",
".",
"strip",
"(",
")",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
")",
"return",
"[",
"preprocess",
"(",
"word",
")",
"for",
"word",
"in",
"content",
"]"
] | Reads a text line by line, applies light preprocessing, and returns an array of words and tags | [
"Reads",
"a",
"text",
"line",
"by",
"line",
"applies",
"light",
"preprocessing",
"and",
"returns",
"an",
"array",
"of",
"words",
"and",
"tags"
] | [
"\"\"\"Reads a text line by line, applies light preprocessing, and returns an array of words and tags\"\"\""
] | [
{
"param": "filename",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "filename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def open_text(filename):
with open(filename, encoding='iso-8859-2') as f:
content = f.readlines()
preprocess = lambda word: tuple(word.strip().rsplit('/', 1))
return [preprocess(word) for word in content] | 37 | 628 |
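Usage sketch for `open_text` above; it writes a throwaway word/tag file in the working directory first.

with open("sample.txt", "w", encoding="iso-8859-2") as f:
    f.write("Videl/VB\njsem/P\n")

print(open_text("sample.txt"))  # -> [('Videl', 'VB'), ('jsem', 'P')]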
9b15eca2a02be787e6c3a05fff838aba8bbda7f9 | jschmidtnj/CS115 | hw5.py | [
"MIT"
] | Python | fast_lucas | <not_specific> | def fast_lucas(n):
'''Returns the nth Lucas number using the memoization technique
shown in class and lab. The Lucas numbers are as follows:
[2, 1, 3, 4, 7, 11, ...]'''
def lucasMemo(n, memo):
if n in memo:
return memo[n]
if n == 0:
result = 2
elif n == 1:
result = 1
else:
result = lucasMemo(n-1, memo) + lucasMemo(n-2, memo)
memo[n] = result
return result
return lucasMemo(n, {}) | Returns the nth Lucas number using the memoization technique
shown in class and lab. The Lucas numbers are as follows:
[2, 1, 3, 4, 7, 11, ...] | Returns the nth Lucas number using the memoization technique
shown in class and lab. | [
"Returns",
"the",
"nth",
"Lucas",
"number",
"using",
"the",
"memoization",
"technique",
"shown",
"in",
"class",
"and",
"lab",
"."
] | def fast_lucas(n):
def lucasMemo(n, memo):
if n in memo:
return memo[n]
if n == 0:
result = 2
elif n == 1:
result = 1
else:
result = lucasMemo(n-1, memo) + lucasMemo(n-2, memo)
memo[n] = result
return result
return lucasMemo(n, {}) | [
"def",
"fast_lucas",
"(",
"n",
")",
":",
"def",
"lucasMemo",
"(",
"n",
",",
"memo",
")",
":",
"if",
"n",
"in",
"memo",
":",
"return",
"memo",
"[",
"n",
"]",
"if",
"n",
"==",
"0",
":",
"result",
"=",
"2",
"elif",
"n",
"==",
"1",
":",
"result",
"=",
"1",
"else",
":",
"result",
"=",
"lucasMemo",
"(",
"n",
"-",
"1",
",",
"memo",
")",
"+",
"lucasMemo",
"(",
"n",
"-",
"2",
",",
"memo",
")",
"memo",
"[",
"n",
"]",
"=",
"result",
"return",
"result",
"return",
"lucasMemo",
"(",
"n",
",",
"{",
"}",
")"
] | Returns the nth Lucas number using the memoization technique
shown in class and lab. | [
"Returns",
"the",
"nth",
"Lucas",
"number",
"using",
"the",
"memoization",
"technique",
"shown",
"in",
"class",
"and",
"lab",
"."
] | [
"'''Returns the nth Lucas number using the memoization technique\n shown in class and lab. The Lucas numbers are as follows:\n [2, 1, 3, 4, 7, 11, ...]'''"
] | [
{
"param": "n",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "n",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def fast_lucas(n):
def lucasMemo(n, memo):
if n in memo:
return memo[n]
if n == 0:
result = 2
elif n == 1:
result = 1
else:
result = lucasMemo(n-1, memo) + lucasMemo(n-2, memo)
memo[n] = result
return result
return lucasMemo(n, {}) | 38 | 371 |
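Worked example for `fast_lucas` above:

print([fast_lucas(n) for n in range(7)])  # -> [2, 1, 3, 4, 7, 11, 18]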
4b509323441ad1cfa76f24e0b712b509f5f23a01 | fangohr/oommf-python | vision/oommf/mifgen.py | [
"BSD-2-Clause"
] | Python | _save_mif | <not_specific> | def _save_mif(sim_object, target):
"""
save_mif(sim_object)
Function takes a simulation instance and then saves the mif to file in
order to run a simulation, and also += 1 to the N_mifs in order to keep
track of multiple simulation steps, ie if a parameter will change.
Parameters
----------
sim_object : instance
A simulation instance.
Returns
-------
string
The path to the mif file
"""
path = sim_object.name + "_" + str(sim_object.t) + "_" + str(target) + ".mif"
f = open(path, 'w')
f.write(sim_object.mif)
return path |
save_mif(sim_object)
Function takes a simulation instance and then saves the mif to file in
order to run a simulation, and also += 1 to the N_mifs in order to keep
track of multiple simulation steps, ie if a parameter will change.
Parameters
----------
sim_object : instance
A simulation instance.
Returns
-------
string
The path to the mif file
| save_mif(sim_object)
Function takes a simulation instance and then saves the mif to file in
order to run a simulation, and also += 1 to the N_mifs in order to keep
track of multiple simulation steps, ie if a parameter will change.
Parameters
sim_object : instance
A simulation instance.
Returns
string
The path to the mif file | [
"save_mif",
"(",
"sim_object",
")",
"Function",
"takes",
"a",
"simulation",
"instance",
"and",
"then",
"saves",
"the",
"mif",
"to",
"file",
"in",
"order",
"to",
"run",
"a",
"simulation",
"and",
"also",
"+",
"=",
"1",
"to",
"the",
"N_mifs",
"in",
"order",
"to",
"keep",
"track",
"of",
"multiple",
"simulation",
"steps",
"ie",
"if",
"a",
"parameter",
"will",
"change",
".",
"Parameters",
"sim_object",
":",
"instance",
"A",
"simulation",
"instance",
".",
"Returns",
"string",
"The",
"path",
"to",
"the",
"mif",
"file"
] | def _save_mif(sim_object, target):
path = sim_object.name + "_" + str(sim_object.t) + "_" + str(target) + ".mif"
f = open(path, 'w')
f.write(sim_object.mif)
return path | [
"def",
"_save_mif",
"(",
"sim_object",
",",
"target",
")",
":",
"path",
"=",
"sim_object",
".",
"name",
"+",
"\"_\"",
"+",
"str",
"(",
"sim_object",
".",
"t",
")",
"+",
"\"_\"",
"+",
"str",
"(",
"target",
")",
"+",
"\".mif\"",
"f",
"=",
"open",
"(",
"path",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"sim_object",
".",
"mif",
")",
"return",
"path"
] | save_mif(sim_object)
Function takes a simulation instance and then saves the mif to file in
order to run a simulation, and also += 1 to the N_mifs in order to keep
track of multiple simulation steps, ie if a parameter will change. | [
"save_mif",
"(",
"sim_object",
")",
"Function",
"takes",
"a",
"simulation",
"instance",
"and",
"then",
"saves",
"the",
"mif",
"to",
"file",
"in",
"order",
"to",
"run",
"a",
"simulation",
"and",
"also",
"+",
"=",
"1",
"to",
"the",
"N_mifs",
"in",
"order",
"to",
"keep",
"track",
"of",
"multiple",
"simulation",
"steps",
"ie",
"if",
"a",
"parameter",
"will",
"change",
"."
] | [
"\"\"\"\n save_mif(sim_object)\n\n Function takes a simulation instance and then saves the mif to file in\n order to run a simulation, and also += 1 to the N_mifs in order to keep\n track of multiple simulation steps, ie if a parameter will change.\n \n Parameters\n ----------\n sim_object : instance\n A simulation instance.\n \n Returns\n -------\n string\n The path to the mif file\n \"\"\""
] | [
{
"param": "sim_object",
"type": null
},
{
"param": "target",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "sim_object",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "target",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _save_mif(sim_object, target):
path = sim_object.name + "_" + str(sim_object.t) + "_" + str(target) + ".mif"
f = open(path, 'w')
f.write(sim_object.mif)
return path | 39 | 157 |
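Usage sketch for `_save_mif` above; a stand-in simulation object supplies the three attributes the function reads. Note the record never closes the file handle it opens, so the write is only guaranteed to flush when the handle is garbage-collected.

from types import SimpleNamespace

sim = SimpleNamespace(name="demo", t=0, mif="# MIF 2.1\n")
print(_save_mif(sim, target=1))  # -> demo_0_1.mif (written to the working directory)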
fdbaead8c186a6baca4ad86b5dbf22f4c5e3e1b2 | masenf/metaforward | src/metaforward.py | [
"BSD-2-Clause"
] | Python | shadowed_attributes_from_bases | <not_specific> | def shadowed_attributes_from_bases(mcs, bases, dct=None):
"""
Collect attributes from a base class and the dct of a class under construction.
:param bases: sequence of types
:param dct: optional dict of attributes for a class under construction
:return: set of attributes which should not be proxied
"""
shadowed_attributes = set()
if dct is not None:
shadowed_attributes.update(dct.keys())
for base in bases:
shadowed_attributes.update(dir(base))
return shadowed_attributes |
Collect attributes from a base class and the dct of a class under construction.
:param bases: sequence of types
:param dct: optional dict of attributes for a class under construction
:return: set of attributes which should not be proxied
| Collect attributes from a base class and the dct of a class under construction. | [
"Collect",
"attributes",
"from",
"a",
"base",
"class",
"and",
"the",
"dct",
"of",
"a",
"class",
"under",
"construction",
"."
] | def shadowed_attributes_from_bases(mcs, bases, dct=None):
shadowed_attributes = set()
if dct is not None:
shadowed_attributes.update(dct.keys())
for base in bases:
shadowed_attributes.update(dir(base))
return shadowed_attributes | [
"def",
"shadowed_attributes_from_bases",
"(",
"mcs",
",",
"bases",
",",
"dct",
"=",
"None",
")",
":",
"shadowed_attributes",
"=",
"set",
"(",
")",
"if",
"dct",
"is",
"not",
"None",
":",
"shadowed_attributes",
".",
"update",
"(",
"dct",
".",
"keys",
"(",
")",
")",
"for",
"base",
"in",
"bases",
":",
"shadowed_attributes",
".",
"update",
"(",
"dir",
"(",
"base",
")",
")",
"return",
"shadowed_attributes"
] | Collect attributes from a base class and the dct of a class under construction. | [
"Collect",
"attributes",
"from",
"a",
"base",
"class",
"and",
"the",
"dct",
"of",
"a",
"class",
"under",
"construction",
"."
] | [
"\"\"\"\n Collect attributes from a base class and the dct of a class under construction.\n\n :param bases: sequence of types\n :param dct: optional dict of attributes for a class under construction\n :return: set of attributes which should not be proxied\n \"\"\""
] | [
{
"param": "mcs",
"type": null
},
{
"param": "bases",
"type": null
},
{
"param": "dct",
"type": null
}
] | {
"returns": [
{
"docstring": "set of attributes which should not be proxied",
"docstring_tokens": [
"set",
"of",
"attributes",
"which",
"should",
"not",
"be",
"proxied"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "mcs",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "bases",
"type": null,
"docstring": "sequence of types",
"docstring_tokens": [
"sequence",
"of",
"types"
],
"default": null,
"is_optional": null
},
{
"identifier": "dct",
"type": null,
"docstring": "optional dict of attributes for a class under construction",
"docstring_tokens": [
"optional",
"dict",
"of",
"attributes",
"for",
"a",
"class",
"under",
"construction"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def shadowed_attributes_from_bases(mcs, bases, dct=None):
shadowed_attributes = set()
if dct is not None:
shadowed_attributes.update(dct.keys())
for base in bases:
shadowed_attributes.update(dir(base))
return shadowed_attributes | 40 | 867 |
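Usage sketch for `shadowed_attributes_from_bases` above; since the metaclass argument is unused by the body, `None` stands in for `mcs`.

class Base:
    def existing(self):
        pass

shadowed = shadowed_attributes_from_bases(None, (Base,), {"new_attr": 1})
print("existing" in shadowed, "new_attr" in shadowed)  # -> True True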
efb7d56381ffb678d115f43d13a2057ef73b85d9 | nuxy/bittrex_autotrader | bittrex_autotrader/__main__.py | [
"MIT"
] | Python | _list_of_dict_filter_by | <not_specific> | def _list_of_dict_filter_by(data, key, value):
"""
Returns list of dictionary items filtered by key/value.
Args:
data (dict):
Data to filter.
key (str):
Dictionary key search.
value (str):
Dictionary key value match.
Returns:
list
"""
return [
item for i, item in enumerate(data) if data[i].get(key) == value
] |
Returns list of dictionary items filtered by key/value.
Args:
data (dict):
Data to filter.
key (str):
Dictionary key search.
value (str):
Dictionary key value match.
Returns:
list
| Returns list of dictionary items filtered by key/value. | [
"Returns",
"list",
"of",
"dictionary",
"items",
"filtered",
"by",
"key",
"/",
"value",
"."
] | def _list_of_dict_filter_by(data, key, value):
return [
item for i, item in enumerate(data) if data[i].get(key) == value
] | [
"def",
"_list_of_dict_filter_by",
"(",
"data",
",",
"key",
",",
"value",
")",
":",
"return",
"[",
"item",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"data",
")",
"if",
"data",
"[",
"i",
"]",
".",
"get",
"(",
"key",
")",
"==",
"value",
"]"
] | Returns list of dictionary items filtered by key/value. | [
"Returns",
"list",
"of",
"dictionary",
"items",
"filtered",
"by",
"key",
"/",
"value",
"."
] | [
"\"\"\"\n Returns list of dictionary items filtered by key/value.\n\n Args:\n data (dict):\n Data to filter.\n key (str):\n Dictionary key search.\n value (str):\n Dictionary key value match.\n\n Returns:\n list\n \"\"\""
] | [
{
"param": "data",
"type": null
},
{
"param": "key",
"type": null
},
{
"param": "value",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": "Data to filter.",
"docstring_tokens": [
"Data",
"to",
"filter",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "key",
"type": null,
"docstring": "Dictionary key search.",
"docstring_tokens": [
"Dictionary",
"key",
"search",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "value",
"type": null,
"docstring": "Dictionary key value match.",
"docstring_tokens": [
"Dictionary",
"key",
"value",
"match",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | def _list_of_dict_filter_by(data, key, value):
return [
item for i, item in enumerate(data) if data[i].get(key) == value
] | 41 | 411 |
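Worked example for `_list_of_dict_filter_by` above, with illustrative market rows:

rows = [{"MarketName": "BTC-LTC"}, {"MarketName": "BTC-ETH"}]
print(_list_of_dict_filter_by(rows, "MarketName", "BTC-ETH"))
# -> [{'MarketName': 'BTC-ETH'}]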
95e101001497dcd38c9653f10ac6de81199769ab | d-chambers/OpenSarToolkit | ost/helpers/scihub.py | [
"MIT"
] | Python | connect | <not_specific> | def connect(uname=None, pword=None, base_url="https://apihub.copernicus.eu/apihub"):
"""Generates an opener for the Copernicus apihub/dhus
:param uname: username of Copernicus' scihub
:type uname: str
:param pword: password of Copernicus' scihub
:type pword: str
:param base_url:
:return: an urllib opener instance for Copernicus' scihub
:rtype: opener object
"""
if not uname:
print(
" If you do not have a Copernicus Scihub user"
" account go to: https://scihub.copernicus.eu"
)
uname = input(" Your Copernicus Scihub Username:")
if not pword:
pword = getpass.getpass(" Your Copernicus Scihub Password:")
# create opener
manager = urllib.request.HTTPPasswordMgrWithDefaultRealm()
manager.add_password(None, base_url, uname, pword)
handler = urllib.request.HTTPBasicAuthHandler(manager)
opener = urllib.request.build_opener(handler)
return opener | Generates an opener for the Copernicus apihub/dhus
:param uname: username of Copernicus' scihub
:type uname: str
:param pword: password of Copernicus' scihub
:type pword: str
:param base_url:
:return: an urllib opener instance for Copernicus' scihub
:rtype: opener object
| Generates an opener for the Copernicus apihub/dhus | [
"Generates",
"an",
"opener",
"for",
"the",
"Copernicus",
"apihub",
"/",
"dhus"
] | def connect(uname=None, pword=None, base_url="https://apihub.copernicus.eu/apihub"):
if not uname:
print(
" If you do not have a Copernicus Scihub user"
" account go to: https://scihub.copernicus.eu"
)
uname = input(" Your Copernicus Scihub Username:")
if not pword:
pword = getpass.getpass(" Your Copernicus Scihub Password:")
manager = urllib.request.HTTPPasswordMgrWithDefaultRealm()
manager.add_password(None, base_url, uname, pword)
handler = urllib.request.HTTPBasicAuthHandler(manager)
opener = urllib.request.build_opener(handler)
return opener | [
"def",
"connect",
"(",
"uname",
"=",
"None",
",",
"pword",
"=",
"None",
",",
"base_url",
"=",
"\"https://apihub.copernicus.eu/apihub\"",
")",
":",
"if",
"not",
"uname",
":",
"print",
"(",
"\" If you do not have a Copernicus Scihub user\"",
"\" account go to: https://scihub.copernicus.eu\"",
")",
"uname",
"=",
"input",
"(",
"\" Your Copernicus Scihub Username:\"",
")",
"if",
"not",
"pword",
":",
"pword",
"=",
"getpass",
".",
"getpass",
"(",
"\" Your Copernicus Scihub Password:\"",
")",
"manager",
"=",
"urllib",
".",
"request",
".",
"HTTPPasswordMgrWithDefaultRealm",
"(",
")",
"manager",
".",
"add_password",
"(",
"None",
",",
"base_url",
",",
"uname",
",",
"pword",
")",
"handler",
"=",
"urllib",
".",
"request",
".",
"HTTPBasicAuthHandler",
"(",
"manager",
")",
"opener",
"=",
"urllib",
".",
"request",
".",
"build_opener",
"(",
"handler",
")",
"return",
"opener"
] | Generates an opener for the Copernicus apihub/dhus | [
"Generates",
"an",
"opener",
"for",
"the",
"Copernicus",
"apihub",
"/",
"dhus"
] | [
"\"\"\"Generates an opener for the Copernicus apihub/dhus\n\n\n :param uname: username of Copernicus' scihub\n :type uname: str\n :param pword: password of Copernicus' scihub\n :type pword: str\n :param base_url:\n :return: an urllib opener instance for Copernicus' scihub\n :rtype: opener object\n \"\"\"",
"# create opener"
] | [
{
"param": "uname",
"type": null
},
{
"param": "pword",
"type": null
},
{
"param": "base_url",
"type": null
}
] | {
"returns": [
{
"docstring": "an urllib opener instance for Copernicus' scihub",
"docstring_tokens": [
"an",
"urllib",
"opener",
"instance",
"for",
"Copernicus",
"'",
"scihub"
],
"type": "opener object"
}
],
"raises": [],
"params": [
{
"identifier": "uname",
"type": null,
"docstring": "username of Copernicus' scihub",
"docstring_tokens": [
"username",
"of",
"Copernicus",
"'",
"scihub"
],
"default": null,
"is_optional": null
},
{
"identifier": "pword",
"type": null,
"docstring": "password of Copernicus' scihub",
"docstring_tokens": [
"password",
"of",
"Copernicus",
"'",
"scihub"
],
"default": null,
"is_optional": null
},
{
"identifier": "base_url",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import getpass
import urllib
def connect(uname=None, pword=None, base_url="https://apihub.copernicus.eu/apihub"):
if not uname:
print(
" If you do not have a Copernicus Scihub user"
" account go to: https://scihub.copernicus.eu"
)
uname = input(" Your Copernicus Scihub Username:")
if not pword:
pword = getpass.getpass(" Your Copernicus Scihub Password:")
manager = urllib.request.HTTPPasswordMgrWithDefaultRealm()
manager.add_password(None, base_url, uname, pword)
handler = urllib.request.HTTPBasicAuthHandler(manager)
opener = urllib.request.build_opener(handler)
return opener | 43 | 125 |
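Usage sketch for `connect` above; the credentials are placeholders. Passing both arguments skips the interactive prompts, and no network request happens until the returned opener is actually used.

opener = connect(uname="user", pword="secret")
# opener.open("https://apihub.copernicus.eu/apihub/...") would then authenticate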
32e3f226d9e450744b37a06b1b5e8904828a3571 | beli302/Pitches | virtual/lib/python3.6/site-packages/flask_uploads.py | [
"MIT"
] | Python | patch_request_class | <not_specific> | def patch_request_class(app, size=64 * 1024 * 1024):
"""
By default, Flask will accept uploads to an arbitrary size. While Werkzeug
switches uploads from memory to a temporary file when they hit 500 KiB,
it's still possible for someone to overload your disk space with a
gigantic file.
This patches the app's request class's
`~werkzeug.BaseRequest.max_content_length` attribute so that any upload
larger than the given size is rejected with an HTTP error.
.. note::
In Flask 0.6, you can do this by setting the `MAX_CONTENT_LENGTH`
setting, without patching the request class. To emulate this behavior,
you can pass `None` as the size (you must pass it explicitly). That is
the best way to call this function, as it won't break the Flask 0.6
functionality if it exists.
.. versionchanged:: 0.1.1
:param app: The app to patch the request class of.
:param size: The maximum size to accept, in bytes. The default is 64 MiB.
If it is `None`, the app's `MAX_CONTENT_LENGTH` configuration
setting will be used to patch.
"""
if size is None:
if isinstance(app.request_class.__dict__['max_content_length'],
property):
return
size = app.config.get('MAX_CONTENT_LENGTH')
reqclass = app.request_class
patched = type(reqclass.__name__, (reqclass,),
{'max_content_length': size})
app.request_class = patched |
By default, Flask will accept uploads to an arbitrary size. While Werkzeug
switches uploads from memory to a temporary file when they hit 500 KiB,
it's still possible for someone to overload your disk space with a
gigantic file.
This patches the app's request class's
`~werkzeug.BaseRequest.max_content_length` attribute so that any upload
larger than the given size is rejected with an HTTP error.
.. note::
In Flask 0.6, you can do this by setting the `MAX_CONTENT_LENGTH`
setting, without patching the request class. To emulate this behavior,
you can pass `None` as the size (you must pass it explicitly). That is
the best way to call this function, as it won't break the Flask 0.6
functionality if it exists.
.. versionchanged:: 0.1.1
:param app: The app to patch the request class of.
:param size: The maximum size to accept, in bytes. The default is 64 MiB.
If it is `None`, the app's `MAX_CONTENT_LENGTH` configuration
setting will be used to patch.
| By default, Flask will accept uploads to an arbitrary size. While Werkzeug
switches uploads from memory to a temporary file when they hit 500 KiB,
it's still possible for someone to overload your disk space with a
gigantic file.
This patches the app's request class's
`~werkzeug.BaseRequest.max_content_length` attribute so that any upload
larger than the given size is rejected with an HTTP error.
In Flask 0.6, you can do this by setting the `MAX_CONTENT_LENGTH`
setting, without patching the request class. To emulate this behavior,
you can pass `None` as the size (you must pass it explicitly). That is
the best way to call this function, as it won't break the Flask 0.6
functionality if it exists.
| [
"By",
"default",
"Flask",
"will",
"accept",
"uploads",
"to",
"an",
"arbitrary",
"size",
".",
"While",
"Werkzeug",
"switches",
"uploads",
"from",
"memory",
"to",
"a",
"temporary",
"file",
"when",
"they",
"hit",
"500",
"KiB",
"it",
"'",
"s",
"still",
"possible",
"for",
"someone",
"to",
"overload",
"your",
"disk",
"space",
"with",
"a",
"gigantic",
"file",
".",
"This",
"patches",
"the",
"app",
"'",
"s",
"request",
"class",
"'",
"s",
"`",
"~werkzeug",
".",
"BaseRequest",
".",
"max_content_length",
"`",
"attribute",
"so",
"that",
"any",
"upload",
"larger",
"than",
"the",
"given",
"size",
"is",
"rejected",
"with",
"an",
"HTTP",
"error",
".",
"In",
"Flask",
"0",
".",
"6",
"you",
"can",
"do",
"this",
"by",
"setting",
"the",
"`",
"MAX_CONTENT_LENGTH",
"`",
"setting",
"without",
"patching",
"the",
"request",
"class",
".",
"To",
"emulate",
"this",
"behavior",
"you",
"can",
"pass",
"`",
"None",
"`",
"as",
"the",
"size",
"(",
"you",
"must",
"pass",
"it",
"explicitly",
")",
".",
"That",
"is",
"the",
"best",
"way",
"to",
"call",
"this",
"function",
"as",
"it",
"won",
"'",
"t",
"break",
"the",
"Flask",
"0",
".",
"6",
"functionality",
"if",
"it",
"exists",
"."
] | def patch_request_class(app, size=64 * 1024 * 1024):
if size is None:
if isinstance(app.request_class.__dict__['max_content_length'],
property):
return
size = app.config.get('MAX_CONTENT_LENGTH')
reqclass = app.request_class
patched = type(reqclass.__name__, (reqclass,),
{'max_content_length': size})
app.request_class = patched | [
"def",
"patch_request_class",
"(",
"app",
",",
"size",
"=",
"64",
"*",
"1024",
"*",
"1024",
")",
":",
"if",
"size",
"is",
"None",
":",
"if",
"isinstance",
"(",
"app",
".",
"request_class",
".",
"__dict__",
"[",
"'max_content_length'",
"]",
",",
"property",
")",
":",
"return",
"size",
"=",
"app",
".",
"config",
".",
"get",
"(",
"'MAX_CONTENT_LENGTH'",
")",
"reqclass",
"=",
"app",
".",
"request_class",
"patched",
"=",
"type",
"(",
"reqclass",
".",
"__name__",
",",
"(",
"reqclass",
",",
")",
",",
"{",
"'max_content_length'",
":",
"size",
"}",
")",
"app",
".",
"request_class",
"=",
"patched"
] | By default, Flask will accept uploads to an arbitrary size. | [
"By",
"default",
"Flask",
"will",
"accept",
"uploads",
"to",
"an",
"arbitrary",
"size",
"."
] | [
"\"\"\"\n By default, Flask will accept uploads to an arbitrary size. While Werkzeug\n switches uploads from memory to a temporary file when they hit 500 KiB,\n it's still possible for someone to overload your disk space with a\n gigantic file.\n\n This patches the app's request class's\n `~werkzeug.BaseRequest.max_content_length` attribute so that any upload\n larger than the given size is rejected with an HTTP error.\n\n .. note::\n\n In Flask 0.6, you can do this by setting the `MAX_CONTENT_LENGTH`\n setting, without patching the request class. To emulate this behavior,\n you can pass `None` as the size (you must pass it explicitly). That is\n the best way to call this function, as it won't break the Flask 0.6\n functionality if it exists.\n\n .. versionchanged:: 0.1.1\n\n :param app: The app to patch the request class of.\n :param size: The maximum size to accept, in bytes. The default is 64 MiB.\n If it is `None`, the app's `MAX_CONTENT_LENGTH` configuration\n setting will be used to patch.\n \"\"\""
] | [
{
"param": "app",
"type": null
},
{
"param": "size",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "app",
"type": null,
"docstring": "The app to patch the request class of.",
"docstring_tokens": [
"The",
"app",
"to",
"patch",
"the",
"request",
"class",
"of",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "size",
"type": null,
"docstring": "The maximum size to accept, in bytes. The default is 64 MiB.\nIf it is `None`, the app's `MAX_CONTENT_LENGTH` configuration\nsetting will be used to patch.",
"docstring_tokens": [
"The",
"maximum",
"size",
"to",
"accept",
"in",
"bytes",
".",
"The",
"default",
"is",
"64",
"MiB",
".",
"If",
"it",
"is",
"`",
"None",
"`",
"the",
"app",
"'",
"s",
"`",
"MAX_CONTENT_LENGTH",
"`",
"configuration",
"setting",
"will",
"be",
"used",
"to",
"patch",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def patch_request_class(app, size=64 * 1024 * 1024):
if size is None:
if isinstance(app.request_class.__dict__['max_content_length'],
property):
return
size = app.config.get('MAX_CONTENT_LENGTH')
reqclass = app.request_class
patched = type(reqclass.__name__, (reqclass,),
{'max_content_length': size})
app.request_class = patched | 44 | 142 |
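A short usage sketch for patch_request_class, assuming Flask is installed; the 16 MiB cap is an arbitrary illustration:

from flask import Flask

app = Flask(__name__)
patch_request_class(app, 16 * 1024 * 1024)  # uploads larger than 16 MiB now fail with an HTTP error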
303426d5ee7004b9dd1a7767a2408579d6fb2033 | neptune-ml/sacred | sacred/commandline_options.py | [
"MIT"
] | Python | apply | null | def apply(cls, args, run):
"""Add priority info for this run."""
try:
priority = float(args)
except ValueError:
raise ValueError(
"The PRIORITY argument must be a number! " "(but was '{}')".format(args)
)
run.meta_info["priority"] = priority | Add priority info for this run. | Add priority info for this run. | [
"Add",
"priority",
"info",
"for",
"this",
"run",
"."
] | def apply(cls, args, run):
try:
priority = float(args)
except ValueError:
raise ValueError(
"The PRIORITY argument must be a number! " "(but was '{}')".format(args)
)
run.meta_info["priority"] = priority | [
"def",
"apply",
"(",
"cls",
",",
"args",
",",
"run",
")",
":",
"try",
":",
"priority",
"=",
"float",
"(",
"args",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"The PRIORITY argument must be a number! \"",
"\"(but was '{}')\"",
".",
"format",
"(",
"args",
")",
")",
"run",
".",
"meta_info",
"[",
"\"priority\"",
"]",
"=",
"priority"
] | Add priority info for this run. | [
"Add",
"priority",
"info",
"for",
"this",
"run",
"."
] | [
"\"\"\"Add priority info for this run.\"\"\""
] | [
{
"param": "cls",
"type": null
},
{
"param": "args",
"type": null
},
{
"param": "run",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "args",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "run",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def apply(cls, args, run):
try:
priority = float(args)
except ValueError:
raise ValueError(
"The PRIORITY argument must be a number! " "(but was '{}')".format(args)
)
run.meta_info["priority"] = priority | 45 | 194 |
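A behavior sketch for apply above; the one-line Run stand-in is hypothetical, since a real sacred Run carries far more state:

run = type("Run", (), {"meta_info": {}})()  # hypothetical minimal stand-in
apply(None, "2.5", run)                     # cls is unused by the body
assert run.meta_info["priority"] == 2.5     # a non-numeric string would raise ValueError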
866a30195e6c69b24064d40dcd0432c7464bdea9 | tvhahn/Tool-Wear-Files | load_data.py | [
"MIT"
] | Python | tool_no_apply | <not_specific> | def tool_no_apply(cols):
"""Gets the tool number from the PMC signal
Explanation
===========
Same explanation as in the cut_signal_apply function
"""
pmc = cols[0]
if (pmc - 64) > 0:
return int(pmc - 64)
else:
return int(pmc) | Gets the tool number from the PMC signal
Explanation
===========
Same explanation as in the cut_signal_apply function
| Gets the tool number from the PMC signal
Explanation
Same explanation as in the cut_signal_apply function | [
"Gets",
"the",
"tool",
"number",
"from",
"the",
"PMC",
"signal",
"Explanation",
"Same",
"explanation",
"as",
"in",
"the",
"cut_signal_apply",
"function"
] | def tool_no_apply(cols):
pmc = cols[0]
if (pmc - 64) > 0:
return int(pmc - 64)
else:
return int(pmc) | [
"def",
"tool_no_apply",
"(",
"cols",
")",
":",
"pmc",
"=",
"cols",
"[",
"0",
"]",
"if",
"(",
"pmc",
"-",
"64",
")",
">",
"0",
":",
"return",
"int",
"(",
"pmc",
"-",
"64",
")",
"else",
":",
"return",
"int",
"(",
"pmc",
")"
] | Gets the tool number from the PMC signal
Explanation | [
"Gets",
"the",
"tool",
"number",
"from",
"the",
"PMC",
"signal",
"Explanation"
] | [
"\"\"\"Gets the tool number from the PMC signal\n \n Explanation\n ===========\n Same explanation as in the cut_signal_apply function\n\n \"\"\""
] | [
{
"param": "cols",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cols",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def tool_no_apply(cols):
pmc = cols[0]
if (pmc - 64) > 0:
return int(pmc - 64)
else:
return int(pmc) | 46 | 531 |
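A quick check of the branch logic, with the argument mimicking the one-element row slice a pandas apply would pass:

assert tool_no_apply([70]) == 6    # PMC 70 maps to tool 70 - 64
assert tool_no_apply([12]) == 12   # values of 64 or below pass through unchanged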
323c8a621d742c6892970c147bed4e6c27b1d7f0 | pypyjs/pypy | pypy/module/cpyext/import_.py | [
"Apache-2.0",
"OpenSSL"
] | Python | PyImport_Import | <not_specific> | def PyImport_Import(space, w_name):
"""
This is a higher-level interface that calls the current "import hook function".
It invokes the __import__() function from the __builtins__ of the
current globals. This means that the import is done using whatever import hooks
are installed in the current environment, e.g. by rexec or ihooks.
Always uses absolute imports."""
caller = space.getexecutioncontext().gettopframe_nohidden()
# Get the builtins from current globals
if caller is not None:
w_globals = caller.w_globals
w_builtin = space.getitem(w_globals, space.wrap('__builtins__'))
else:
# No globals -- use standard builtins, and fake globals
w_builtin = space.getbuiltinmodule('__builtin__')
w_globals = space.newdict()
space.setitem(w_globals, space.wrap("__builtins__"), w_builtin)
# Get the __import__ function from the builtins
if space.isinstance_w(w_builtin, space.w_dict):
w_import = space.getitem(w_builtin, space.wrap("__import__"))
else:
w_import = space.getattr(w_builtin, space.wrap("__import__"))
# Call the __import__ function with the proper argument list
# Always use absolute import here.
return space.call_function(w_import,
w_name, w_globals, w_globals,
space.newlist([space.wrap("__doc__")])) |
This is a higher-level interface that calls the current "import hook function".
It invokes the __import__() function from the __builtins__ of the
current globals. This means that the import is done using whatever import hooks
are installed in the current environment, e.g. by rexec or ihooks.
Always uses absolute imports. | This is a higher-level interface that calls the current "import hook function".
It invokes the __import__() function from the __builtins__ of the
current globals. This means that the import is done using whatever import hooks
are installed in the current environment, e.g. by rexec or ihooks.
Always uses absolute imports. | [
"This",
"is",
"a",
"higher",
"-",
"level",
"interface",
"that",
"calls",
"the",
"current",
"\"",
"import",
"hook",
"function",
"\"",
".",
"It",
"invokes",
"the",
"__import__",
"()",
"function",
"from",
"the",
"__builtins__",
"of",
"the",
"current",
"globals",
".",
"This",
"means",
"that",
"the",
"import",
"is",
"done",
"using",
"whatever",
"import",
"hooks",
"are",
"installed",
"in",
"the",
"current",
"environment",
"e",
".",
"g",
".",
"by",
"rexec",
"or",
"ihooks",
".",
"Always",
"uses",
"absolute",
"imports",
"."
] | def PyImport_Import(space, w_name):
caller = space.getexecutioncontext().gettopframe_nohidden()
if caller is not None:
w_globals = caller.w_globals
w_builtin = space.getitem(w_globals, space.wrap('__builtins__'))
else:
w_builtin = space.getbuiltinmodule('__builtin__')
w_globals = space.newdict()
space.setitem(w_globals, space.wrap("__builtins__"), w_builtin)
if space.isinstance_w(w_builtin, space.w_dict):
w_import = space.getitem(w_builtin, space.wrap("__import__"))
else:
w_import = space.getattr(w_builtin, space.wrap("__import__"))
return space.call_function(w_import,
w_name, w_globals, w_globals,
space.newlist([space.wrap("__doc__")])) | [
"def",
"PyImport_Import",
"(",
"space",
",",
"w_name",
")",
":",
"caller",
"=",
"space",
".",
"getexecutioncontext",
"(",
")",
".",
"gettopframe_nohidden",
"(",
")",
"if",
"caller",
"is",
"not",
"None",
":",
"w_globals",
"=",
"caller",
".",
"w_globals",
"w_builtin",
"=",
"space",
".",
"getitem",
"(",
"w_globals",
",",
"space",
".",
"wrap",
"(",
"'__builtins__'",
")",
")",
"else",
":",
"w_builtin",
"=",
"space",
".",
"getbuiltinmodule",
"(",
"'__builtin__'",
")",
"w_globals",
"=",
"space",
".",
"newdict",
"(",
")",
"space",
".",
"setitem",
"(",
"w_globals",
",",
"space",
".",
"wrap",
"(",
"\"__builtins__\"",
")",
",",
"w_builtin",
")",
"if",
"space",
".",
"isinstance_w",
"(",
"w_builtin",
",",
"space",
".",
"w_dict",
")",
":",
"w_import",
"=",
"space",
".",
"getitem",
"(",
"w_builtin",
",",
"space",
".",
"wrap",
"(",
"\"__import__\"",
")",
")",
"else",
":",
"w_import",
"=",
"space",
".",
"getattr",
"(",
"w_builtin",
",",
"space",
".",
"wrap",
"(",
"\"__import__\"",
")",
")",
"return",
"space",
".",
"call_function",
"(",
"w_import",
",",
"w_name",
",",
"w_globals",
",",
"w_globals",
",",
"space",
".",
"newlist",
"(",
"[",
"space",
".",
"wrap",
"(",
"\"__doc__\"",
")",
"]",
")",
")"
] | This is a higher-level interface that calls the current "import hook function". | [
"This",
"is",
"a",
"higher",
"-",
"level",
"interface",
"that",
"calls",
"the",
"current",
"\"",
"import",
"hook",
"function",
"\"",
"."
] | [
"\"\"\"\n This is a higher-level interface that calls the current \"import hook function\".\n It invokes the __import__() function from the __builtins__ of the\n current globals. This means that the import is done using whatever import hooks\n are installed in the current environment, e.g. by rexec or ihooks.\n\n Always uses absolute imports.\"\"\"",
"# Get the builtins from current globals",
"# No globals -- use standard builtins, and fake globals",
"# Get the __import__ function from the builtins",
"# Call the __import__ function with the proper argument list",
"# Always use absolute import here."
] | [
{
"param": "space",
"type": null
},
{
"param": "w_name",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "space",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "w_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def PyImport_Import(space, w_name):
caller = space.getexecutioncontext().gettopframe_nohidden()
if caller is not None:
w_globals = caller.w_globals
w_builtin = space.getitem(w_globals, space.wrap('__builtins__'))
else:
w_builtin = space.getbuiltinmodule('__builtin__')
w_globals = space.newdict()
space.setitem(w_globals, space.wrap("__builtins__"), w_builtin)
if space.isinstance_w(w_builtin, space.w_dict):
w_import = space.getitem(w_builtin, space.wrap("__import__"))
else:
w_import = space.getattr(w_builtin, space.wrap("__import__"))
return space.call_function(w_import,
w_name, w_globals, w_globals,
space.newlist([space.wrap("__doc__")])) | 47 | 54 |
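The cpyext function above needs a PyPy object space to run, but its effect matches this plain-Python sketch of the same absolute-import call:

mod = __import__("json", globals(), globals(), ["__doc__"])  # same argument shape as above
print(mod.dumps({"ok": True}))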
f134b7410f030c009eefe67e740f832d9cbaa2f8 | MirkoLedda/polyoligo | src/polyoligo/lib_utils.py | [
"BSD-2-Clause"
] | Python | seconds_to_hms | <not_specific> | def seconds_to_hms(t):
"""Formats a datetime.timedelta object to a HH:MM:SS string."""
hours, remainder = divmod(t.total_seconds(), 3600)
minutes, seconds = divmod(remainder, 60)
hours, minutes, seconds = int(hours), int(minutes), int(seconds)
if hours < 10:
hours = "0%s" % int(hours)
if minutes < 10:
minutes = "0%s" % minutes
if seconds < 10:
seconds = "0%s" % seconds
return "%s:%s:%s" % (hours, minutes, seconds) | Formats a datetime.timedelta object to a HH:MM:SS string. | Formats a datetime.timedelta object to a HH:MM:SS string. | [
"Formats",
"a",
"datetime",
".",
"timedelta",
"object",
"to",
"a",
"HH",
":",
"MM",
":",
"SS",
"string",
"."
] | def seconds_to_hms(t):
hours, remainder = divmod(t.total_seconds(), 3600)
minutes, seconds = divmod(remainder, 60)
hours, minutes, seconds = int(hours), int(minutes), int(seconds)
if hours < 10:
hours = "0%s" % int(hours)
if minutes < 10:
minutes = "0%s" % minutes
if seconds < 10:
seconds = "0%s" % seconds
return "%s:%s:%s" % (hours, minutes, seconds) | [
"def",
"seconds_to_hms",
"(",
"t",
")",
":",
"hours",
",",
"remainder",
"=",
"divmod",
"(",
"t",
".",
"total_seconds",
"(",
")",
",",
"3600",
")",
"minutes",
",",
"seconds",
"=",
"divmod",
"(",
"remainder",
",",
"60",
")",
"hours",
",",
"minutes",
",",
"seconds",
"=",
"int",
"(",
"hours",
")",
",",
"int",
"(",
"minutes",
")",
",",
"int",
"(",
"seconds",
")",
"if",
"hours",
"<",
"10",
":",
"hours",
"=",
"\"0%s\"",
"%",
"int",
"(",
"hours",
")",
"if",
"minutes",
"<",
"10",
":",
"minutes",
"=",
"\"0%s\"",
"%",
"minutes",
"if",
"seconds",
"<",
"10",
":",
"seconds",
"=",
"\"0%s\"",
"%",
"seconds",
"return",
"\"%s:%s:%s\"",
"%",
"(",
"hours",
",",
"minutes",
",",
"seconds",
")"
] | Formats a datetime.timedelta object to a HH:MM:SS string. | [
"Formats",
"a",
"datetime",
".",
"timedelta",
"object",
"to",
"a",
"HH",
":",
"MM",
":",
"SS",
"string",
"."
] | [
"\"\"\"Formats a datetime.timedelta object to a HH:MM:SS string.\"\"\""
] | [
{
"param": "t",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "t",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def seconds_to_hms(t):
hours, remainder = divmod(t.total_seconds(), 3600)
minutes, seconds = divmod(remainder, 60)
hours, minutes, seconds = int(hours), int(minutes), int(seconds)
if hours < 10:
hours = "0%s" % int(hours)
if minutes < 10:
minutes = "0%s" % minutes
if seconds < 10:
seconds = "0%s" % seconds
return "%s:%s:%s" % (hours, minutes, seconds) | 48 | 814 |
d37711957c262fbe690050937109571e502cf3e6 | Pandaaaa906/scrapy-playwright | scrapy_playwright/headers.py | [
"BSD-3-Clause"
] | Python | use_playwright_headers | dict | async def use_playwright_headers(
browser_type: str,
playwright_request: PlaywrightRequest,
scrapy_headers: Headers,
) -> dict:
"""Return headers from the Playwright request, unaltered"""
return playwright_request.headers | Return headers from the Playwright request, unaltered | Return headers from the Playwright request, unaltered | [
"Return",
"headers",
"from",
"the",
"Playwright",
"request",
"unaltered"
] | async def use_playwright_headers(
browser_type: str,
playwright_request: PlaywrightRequest,
scrapy_headers: Headers,
) -> dict:
return playwright_request.headers | [
"async",
"def",
"use_playwright_headers",
"(",
"browser_type",
":",
"str",
",",
"playwright_request",
":",
"PlaywrightRequest",
",",
"scrapy_headers",
":",
"Headers",
",",
")",
"->",
"dict",
":",
"return",
"playwright_request",
".",
"headers"
] | Return headers from the Playwright request, unaltered | [
"Return",
"headers",
"from",
"the",
"Playwright",
"request",
"unaltered"
] | [
"\"\"\"Return headers from the Playwright request, unaltered\"\"\""
] | [
{
"param": "browser_type",
"type": "str"
},
{
"param": "playwright_request",
"type": "PlaywrightRequest"
},
{
"param": "scrapy_headers",
"type": "Headers"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "browser_type",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "playwright_request",
"type": "PlaywrightRequest",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "scrapy_headers",
"type": "Headers",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | async def use_playwright_headers(
browser_type: str,
playwright_request: PlaywrightRequest,
scrapy_headers: Headers,
) -> dict:
return playwright_request.headers | 49 | 759 |
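Because the coroutine simply echoes the Playwright request's headers, hypothetical stand-ins are enough to show the contract; real arguments come from Playwright and Scrapy:

import asyncio

class FakeRequest:                       # hypothetical stand-in for a Playwright request
    headers = {"user-agent": "demo"}

result = asyncio.run(use_playwright_headers("chromium", FakeRequest(), scrapy_headers=None))
assert result == {"user-agent": "demo"}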
35a98f36b10113ace2ee5dac283ea5b6e9f91f5e | threefoldtech/jumpscaleX_core | install/threesdk/shell.py | [
"Apache-2.0"
] | Python | eval_code | <not_specific> | def eval_code(stmts, locals_=None, globals_=None):
"""
a helper function to ignore incomplete syntax errors when evaluating code
while typing incomplete lines, e.g.: j.clien...
"""
if not stmts:
return
try:
code = compile(stmts, filename=__name__, mode="eval")
except SyntaxError:
return
try:
return eval(code, globals_, locals_)
except:
return |
a helper function to ignore incomplete syntax errors when evaluating code
while typing incomplete lines, e.g.: j.clien...
| a helper function to ignore incomplete syntax errors when evaluating code
while typing incomplete lines, e.g.: j.clien | [
"a",
"helper",
"function",
"to",
"ignore",
"incomplete",
"syntax",
"erros",
"when",
"evaluating",
"code",
"while",
"typing",
"incomplete",
"lines",
"e",
".",
"g",
".",
":",
"j",
".",
"clien"
] | def eval_code(stmts, locals_=None, globals_=None):
if not stmts:
return
try:
code = compile(stmts, filename=__name__, mode="eval")
except SyntaxError:
return
try:
return eval(code, globals_, locals_)
except:
return | [
"def",
"eval_code",
"(",
"stmts",
",",
"locals_",
"=",
"None",
",",
"globals_",
"=",
"None",
")",
":",
"if",
"not",
"stmts",
":",
"return",
"try",
":",
"code",
"=",
"compile",
"(",
"stmts",
",",
"filename",
"=",
"__name__",
",",
"mode",
"=",
"\"eval\"",
")",
"except",
"SyntaxError",
":",
"return",
"try",
":",
"return",
"eval",
"(",
"code",
",",
"globals_",
",",
"locals_",
")",
"except",
":",
"return"
] | a helper function to ignore incomplete syntax errors when evaluating code
while typing incomplete lines, e.g. | [
"a",
"helper",
"function",
"to",
"ignore",
"incomplete",
"syntax",
"erros",
"when",
"evaluating",
"code",
"while",
"typing",
"incomplete",
"lines",
"e",
".",
"g",
"."
] | [
"\"\"\"\n a helper function to ignore incomplete syntax erros when evaluating code\n while typing incomplete lines, e.g.: j.clien...\n \"\"\""
] | [
{
"param": "stmts",
"type": null
},
{
"param": "locals_",
"type": null
},
{
"param": "globals_",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "stmts",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "locals_",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "globals_",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def eval_code(stmts, locals_=None, globals_=None):
if not stmts:
return
try:
code = compile(stmts, filename=__name__, mode="eval")
except SyntaxError:
return
try:
return eval(code, globals_, locals_)
except:
return | 51 | 545 |
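A few probes of the swallow-errors behavior the docstring describes:

assert eval_code("1 + 2") == 3
assert eval_code("1 +") is None       # SyntaxError from compile() is swallowed
assert eval_code("j.clien") is None   # NameError at eval time is swallowed too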
3d22d48525dfc4451cbff275e76eaf0f861d2502 | Allthemighty/SataniaBot | src/modules/misc/misc_util.py | [
"MIT"
] | Python | simple_check | <not_specific> | def simple_check(author, channel):
"""
A predicate used in the wait_for() function, to ensure the user input can only come
from the user who activated the command, and in the same channel.
:param author: Author object
:param channel: Channel object
:return: Check function
"""
def check(message):
return message.author == author and message.channel == channel
return check |
A predicate used in the wait_for() function, to ensure the user input can only come
from the user who activated the command, and in the same channel.
:param author: Author object
:param channel: Channel object
:return: Check function
| A predicate used in the wait_for() function, to ensure the user input can only come
from the user who activated the command, and in the same channel. | [
"A",
"predicate",
"used",
"in",
"the",
"wait_for",
"()",
"function",
"to",
"ensure",
"the",
"user",
"input",
"can",
"only",
"come",
"from",
"the",
"user",
"who",
"activated",
"the",
"command",
"and",
"in",
"the",
"same",
"channel",
"."
] | def simple_check(author, channel):
def check(message):
return message.author == author and message.channel == channel
return check | [
"def",
"simple_check",
"(",
"author",
",",
"channel",
")",
":",
"def",
"check",
"(",
"message",
")",
":",
"return",
"message",
".",
"author",
"==",
"author",
"and",
"message",
".",
"channel",
"==",
"channel",
"return",
"check"
] | A predicate used in the wait_for() function, to ensure the user input can only come
from the user who activated the command, and in the same channel. | [
"A",
"predicate",
"used",
"in",
"the",
"wait_for",
"()",
"function",
"to",
"ensure",
"the",
"user",
"input",
"can",
"only",
"come",
"from",
"the",
"user",
"who",
"activated",
"the",
"command",
"and",
"in",
"the",
"same",
"channel",
"."
] | [
"\"\"\"\n A predicate used in the wait_for() function, to ensure the user input can only come\n from the user who activated the command, and in the same channel.\n :param author: Author object\n :param channel: Channel object\n :return: Check function\n \"\"\""
] | [
{
"param": "author",
"type": null
},
{
"param": "channel",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "author",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "channel",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def simple_check(author, channel):
def check(message):
return message.author == author and message.channel == channel
return check | 52 | 443 |
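A sketch of the returned closure in action, with hypothetical stand-ins for discord.py message objects:

class Msg:                                   # hypothetical minimal message
    def __init__(self, author, channel):
        self.author, self.channel = author, channel

check = simple_check("alice", "#general")
assert check(Msg("alice", "#general")) is True
assert check(Msg("bob", "#general")) is False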
ecd832b296a9381f4398c16dcef906a078177dd4 | cmu-sei/usersim | tasks/frequency.py | [
"BSL-1.0"
] | Python | parameters | <not_specific> | def parameters(cls):
""" Returns a dictionary with human-readable descriptions of required arguments for the Frequency task.
Returns:
dict of dicts: Configuration dictionary with the keys 'required' and 'optional', where values are dicts
containing the required and optional parameters and their descriptions for the Frequency task,
respectively.
"""
params = {'required': {'task': 'task| the configuration of another task',
'frequency': 'number| positive decimal number - avg number of triggers per hour',
'repetitions': 'int| non-negative integer - 0 for unlimited'},
'optional': {}}
return params | Returns a dictionary with human-readable descriptions of required arguments for the Frequency task.
Returns:
dict of dicts: Configuration dictionary with the keys 'required' and 'optional', where values are dicts
containing the required and optional parameters and their descriptions for the Frequency task,
respectively.
| Returns a dictionary with human-readable descriptions of required arguments for the Frequency task. | [
"Returns",
"a",
"dictionary",
"with",
"human",
"-",
"readable",
"descriptions",
"of",
"required",
"arguments",
"for",
"the",
"Frequency",
"task",
"."
] | def parameters(cls):
params = {'required': {'task': 'task| the configuration of another task',
'frequency': 'number| positive decimal number - avg number of triggers per hour',
'repetitions': 'int| non-negative integer - 0 for unlimited'},
'optional': {}}
return params | [
"def",
"parameters",
"(",
"cls",
")",
":",
"params",
"=",
"{",
"'required'",
":",
"{",
"'task'",
":",
"'task| the configuration of another task'",
",",
"'frequency'",
":",
"'number| positive decimal number - avg number of triggers per hour'",
",",
"'repetitions'",
":",
"'int| non-negative integer - 0 for unlimited'",
"}",
",",
"'optional'",
":",
"{",
"}",
"}",
"return",
"params"
] | Returns a dictionary with human-readable descriptions of required arguments for the Frequency task. | [
"Returns",
"a",
"dictionary",
"with",
"human",
"-",
"readable",
"descriptions",
"of",
"required",
"arguments",
"for",
"the",
"Frequency",
"task",
"."
] | [
"\"\"\" Returns a dictionary with human-readable descriptions of required arguments for the Frequency task.\n\n Returns:\n dict of dicts: Configuration dictionary with the keys 'required' and 'optional', where values are dicts\n containing the required and optional parameters and their descriptions for the Frequency task,\n respectively.\n \"\"\""
] | [
{
"param": "cls",
"type": null
}
] | {
"returns": [
{
"docstring": "dict of dicts: Configuration dictionary with the keys 'required' and 'optional', where values are dicts\ncontaining the required and optional parameters and their descriptions for the Frequency task,\nrespectively.",
"docstring_tokens": [
"dict",
"of",
"dicts",
":",
"Configuration",
"dictionary",
"with",
"the",
"keys",
"'",
"required",
"'",
"and",
"'",
"optional",
"'",
"where",
"values",
"are",
"dicts",
"containing",
"the",
"required",
"and",
"optional",
"parameters",
"and",
"their",
"descriptions",
"for",
"the",
"Frequency",
"task",
"respectively",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def parameters(cls):
params = {'required': {'task': 'task| the configuration of another task',
'frequency': 'number| positive decimal number - avg number of triggers per hour',
'repetitions': 'int| non-negative integer - 0 for unlimited'},
'optional': {}}
return params | 53 | 85 |
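Calling the classmethod body directly shows the returned shape; cls is unused, so None is enough for a quick probe:

params = parameters(None)
assert set(params) == {"required", "optional"}
assert "frequency" in params["required"] and params["optional"] == {}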
3a0d16ef8d51a02e0c9568006f90794aa64c9e5c | russhughes/TurtlePlotBot | lib/oledui.py | [
"MIT"
] | Python | _screen_shot | null | def _screen_shot(uio, *_):
"""
_screen_shot
Append a text screen shot of the OLED display in the screenshot.txt
file. This can be triggered by a call to the routine or by long
pressing a button on the Pin 0 any time the UI is waiting for a
button press.
Each screen shot starts with a line consisting of the word "BEGIN".
Each row of the display is represented as a line of '.' and 'X'
characters where a dark pixel is represented by a '.' and each light
pixel is represented by a 'X'. The screenshot ends with a line
consisting of the word "END". The screenshot.txt file can contain
multiple screenshots.
"""
print("Writing screenshot... ")
with open('screenshots.txt', 'a') as output:
print('BEGIN', file=output)
for row in range(uio.display.height):
for col in range(uio.display.width):
if uio.display.pixel(col, row):
print('X', sep="", end="", file=output)
else:
print(".", sep="", end="", file=output)
print("", file=output)
print("END", file=output)
print("done.") |
_screen_shot
Append a text screen shot of the OLED display in the screenshot.txt
file. This can be triggered by a call to the routine or by long
pressing a button on the Pin 0 any time the UI is waiting for a
button press.
Each screen shot starts with a line consisting of the word "BEGIN".
Each row of the display is represented as a line of '.' and 'X'
characters where a dark pixel is represented by a '.' and each light
pixel is represented by a 'X'. The screenshot ends with a line
consisting of the word "END". The screenshot.txt file can contain
multiple screenshots.
| _screen_shot
Append a text screen shot of the OLED display in the screenshot.txt
file. This can be triggered by a call to the routine or by long
pressing a button on the Pin 0 any time the UI is waiting for a
button press.
Each screen shot starts with a line consisting of the word "BEGIN". | [
"_screen_shot",
"Append",
"a",
"text",
"screen",
"shot",
"of",
"the",
"OLED",
"display",
"in",
"the",
"screenshot",
".",
"txt",
"file",
".",
"This",
"can",
"be",
"triggered",
"by",
"a",
"call",
"to",
"the",
"routine",
"or",
"by",
"long",
"pressing",
"a",
"button",
"on",
"the",
"Pin",
"0",
"any",
"time",
"the",
"UI",
"is",
"waiting",
"for",
"a",
"button",
"press",
".",
"Each",
"screen",
"shot",
"starts",
"with",
"a",
"line",
"consisting",
"of",
"the",
"word",
"\"",
"BEGIN",
"\"",
"."
] | def _screen_shot(uio, *_):
print("Writing screenshot... ")
with open('screenshots.txt', 'a') as output:
print('BEGIN', file=output)
for row in range(uio.display.height):
for col in range(uio.display.width):
if uio.display.pixel(col, row):
print('X', sep="", end="", file=output)
else:
print(".", sep="", end="", file=output)
print("", file=output)
print("END", file=output)
print("done.") | [
"def",
"_screen_shot",
"(",
"uio",
",",
"*",
"_",
")",
":",
"print",
"(",
"\"Writing screenshot... \"",
")",
"with",
"open",
"(",
"'screenshots.txt'",
",",
"'a'",
")",
"as",
"output",
":",
"print",
"(",
"'BEGIN'",
",",
"file",
"=",
"output",
")",
"for",
"row",
"in",
"range",
"(",
"uio",
".",
"display",
".",
"height",
")",
":",
"for",
"col",
"in",
"range",
"(",
"uio",
".",
"display",
".",
"width",
")",
":",
"if",
"uio",
".",
"display",
".",
"pixel",
"(",
"col",
",",
"row",
")",
":",
"print",
"(",
"'X'",
",",
"sep",
"=",
"\"\"",
",",
"end",
"=",
"\"\"",
",",
"file",
"=",
"output",
")",
"else",
":",
"print",
"(",
"\".\"",
",",
"sep",
"=",
"\"\"",
",",
"end",
"=",
"\"\"",
",",
"file",
"=",
"output",
")",
"print",
"(",
"\"\"",
",",
"file",
"=",
"output",
")",
"print",
"(",
"\"END\"",
",",
"file",
"=",
"output",
")",
"print",
"(",
"\"done.\"",
")"
] | _screen_shot
Append a text screen shot of the OLED display in the screenshot.txt
file. | [
"_screen_shot",
"Append",
"a",
"text",
"screen",
"shot",
"of",
"the",
"OLED",
"display",
"in",
"the",
"screenshot",
".",
"txt",
"file",
"."
] | [
"\"\"\"\n _screen_shot\n\n Append a text screen shot of the OLED display in the screenshot.txt\n file. This can be triggered by a call to the routine or by long\n pressing a button on the Pin 0 any time the UI is waiting for a\n button press.\n\n Each screen shot starts with a line consisting of the word \"BEGIN\".\n Each row of the display is represented as a line of '.' and 'X'\n characters where a dark pixel is represented by a '.' and each light\n pixel is represented by a 'X'. The screenshot ends with a line\n consisting of the word \"END\". The screenshot.txt file can contain\n multiple screenshots.\n \"\"\""
] | [
{
"param": "uio",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "uio",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _screen_shot(uio, *_):
print("Writing screenshot... ")
with open('screenshots.txt', 'a') as output:
print('BEGIN', file=output)
for row in range(uio.display.height):
for col in range(uio.display.width):
if uio.display.pixel(col, row):
print('X', sep="", end="", file=output)
else:
print(".", sep="", end="", file=output)
print("", file=output)
print("END", file=output)
print("done.") | 54 | 918 |
2e79e2b725ac9e7e5c08e4e2dcbc552df0a4a442 | ooici/coi-services | ion/agents/platform/test/helper.py | [
"BSD-2-Clause"
] | Python | using_actual_rsn_oms_endpoint | <not_specific> | def using_actual_rsn_oms_endpoint(cls):
"""
Determines whether we are testing against the actual RSN OMS endpoint.
This is based on looking up the "USING_ACTUAL_RSN_OMS_ENDPOINT"
environment variable, which normally will only be defined as
convenient while doing local tests. See OOIION-1352.
"""
return "yes" == os.getenv('USING_ACTUAL_RSN_OMS_ENDPOINT') |
Determines whether we are testing against the actual RSN OMS endpoint.
This is based on looking up the "USING_ACTUAL_RSN_OMS_ENDPOINT"
environment variable, which normally will only be defined as
convenient while doing local tests. See OOIION-1352.
| Determines whether we are testing against the actual RSN OMS endpoint.
This is based on looking up the "USING_ACTUAL_RSN_OMS_ENDPOINT"
environment variable, which normally will only be defined as
convenient while doing local tests. | [
"Determines",
"whether",
"we",
"are",
"testing",
"against",
"the",
"actual",
"RSN",
"OMS",
"endpoint",
".",
"This",
"is",
"based",
"on",
"looking",
"up",
"the",
"\"",
"USING_ACTUAL_RSN_OMS_ENDPOINT",
"\"",
"environment",
"variable",
"which",
"normally",
"will",
"only",
"be",
"defined",
"as",
"convenient",
"while",
"doing",
"local",
"tests",
"."
] | def using_actual_rsn_oms_endpoint(cls):
return "yes" == os.getenv('USING_ACTUAL_RSN_OMS_ENDPOINT') | [
"def",
"using_actual_rsn_oms_endpoint",
"(",
"cls",
")",
":",
"return",
"\"yes\"",
"==",
"os",
".",
"getenv",
"(",
"'USING_ACTUAL_RSN_OMS_ENDPOINT'",
")"
] | Determines whether we are testing against the actual RSN OMS endpoint. | [
"Determines",
"whether",
"we",
"are",
"testing",
"against",
"the",
"actual",
"RSN",
"OMS",
"endpoint",
"."
] | [
"\"\"\"\n Determines whether we are testing against the actual RSN OMS endpoint.\n This is based on looking up the \"USING_ACTUAL_RSN_OMS_ENDPOINT\"\n environment variable, which normally will only be defined as\n convenient while doing local tests. See OOIION-1352.\n \"\"\""
] | [
{
"param": "cls",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import os
def using_actual_rsn_oms_endpoint(cls):
return "yes" == os.getenv('USING_ACTUAL_RSN_OMS_ENDPOINT') | 55 | 657 |
cc179d17e4e35730d499aba1ba8c9c604fca0dad | crahal/lit_reviewer | src/pubmed_functions.py | [
"MIT"
] | Python | return_pag | <not_specific> | def return_pag(paper):
""" Return the pagination from the paper nest"""
try:
return paper['MedlineCitation']['Article']['Pagination']['MedlinePgn']
except KeyError:
return None | Return the pagination from the paper nest | Return the pagination from the paper nest | [
"Return",
"the",
"pagination",
"from",
"the",
"paper",
"nest"
] | def return_pag(paper):
try:
return paper['MedlineCitation']['Article']['Pagination']['MedlinePgn']
except KeyError:
return None | [
"def",
"return_pag",
"(",
"paper",
")",
":",
"try",
":",
"return",
"paper",
"[",
"'MedlineCitation'",
"]",
"[",
"'Article'",
"]",
"[",
"'Pagination'",
"]",
"[",
"'MedlinePgn'",
"]",
"except",
"KeyError",
":",
"return",
"None"
] | Return the pagination from the paper nest | [
"Return",
"the",
"pagination",
"from",
"the",
"paper",
"nest"
] | [
"\"\"\" Return the pagination from the paper nest\"\"\""
] | [
{
"param": "paper",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "paper",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def return_pag(paper):
try:
return paper['MedlineCitation']['Article']['Pagination']['MedlinePgn']
except KeyError:
return None | 56 | 359 |
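Both branches exercised against a hand-built PubMed-style nest:

paper = {"MedlineCitation": {"Article": {"Pagination": {"MedlinePgn": "12-34"}}}}
assert return_pag(paper) == "12-34"
assert return_pag({"MedlineCitation": {}}) is None   # any missing key falls back to None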
2e17d39b74ad622e3ac0ae25d5f59143463bfa85 | rafaelcn/cryptography | aes/src/aes/common.py | [
"MIT"
] | Python | add_round_key | null | def add_round_key(state, key):
"""
Add the given key to the state using XOR.
"""
for i in range(4):
for j in range(4):
state[i][j] ^= key[i][j] |
Add the given key to the state using XOR.
| Add the given key to the state using XOR. | [
"Add",
"the",
"given",
"key",
"to",
"the",
"state",
"using",
"XOR",
"."
] | def add_round_key(state, key):
for i in range(4):
for j in range(4):
state[i][j] ^= key[i][j] | [
"def",
"add_round_key",
"(",
"state",
",",
"key",
")",
":",
"for",
"i",
"in",
"range",
"(",
"4",
")",
":",
"for",
"j",
"in",
"range",
"(",
"4",
")",
":",
"state",
"[",
"i",
"]",
"[",
"j",
"]",
"^=",
"key",
"[",
"i",
"]",
"[",
"j",
"]"
] | Add the given key to the state using XOR. | [
"Add",
"the",
"given",
"key",
"to",
"the",
"state",
"using",
"XOR",
"."
] | [
"\"\"\"\n Add the given key to the state using XOR.\n \"\"\""
] | [
{
"param": "state",
"type": null
},
{
"param": "key",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "state",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "key",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def add_round_key(state, key):
for i in range(4):
for j in range(4):
state[i][j] ^= key[i][j] | 57 | 727 |
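A small in-place XOR check; the byte values are arbitrary:

state = [[i * 4 + j for j in range(4)] for i in range(4)]
key = [[0xFF] * 4 for _ in range(4)]
add_round_key(state, key)
assert state[0][0] == 0xFF and state[0][1] == 0xFE   # 0 ^ 0xFF and 1 ^ 0xFF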
e18bd85473e5921ae439f6af3476d616402d6ba0 | waikato-datamining/video-frame-processor | src/vfp/_processor.py | [
"MIT"
] | Python | decode_fourcc | <not_specific> | def decode_fourcc(cc):
"""
Turns the float into a four letter codec string.
Taken from here:
https://stackoverflow.com/a/49138893/4698227
:param cc: the codec as float
:type cc: float
:return: the codec string
:rtype: str
"""
return "".join([chr((int(cc) >> 8 * i) & 0xFF) for i in range(4)]) |
Turns the float into a four letter codec string.
Taken from here:
https://stackoverflow.com/a/49138893/4698227
:param cc: the codec as float
:type cc: float
:return: the codec string
:rtype: str
| Turns the float into a four letter codec string. | [
"Turns",
"the",
"float",
"into",
"a",
"four",
"letter",
"codec",
"string",
"."
] | def decode_fourcc(cc):
return "".join([chr((int(cc) >> 8 * i) & 0xFF) for i in range(4)]) | [
"def",
"decode_fourcc",
"(",
"cc",
")",
":",
"return",
"\"\"",
".",
"join",
"(",
"[",
"chr",
"(",
"(",
"int",
"(",
"cc",
")",
">>",
"8",
"*",
"i",
")",
"&",
"0xFF",
")",
"for",
"i",
"in",
"range",
"(",
"4",
")",
"]",
")"
] | Turns the float into a four letter codec string. | [
"Turns",
"the",
"float",
"into",
"a",
"four",
"letter",
"codec",
"string",
"."
] | [
"\"\"\"\n Turns the float into a four letter codec string.\n Taken from here:\n https://stackoverflow.com/a/49138893/4698227\n :param cc: the codec as float\n :type cc: float\n :return: the codec string\n :rtype: str\n \"\"\""
] | [
{
"param": "cc",
"type": null
}
] | {
"returns": [
{
"docstring": "the codec string",
"docstring_tokens": [
"the",
"codec",
"string"
],
"type": "str"
}
],
"raises": [],
"params": [
{
"identifier": "cc",
"type": null,
"docstring": "the codec as float",
"docstring_tokens": [
"the",
"codec",
"as",
"float"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def decode_fourcc(cc):
return "".join([chr((int(cc) >> 8 * i) & 0xFF) for i in range(4)]) | 58 | 316 |
3378601f401cc65b610da88dac290768b028faa7 | mwhit74/sau | sau/sau.py | [
"MIT"
] | Python | ac1_mx | <not_specific> | def ac1_mx(w, l, x):
"""Moment at x - Beam simply supported - Uniformly dist. loads
Calculates the moment in the beam at any location, x, along the
beam due to a uniformly distributed load.
m = w*x/2*(l-x)
Args:
w (float): uniformly distributed load
l (float): length of beam between supports
E (float): modulus of elasticity
I (float): section modulus
x (float): distance along beam from left support
Returns:
m (tuple(float, str)): maximum positive moment at midspan
Notes:
1. Consistent units are the responsibility of the user.
3. For maximum positive moment use x = L/2.
"""
m = w*x/2.*(l-x)
text = (f'm = w*x/2*(l-x) \n' +
f'm = {w:.2f}*{x:.2f}/2*({l:.2f}-{x:.2f}) \n' +
f'm = {m:.2f}')
return m, text | Moment at x - Beam simply supported - Uniformly dist. loads
Calculates the moment in the beam at any location, x, along the
beam due to a uniformly distributed load.
m = w*x/2*(l-x)
Args:
w (float): uniformly distributed load
l (float): length of beam between supports
E (float): modulus of elasticity
I (float): section modulus
x (float): distance along beam from left support
Returns:
m (tuple(float, str)): maximum positive moment at midspan
Notes:
1. Consistent units are the responsibility of the user.
3. For maximum positive moment use x = L/2.
| Moment at x - Beam simply supported - Uniformly dist. loads
Calculates the moment in the beam at any location, x, along the
beam due to a uniformly distributed load.
| [
"Moment",
"at",
"x",
"-",
"Beam",
"simply",
"supported",
"-",
"Uniformly",
"dist",
".",
"loads",
"Calculates",
"the",
"moment",
"in",
"the",
"beam",
"at",
"any",
"location",
"x",
"along",
"the",
"beam",
"due",
"to",
"a",
"uniformly",
"distributed",
"load",
"."
] | def ac1_mx(w, l, x):
m = w*x/2.*(l-x)
text = (f'm = w*x/2*(l-x) \n' +
f'm = {w:.2f}*{x:.2f}/2*({l:.2f}-{x:.2f}) \n' +
f'm = {m:.2f}')
return m, text | [
"def",
"ac1_mx",
"(",
"w",
",",
"l",
",",
"x",
")",
":",
"m",
"=",
"w",
"*",
"x",
"/",
"2.",
"*",
"(",
"l",
"-",
"x",
")",
"text",
"=",
"(",
"f'm = w*x/2*(l-x) \\n'",
"+",
"f'm = {w:.2f}*{x:.2f}/2*({l:.2f}-{x:.2f}) \\n'",
"+",
"f'm = {m:.2f}'",
")",
"return",
"m",
",",
"text"
] | Moment at x - Beam simply supported - Uniformly dist. | [
"Moment",
"at",
"x",
"-",
"Beam",
"simply",
"supported",
"-",
"Uniformly",
"dist",
"."
] | [
"\"\"\"Moment at x - Beam simply supported - Uniformly dist. loads\n \n Calculates the moment in the beam at any location, x, along the\n beam due to a uniformly distributed load.\n \n m = w*x/2*(l-x)\n \n Args:\n w (float): uniformly distributed load\n \n l (float): length of beam between supports\n \n E (float): modulus of elasticity\n \n I (float): section modulus\n \n x (float): distance along beam from left support\n \n Returns:\n m (tuple(float, str)): maximum positive moment at midspan\n \n Notes:\n 1. Consistent units are the responsibility of the user.\n 3. For maximum positive moment use x = L/2.\n \"\"\""
] | [
{
"param": "w",
"type": null
},
{
"param": "l",
"type": null
},
{
"param": "x",
"type": null
}
] | {
"returns": [
{
"docstring": "m (tuple(float, str)): maximum positive moment at midspan",
"docstring_tokens": [
"m",
"(",
"tuple",
"(",
"float",
"str",
"))",
":",
"maximum",
"positive",
"moment",
"at",
"midspan"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "w",
"type": null,
"docstring": "uniformly distributed load",
"docstring_tokens": [
"uniformly",
"distributed",
"load"
],
"default": null,
"is_optional": false
},
{
"identifier": "l",
"type": null,
"docstring": "length of beam between supports",
"docstring_tokens": [
"length",
"of",
"beam",
"between",
"supports"
],
"default": null,
"is_optional": false
},
{
"identifier": "x",
"type": null,
"docstring": "distance along beam from left support",
"docstring_tokens": [
"distance",
"along",
"beam",
"from",
"left",
"support"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [
{
"identifier": "E",
"type": null,
"docstring": "modulus of elasticity",
"docstring_tokens": [
"modulus",
"of",
"elasticity"
],
"default": null,
"is_optional": false
},
{
"identifier": "I",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
}
],
"others": []
} | def ac1_mx(w, l, x):
m = w*x/2.*(l-x)
text = (f'm = w*x/2*(l-x) \n' +
f'm = {w:.2f}*{x:.2f}/2*({l:.2f}-{x:.2f}) \n' +
f'm = {m:.2f}')
return m, text | 59 | 185 |
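A worked midspan case; unit choice stays with the caller, as the docstring notes:

m, text = ac1_mx(w=2.0, l=10.0, x=5.0)   # x = l/2 gives the maximum positive moment
assert m == 25.0                          # w*x/2*(l-x) = 2*5/2*5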
85c81e489f3398ee5c56508f8cd0e49341a902fc | amole-arup/eng_utilities | eng_utilities/section_utilities.py | [
"MIT"
] | Python | deck_props | <not_specific> | def deck_props(DD, DR, B, TT, TB):
"""Properties of a trapezoidal deck when given deck depth (DD), rib depth (DR)
rib spacing (B), rib width at top (TT), rib width at bottom (TB)
- P is the length of the deck on the underside (topside is ignored)
- A is the cross-sectional area of concrete per rib
- D_AVE is the average depth of concrete
NB At the moment no flexural properties are calculated"""
A = DD * B + 0.5 * (TT + TB) * DR
P = B - TT + TB + ((TT - TB)**2 + 4 * DR**2)**0.5
return {'P': P, 'A': A, 'D_AVE': A / B} | Properties of a trapezoidal deck when given deck depth (DD), rib depth (DR)
rib spacing (B), rib width at top (TT), rib width at bottom (TB)
- P is the length of the deck on the underside (topside is ignored)
- A is the cross-sectional area of concrete per rib
- D_AVE is the average depth of concrete
NB At the moment no flexural properties are calculated | Properties of a trapezoidal deck when given deck depth (DD), rib depth (DR)
rib spacing (B), rib width at top (TT), rib width at bottom (TB)
P is the length of the deck on the underside (topside is ignored)
A is the cross-sectional area of concrete per rib
D_AVE is the average depth of concrete
NB At the moment no flexural properties are calculated | [
"Properties",
"of",
"a",
"trapezoidal",
"deck",
"when",
"given",
"deck",
"depth",
"(",
"DD",
")",
"rib",
"depth",
"(",
"DR",
")",
"rib",
"spacing",
"(",
"B",
")",
"rib",
"width",
"at",
"top",
"(",
"TT",
")",
"rib",
"width",
"at",
"bottom",
"(",
"TB",
")",
"P",
"is",
"the",
"length",
"of",
"the",
"deck",
"on",
"the",
"underside",
"(",
"topside",
"is",
"ignored",
")",
"A",
"is",
"the",
"cross",
"-",
"sectional",
"area",
"of",
"concrete",
"per",
"rib",
"D_AVE",
"is",
"the",
"average",
"depth",
"of",
"concrete",
"NB",
"At",
"the",
"moment",
"no",
"flexural",
"properties",
"are",
"calculated"
] | def deck_props(DD, DR, B, TT, TB):
A = DD * B + 0.5 * (TT + TB) * DR
P = B - TT + TB + ((TT - TB)**2 + 4 * DR**2)**0.5
return {'P': P, 'A': A, 'D_AVE': A / B} | [
"def",
"deck_props",
"(",
"DD",
",",
"DR",
",",
"B",
",",
"TT",
",",
"TB",
")",
":",
"A",
"=",
"DD",
"*",
"B",
"+",
"0.5",
"*",
"(",
"TT",
"+",
"TB",
")",
"*",
"DR",
"P",
"=",
"B",
"-",
"TT",
"+",
"TB",
"+",
"(",
"(",
"TT",
"-",
"TB",
")",
"**",
"2",
"+",
"4",
"*",
"DR",
"**",
"2",
")",
"**",
"0.5",
"return",
"{",
"'P'",
":",
"P",
",",
"'A'",
":",
"A",
",",
"'D_AVE'",
":",
"A",
"/",
"B",
"}"
] | Properties of a trapezoidal deck when given deck depth (DD), rib depth (DR)
rib spacing (B), rib width at top (TT), rib width at bottom (TB)
P is the length of the deck on the underside (topside is ignored)
A is the cross-sectional area of concrete per rib
D_AVE is the average depth of concrete | [
"Properties",
"of",
"a",
"trapezoidal",
"deck",
"when",
"given",
"deck",
"depth",
"(",
"DD",
")",
"rib",
"depth",
"(",
"DR",
")",
"rib",
"spacing",
"(",
"B",
")",
"rib",
"width",
"at",
"top",
"(",
"TT",
")",
"rib",
"width",
"at",
"bottom",
"(",
"TB",
")",
"P",
"is",
"the",
"length",
"of",
"the",
"deck",
"on",
"the",
"underside",
"(",
"topside",
"is",
"ignored",
")",
"A",
"is",
"the",
"cross",
"-",
"sectional",
"area",
"of",
"concrete",
"per",
"rib",
"D_AVE",
"is",
"the",
"average",
"depth",
"of",
"concrete"
] | [
"\"\"\"Properties of a trapezoidal deck when given deck depth (DD), rib depth (DR)\n rib spacing (B), rib width at top (TT), rib width at bottom (TB)\n - P is the length of the deck on the underside (topside is ignored)\n - A is the cross-sectional area of concrete per rib\n - D_AVE is the average depth of concrete\n \n NB At the moment no flexural properties are calculated\"\"\""
] | [
{
"param": "DD",
"type": null
},
{
"param": "DR",
"type": null
},
{
"param": "B",
"type": null
},
{
"param": "TT",
"type": null
},
{
"param": "TB",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "DD",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "DR",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "B",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "TT",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "TB",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def deck_props(DD, DR, B, TT, TB):
A = DD * B + 0.5 * (TT + TB) * DR
P = B - TT + TB + ((TT - TB)**2 + 4 * DR**2)**0.5
return {'P': P, 'A': A, 'D_AVE': A / B} | 60 | 30 |
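A worked example with hypothetical millimetre dimensions:

props = deck_props(DD=75.0, DR=50.0, B=300.0, TT=120.0, TB=60.0)
assert props["A"] == 27000.0    # 75*300 + 0.5*(120+60)*50
assert props["D_AVE"] == 90.0   # A / B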
4158684ebdf6a0f7e38126105f4c44de5ba48d04 | turnoutnow/game-analytics-pipeline | source/demo/publish_data.py | [
"MIT-0"
] | Python | send_record_batch | null | def send_record_batch(kinesis_client, stream_name, raw_records):
"""Send a batch of records to Amazon Kinesis."""
# Translate input records into the format needed by the boto3 SDK
formatted_records = []
for rec in raw_records:
formatted_records.append({'PartitionKey': rec['event']['event_id'], 'Data': json.dumps(rec)})
kinesis_client.put_records(StreamName=stream_name, Records=formatted_records)
print('Sent %d records to stream %s.' % (len(formatted_records), stream_name)) | Send a batch of records to Amazon Kinesis. | Send a batch of records to Amazon Kinesis. | [
"Send",
"a",
"batch",
"of",
"records",
"to",
"Amazon",
"Kinesis",
"."
] | def send_record_batch(kinesis_client, stream_name, raw_records):
formatted_records = []
for rec in raw_records:
formatted_records.append({'PartitionKey': rec['event']['event_id'], 'Data': json.dumps(rec)})
kinesis_client.put_records(StreamName=stream_name, Records=formatted_records)
print('Sent %d records to stream %s.' % (len(formatted_records), stream_name)) | [
"def",
"send_record_batch",
"(",
"kinesis_client",
",",
"stream_name",
",",
"raw_records",
")",
":",
"formatted_records",
"=",
"[",
"]",
"for",
"rec",
"in",
"raw_records",
":",
"formatted_records",
".",
"append",
"(",
"{",
"'PartitionKey'",
":",
"rec",
"[",
"'event'",
"]",
"[",
"'event_id'",
"]",
",",
"'Data'",
":",
"json",
".",
"dumps",
"(",
"rec",
")",
"}",
")",
"kinesis_client",
".",
"put_records",
"(",
"StreamName",
"=",
"stream_name",
",",
"Records",
"=",
"formatted_records",
")",
"print",
"(",
"'Sent %d records to stream %s.'",
"%",
"(",
"len",
"(",
"formatted_records",
")",
",",
"stream_name",
")",
")"
] | Send a batch of records to Amazon Kinesis. | [
"Send",
"a",
"batch",
"of",
"records",
"to",
"Amazon",
"Kinesis",
"."
] | [
"\"\"\"Send a batch of records to Amazon Kinesis.\"\"\"",
"# Translate input records into the format needed by the boto3 SDK"
] | [
{
"param": "kinesis_client",
"type": null
},
{
"param": "stream_name",
"type": null
},
{
"param": "raw_records",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "kinesis_client",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "stream_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "raw_records",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import json
def send_record_batch(kinesis_client, stream_name, raw_records):
formatted_records = []
for rec in raw_records:
formatted_records.append({'PartitionKey': rec['event']['event_id'], 'Data': json.dumps(rec)})
kinesis_client.put_records(StreamName=stream_name, Records=formatted_records)
print('Sent %d records to stream %s.' % (len(formatted_records), stream_name)) | 61 | 111 |
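A usage sketch assuming boto3 is installed and the stream already exists; the stream name, region, and event payload are hypothetical:

import boto3

client = boto3.client("kinesis", region_name="us-east-1")
events = [{"event": {"event_id": "e-1", "event_type": "login"}}]
send_record_batch(client, "demo-stream", events)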
21cda7ead5143cd6dba8fbe256ccb44e0a991113 | baajur/Anakin | tools/external_converter_v2/parser/tensorflow/run_pb.py | [
"Apache-2.0"
] | Python | convert_name_tf2ak | <not_specific> | def convert_name_tf2ak(tf_name, perfix='record_'):
'''
convert tf name to ak name
:param tf_name:
:param perfix:
:return:
'''
ak_name = tf_name[:]
for index, x in enumerate(tf_name):
if x == '/':
ak_name = ak_name[:index] + '_' + ak_name[index + 1:]
return perfix + ak_name |
convert tf name to ak name
:param tf_name:
:param perfix:
:return:
| convert tf name to ak name | [
"conver",
"tf",
"name",
"to",
"ak",
"name"
] | def convert_name_tf2ak(tf_name, perfix='record_'):
ak_name = tf_name[:]
for index, x in enumerate(tf_name):
if x == '/':
ak_name = ak_name[:index] + '_' + ak_name[index + 1:]
return perfix + ak_name | [
"def",
"convert_name_tf2ak",
"(",
"tf_name",
",",
"perfix",
"=",
"'record_'",
")",
":",
"ak_name",
"=",
"tf_name",
"[",
":",
"]",
"for",
"index",
",",
"x",
"in",
"enumerate",
"(",
"tf_name",
")",
":",
"if",
"x",
"==",
"'/'",
":",
"ak_name",
"=",
"ak_name",
"[",
":",
"index",
"]",
"+",
"'_'",
"+",
"ak_name",
"[",
"index",
"+",
"1",
":",
"]",
"return",
"perfix",
"+",
"ak_name"
] | convert tf name to ak name | [
"convert",
"tf",
"name",
"to",
"ak",
"name"
] | [
"'''\n convert tf name to ak name\n :param tf_name:\n :param perfix:\n :return:\n '''"
] | [
{
"param": "tf_name",
"type": null
},
{
"param": "perfix",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "tf_name",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "perfix",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def convert_name_tf2ak(tf_name, perfix='record_'):
ak_name = tf_name[:]
for index, x in enumerate(tf_name):
if x == '/':
ak_name = ak_name[:index] + '_' + ak_name[index + 1:]
return perfix + ak_name | 62 | 80 |
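A short usage sketch for the name converter above, with hypothetical TensorFlow scope names (the function is reproduced verbatim, including the `perfix` spelling of its parameter):

```python
def convert_name_tf2ak(tf_name, perfix='record_'):
    # Replace every '/' in a TensorFlow scope name with '_' and prepend a prefix.
    ak_name = tf_name[:]
    for index, x in enumerate(tf_name):
        if x == '/':
            ak_name = ak_name[:index] + '_' + ak_name[index + 1:]
    return perfix + ak_name

print(convert_name_tf2ak('conv1/weights'))          # record_conv1_weights
print(convert_name_tf2ak('block/unit/bn', 'ak_'))   # ak_block_unit_bn
```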
fde94ebd28e106560e867cbb11453ca9930c778e | philosofool/phi_baseball | stats.py | [
"MIT"
] | Python | from_rate | <not_specific> | def from_rate(rate, denominator, to_int=False):
'''
Returns a counting-type stat from a rate and a denominator.
Note: this is essentially the product of the two arguments.
You might be wondering: so, why? You certainly don't have to! It's mostly for coherence
and the idea that this is "all you need for your baseball maths" in a single
place. Note that get_rate (the complement of this function) allows us to decide whether to
allow infitie values instead of handling a division by zero error. When coupled with pandas,
this gives us a good way impute stats via df.replace(inf,df['ERA'].mean()) for example.
'''
out = rate * denominator
if to_int:
out = int(round(out))
return out |
Returns a counting-type stat from a rate and a denominator.
Note: this is essentially the product of the two arguments.
You might be wondering: so, why? You certainly don't have to! It's mostly for coherence
and the idea that this is "all you need for your baseball maths" in a single
place. Note that get_rate (the complement of this function) allows us to decide whether to
allow infinite values instead of handling a division by zero error. When coupled with pandas,
this gives us a good way to impute stats via df.replace(inf,df['ERA'].mean()) for example.
| Returns a counting-type stat from a rate and a denominator.
Note: this is essentially the product of the two arguments.
You might be wondering: so, why. You certainly don't have to. It's mostly for coherence
and the idea that this is "all you need for your baseball maths" in a single
place. Note that get_rate (the complement of this function) allows us to decide whether to
allow infinite values instead of handling a division by zero error. When coupled with pandas,
this gives us a good way to impute stats via df.replace(inf,df['ERA'].mean()) for example. | [
"Returns",
"a",
"counting",
"-",
"type",
"stat",
"from",
"a",
"rate",
"and",
"a",
"denominator",
".",
"Note",
":",
"this",
"is",
"essentially",
"the",
"product",
"of",
"the",
"two",
"arguments",
".",
"You",
"might",
"be",
"wondering",
":",
"so",
"why",
".",
"You",
"certainly",
"don",
"'",
"t",
"have",
"to",
".",
"It",
"'",
"s",
"mostly",
"for",
"coherence",
"and",
"the",
"idea",
"that",
"this",
"is",
"\"",
"all",
"you",
"need",
"for",
"your",
"baseball",
"maths",
"\"",
"in",
"a",
"single",
"place",
".",
"Note",
"that",
"get_rate",
"(",
"the",
"complement",
"of",
"this",
"function",
")",
"allows",
"us",
"to",
"decide",
"whether",
"to",
"allow",
"infitie",
"values",
"instead",
"of",
"handling",
"a",
"division",
"by",
"zero",
"error",
".",
"When",
"coupled",
"with",
"pandas",
"this",
"gives",
"us",
"a",
"good",
"way",
"impute",
"stats",
"via",
"df",
".",
"replace",
"(",
"inf",
"df",
"[",
"'",
"ERA",
"'",
"]",
".",
"mean",
"()",
")",
"for",
"example",
"."
] | def from_rate(rate, denominator, to_int=False):
out = rate * denominator
if to_int:
out = int(round(out))
return out | [
"def",
"from_rate",
"(",
"rate",
",",
"denominator",
",",
"to_int",
"=",
"False",
")",
":",
"out",
"=",
"rate",
"*",
"denominator",
"if",
"to_int",
":",
"out",
"=",
"int",
"(",
"round",
"(",
"out",
")",
")",
"return",
"out"
] | Returns a counting-type stat from a rate and a denominator. | [
"Returns",
"a",
"counting",
"-",
"type",
"stat",
"from",
"a",
"rate",
"and",
"a",
"denominator",
"."
] | [
"'''\n Returns a counting-type stat from a rate and a denominator.\n \n Note: this is essentially the product of the two arguments.\n\n You might be wondering: so, why? You certainly don't have to! It's mostly for coherence\n and the idea that this is \"all you need for your baseball maths\" in a single\n place. Note that get_rate (the complement of this function) allows us to decide whether to \n allow infinite values instead of handling a division by zero error. When coupled with pandas,\n this gives us a good way to impute stats via df.replace(inf,df['ERA'].mean()) for example.\n '''"
] | [
{
"param": "rate",
"type": null
},
{
"param": "denominator",
"type": null
},
{
"param": "to_int",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "rate",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "denominator",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "to_int",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def from_rate(rate, denominator, to_int=False):
out = rate * denominator
if to_int:
out = int(round(out))
return out | 63 | 716 |
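A quick worked example of the rate-to-count helper above; the baseball numbers are hypothetical:

```python
def from_rate(rate, denominator, to_int=False):
    out = rate * denominator
    if to_int:
        out = int(round(out))
    return out

print(from_rate(0.300, 600))                    # 180.0 hits from a .300 average over 600 at-bats
print(from_rate(4.50, 200 / 9.0, to_int=True))  # 100 earned runs implied by a 4.50 ERA over 200 IP
```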
1ffae5cebb1ccc9bb42ff4492d22793eeb7fe105 | lbouma/Cyclopath | doc/landonb/geometry_useful.py | [
"Apache-2.0"
] | Python | geoPointInsideBox | <not_specific> | def geoPointInsideBox(p, edges):
"""
returns True only if p lies inside or on the sides of box
"""
y,x = p
(y0,x0),(y1,x1) = edges
if y0>y1:
y0,y1 = y1,y0
if x0>x1:
x0,x1 = x1,x0
return x0<=x<=x1 and y0<=y<=y1 |
returns True only if p lies inside or on the sides of box
| returns True only if p lies inside or on the sides of box | [
"returns",
"True",
"only",
"if",
"p",
"lies",
"inside",
"or",
"on",
"the",
"sides",
"of",
"box"
] | def geoPointInsideBox(p, edges):
y,x = p
(y0,x0),(y1,x1) = edges
if y0>y1:
y0,y1 = y1,y0
if x0>x1:
x0,x1 = x1,x0
return x0<=x<=x1 and y0<=y<=y1 | [
"def",
"geoPointInsideBox",
"(",
"p",
",",
"edges",
")",
":",
"y",
",",
"x",
"=",
"p",
"(",
"y0",
",",
"x0",
")",
",",
"(",
"y1",
",",
"x1",
")",
"=",
"edges",
"if",
"y0",
">",
"y1",
":",
"y0",
",",
"y1",
"=",
"y1",
",",
"y0",
"if",
"x0",
">",
"x1",
":",
"x0",
",",
"x1",
"=",
"x1",
",",
"x0",
"return",
"x0",
"<=",
"x",
"<=",
"x1",
"and",
"y0",
"<=",
"y",
"<=",
"y1"
] | returns True only if p lies inside or on the sides of box | [
"returns",
"True",
"only",
"if",
"p",
"lies",
"inside",
"or",
"on",
"the",
"sides",
"of",
"box"
] | [
"\"\"\"\n returns True only if p lies inside or on the sides of box\n \"\"\""
] | [
{
"param": "p",
"type": null
},
{
"param": "edges",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "p",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "edges",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def geoPointInsideBox(p, edges):
y,x = p
(y0,x0),(y1,x1) = edges
if y0>y1:
y0,y1 = y1,y0
if x0>x1:
x0,x1 = x1,x0
return x0<=x<=x1 and y0<=y<=y1 | 64 | 149 |
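A small demonstration of the point-in-box test above; the corner coordinates may be given in any order because the function normalises them first:

```python
def geoPointInsideBox(p, edges):
    y, x = p
    (y0, x0), (y1, x1) = edges
    if y0 > y1:
        y0, y1 = y1, y0   # normalise so y0 <= y1
    if x0 > x1:
        x0, x1 = x1, x0   # normalise so x0 <= x1
    return x0 <= x <= x1 and y0 <= y <= y1

box = ((45.0, -92.5), (44.0, -93.5))           # (lat, lon) corners, deliberately unordered
print(geoPointInsideBox((44.5, -93.0), box))   # True  (strictly inside)
print(geoPointInsideBox((44.0, -93.5), box))   # True  (a point on the boundary counts)
print(geoPointInsideBox((46.0, -93.0), box))   # False (outside)
```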
bb3705c4383f3156e1e20fb980b44c1bcf644701 | cbrew/chartparse | python/chart/chart.py | [
"Apache-2.0"
] | Python | edge_summary | <not_specific> | def edge_summary(v):
"""
Summarize the contents of the chart.
>>> v = parse(["the","pigeons","suffer"],return_chart=True)
['the', 'pigeons', 'suffer']
Parse 1:
S
Np
det the
Nn
n pigeons
Vp
v suffer
1 parses
>>> edge_summary(v)
{'partials': 31, 'completes': 12}
"""
ps = set().union(*v.partials)
cs = set().union(*v.completes)
ps_no_pred = {p for p in ps if p not in v.prev}
cs_no_pred = {p for p in cs if p not in v.prev}
assert len(cs_no_pred) == 0
assert len(ps_no_pred) == 0
return dict(partials= len(ps),completes=len(cs)) |
Summarize the contents of the chart.
>>> v = parse(["the","pigeons","suffer"],return_chart=True)
['the', 'pigeons', 'suffer']
Parse 1:
S
Np
det the
Nn
n pigeons
Vp
v suffer
1 parses
>>> edge_summary(v)
{'partials': 31, 'completes': 12}
| Summarize the contents of the chart. | [
"Summarize",
"the",
"contents",
"of",
"the",
"chart",
"."
] | def edge_summary(v):
ps = set().union(*v.partials)
cs = set().union(*v.completes)
ps_no_pred = {p for p in ps if p not in v.prev}
cs_no_pred = {p for p in cs if p not in v.prev}
assert len(cs_no_pred) == 0
assert len(ps_no_pred) == 0
return dict(partials= len(ps),completes=len(cs)) | [
"def",
"edge_summary",
"(",
"v",
")",
":",
"ps",
"=",
"set",
"(",
")",
".",
"union",
"(",
"*",
"v",
".",
"partials",
")",
"cs",
"=",
"set",
"(",
")",
".",
"union",
"(",
"*",
"v",
".",
"completes",
")",
"ps_no_pred",
"=",
"{",
"p",
"for",
"p",
"in",
"ps",
"if",
"p",
"not",
"in",
"v",
".",
"prev",
"}",
"cs_no_pred",
"=",
"{",
"p",
"for",
"p",
"in",
"cs",
"if",
"p",
"not",
"in",
"v",
".",
"prev",
"}",
"assert",
"len",
"(",
"cs_no_pred",
")",
"==",
"0",
"assert",
"len",
"(",
"ps_no_pred",
")",
"==",
"0",
"return",
"dict",
"(",
"partials",
"=",
"len",
"(",
"ps",
")",
",",
"completes",
"=",
"len",
"(",
"cs",
")",
")"
] | Summarize the contents of the chart. | [
"Summarize",
"the",
"contents",
"of",
"the",
"chart",
"."
] | [
"\"\"\"\n Summarize the contents of the chart. \n >>> v = parse([\"the\",\"pigeons\",\"suffer\"],return_chart=True)\n ['the', 'pigeons', 'suffer']\n Parse 1:\n S\n Np\n det the\n Nn\n n pigeons\n Vp\n v suffer\n 1 parses\n >>> edge_summary(v)\n {'partials': 31, 'completes': 12}\n \"\"\""
] | [
{
"param": "v",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "v",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def edge_summary(v):
ps = set().union(*v.partials)
cs = set().union(*v.completes)
ps_no_pred = {p for p in ps if p not in v.prev}
cs_no_pred = {p for p in cs if p not in v.prev}
assert len(cs_no_pred) == 0
assert len(ps_no_pred) == 0
return dict(partials= len(ps),completes=len(cs)) | 65 | 520 |
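`edge_summary` needs a chart object exposing `partials`, `completes` and `prev`; the `FakeChart` below is a guessed minimal stand-in (not the real chartparse class) so the function can run on its own:

```python
def edge_summary(v):
    ps = set().union(*v.partials)
    cs = set().union(*v.completes)
    ps_no_pred = {p for p in ps if p not in v.prev}
    cs_no_pred = {p for p in cs if p not in v.prev}
    assert len(cs_no_pred) == 0
    assert len(ps_no_pred) == 0
    return dict(partials=len(ps), completes=len(cs))

class FakeChart:
    """Per-position sets of edge labels; every edge has a predecessor recorded."""
    partials = [{'p1', 'p2'}, {'p2', 'p3'}]
    completes = [{'c1'}, {'c1', 'c2'}]
    prev = {'p1': (), 'p2': (), 'p3': (), 'c1': (), 'c2': ()}

print(edge_summary(FakeChart()))   # {'partials': 3, 'completes': 2}
```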
6777462f81774b840637af9418604b1c28b0243d | xavipor/myAdversarial | game_agent.py | [
"MIT"
] | Python | custom_score_3 | <not_specific> | def custom_score_3(game, player):
"""Calculate the heuristic value of a game state from the point of view
of the given player.
Note: this function should be called from within a Player instance as
`self.score()` -- you should not need to call this function directly.
Parameters
----------
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game (e.g., player locations and blocked cells).
player : object
A player instance in the current game (i.e., an object corresponding to
one of the player objects `game.__player_1__` or `game.__player_2__`.)
Returns
-------
float
The heuristic value of the current game state to the specified player.
"""
# TODO: finish this function!
if game.is_loser(player):
return float("-inf")
if game.is_winner(player):
return float("inf")
# Try to disturb opponent
tunning_1=4
nmovements=2
tunning_2=4
center=[float(game.height/2),float(game.width/2)]
movements=game.move_count
loc=game.get_player_location(player)
total=float(game.height*game.width)
percentage=100*(total-float(len(game.get_blank_spaces())))/total
my_moves = len(game.get_legal_moves(player))
enemy_moves = len(game.get_legal_moves(game.get_opponent(player)))
distance=float(abs(loc[0]-center[0])+abs(loc[1]-center[1]))
overlap = set(game.get_legal_moves(player)) & set(game.get_legal_moves(game.get_opponent(player)))
if movements <=nmovements :
if distance==0.0:
distance=1
return 100.0/ distance
else:
return float(my_moves - tunning_1 * enemy_moves) + 50*len(overlap) | Calculate the heuristic value of a game state from the point of view
of the given player.
Note: this function should be called from within a Player instance as
`self.score()` -- you should not need to call this function directly.
Parameters
----------
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game (e.g., player locations and blocked cells).
player : object
A player instance in the current game (i.e., an object corresponding to
one of the player objects `game.__player_1__` or `game.__player_2__`.)
Returns
-------
float
The heuristic value of the current game state to the specified player.
| Calculate the heuristic value of a game state from the point of view
of the given player.
this function should be called from within a Player instance as
`self.score()` -- you should not need to call this function directly.
Parameters
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game .
player : object
A player instance in the current game
Returns
float
The heuristic value of the current game state to the specified player. | [
"Calculate",
"the",
"heuristic",
"value",
"of",
"a",
"game",
"state",
"from",
"the",
"point",
"of",
"view",
"of",
"the",
"given",
"player",
".",
"this",
"function",
"should",
"be",
"called",
"from",
"within",
"a",
"Player",
"instance",
"as",
"`",
"self",
".",
"score",
"()",
"`",
"--",
"you",
"should",
"not",
"need",
"to",
"call",
"this",
"function",
"directly",
".",
"Parameters",
"game",
":",
"`",
"isolation",
".",
"Board",
"`",
"An",
"instance",
"of",
"`",
"isolation",
".",
"Board",
"`",
"encoding",
"the",
"current",
"state",
"of",
"the",
"game",
".",
"player",
":",
"object",
"A",
"player",
"instance",
"in",
"the",
"current",
"game",
"Returns",
"float",
"The",
"heuristic",
"value",
"of",
"the",
"current",
"game",
"state",
"to",
"the",
"specified",
"player",
"."
] | def custom_score_3(game, player):
if game.is_loser(player):
return float("-inf")
if game.is_winner(player):
return float("inf")
tunning_1=4
nmovements=2
tunning_2=4
center=[float(game.height/2),float(game.width/2)]
movements=game.move_count
loc=game.get_player_location(player)
total=float(game.height*game.width)
percentage=100*(total-float(len(game.get_blank_spaces())))/total
my_moves = len(game.get_legal_moves(player))
enemy_moves = len(game.get_legal_moves(game.get_opponent(player)))
distance=float(abs(loc[0]-center[0])+abs(loc[1]-center[1]))
overlap = set(game.get_legal_moves(player)) & set(game.get_legal_moves(game.get_opponent(player)))
if movements <=nmovements :
if distance==0.0:
distance=1
return 100.0/ distance
else:
return float(my_moves - tunning_1 * enemy_moves) + 50*len(overlap) | [
"def",
"custom_score_3",
"(",
"game",
",",
"player",
")",
":",
"if",
"game",
".",
"is_loser",
"(",
"player",
")",
":",
"return",
"float",
"(",
"\"-inf\"",
")",
"if",
"game",
".",
"is_winner",
"(",
"player",
")",
":",
"return",
"float",
"(",
"\"inf\"",
")",
"tunning_1",
"=",
"4",
"nmovements",
"=",
"2",
"tunning_2",
"=",
"4",
"center",
"=",
"[",
"float",
"(",
"game",
".",
"height",
"/",
"2",
")",
",",
"float",
"(",
"game",
".",
"width",
"/",
"2",
")",
"]",
"movements",
"=",
"game",
".",
"move_count",
"loc",
"=",
"game",
".",
"get_player_location",
"(",
"player",
")",
"total",
"=",
"float",
"(",
"game",
".",
"height",
"*",
"game",
".",
"width",
")",
"percentage",
"=",
"100",
"*",
"(",
"total",
"-",
"float",
"(",
"len",
"(",
"game",
".",
"get_blank_spaces",
"(",
")",
")",
")",
")",
"/",
"total",
"my_moves",
"=",
"len",
"(",
"game",
".",
"get_legal_moves",
"(",
"player",
")",
")",
"enemy_moves",
"=",
"len",
"(",
"game",
".",
"get_legal_moves",
"(",
"game",
".",
"get_opponent",
"(",
"player",
")",
")",
")",
"distance",
"=",
"float",
"(",
"abs",
"(",
"loc",
"[",
"0",
"]",
"-",
"center",
"[",
"0",
"]",
")",
"+",
"abs",
"(",
"loc",
"[",
"1",
"]",
"-",
"center",
"[",
"1",
"]",
")",
")",
"overlap",
"=",
"set",
"(",
"game",
".",
"get_legal_moves",
"(",
"player",
")",
")",
"&",
"set",
"(",
"game",
".",
"get_legal_moves",
"(",
"game",
".",
"get_opponent",
"(",
"player",
")",
")",
")",
"if",
"movements",
"<=",
"nmovements",
":",
"if",
"distance",
"==",
"0.0",
":",
"distance",
"=",
"1",
"return",
"100.0",
"/",
"distance",
"else",
":",
"return",
"float",
"(",
"my_moves",
"-",
"tunning_1",
"*",
"enemy_moves",
")",
"+",
"50",
"*",
"len",
"(",
"overlap",
")"
] | Calculate the heuristic value of a game state from the point of view
of the given player. | [
"Calculate",
"the",
"heuristic",
"value",
"of",
"a",
"game",
"state",
"from",
"the",
"point",
"of",
"view",
"of",
"the",
"given",
"player",
"."
] | [
"\"\"\"Calculate the heuristic value of a game state from the point of view\n of the given player.\n\n Note: this function should be called from within a Player instance as\n `self.score()` -- you should not need to call this function directly.\n\n Parameters\n ----------\n game : `isolation.Board`\n An instance of `isolation.Board` encoding the current state of the\n game (e.g., player locations and blocked cells).\n\n player : object\n A player instance in the current game (i.e., an object corresponding to\n one of the player objects `game.__player_1__` or `game.__player_2__`.)\n\n Returns\n -------\n float\n The heuristic value of the current game state to the specified player.\n \"\"\"",
"# TODO: finish this function!",
"# Try to disturb opponent"
] | [
{
"param": "game",
"type": null
},
{
"param": "player",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "game",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "player",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def custom_score_3(game, player):
if game.is_loser(player):
return float("-inf")
if game.is_winner(player):
return float("inf")
tunning_1=4
nmovements=2
tunning_2=4
center=[float(game.height/2),float(game.width/2)]
movements=game.move_count
loc=game.get_player_location(player)
total=float(game.height*game.width)
percentage=100*(total-float(len(game.get_blank_spaces())))/total
my_moves = len(game.get_legal_moves(player))
enemy_moves = len(game.get_legal_moves(game.get_opponent(player)))
distance=float(abs(loc[0]-center[0])+abs(loc[1]-center[1]))
overlap = set(game.get_legal_moves(player)) & set(game.get_legal_moves(game.get_opponent(player)))
if movements <=nmovements :
if distance==0.0:
distance=1
return 100.0/ distance
else:
return float(my_moves - tunning_1 * enemy_moves) + 50*len(overlap) | 66 | 447 |
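The heuristic above expects an `isolation.Board`; to make it runnable without that package, the sketch below reproduces the scorer (lightly trimmed: the unused `tunning_2`, `total` and `percentage` locals are dropped) and drives it with a hypothetical `FakeBoard` stub exposing only the methods actually called:

```python
def custom_score_3(game, player):
    if game.is_loser(player):
        return float("-inf")
    if game.is_winner(player):
        return float("inf")
    tunning_1 = 4
    nmovements = 2
    center = [float(game.height / 2), float(game.width / 2)]
    movements = game.move_count
    loc = game.get_player_location(player)
    my_moves = len(game.get_legal_moves(player))
    enemy_moves = len(game.get_legal_moves(game.get_opponent(player)))
    distance = float(abs(loc[0] - center[0]) + abs(loc[1] - center[1]))
    overlap = set(game.get_legal_moves(player)) & set(game.get_legal_moves(game.get_opponent(player)))
    if movements <= nmovements:
        if distance == 0.0:
            distance = 1
        return 100.0 / distance           # early game: stay near the centre
    else:
        return float(my_moves - tunning_1 * enemy_moves) + 50 * len(overlap)

class FakeBoard:
    """Stub with just the Board API this heuristic touches."""
    height, width, move_count = 7, 7, 5
    def is_loser(self, player): return False
    def is_winner(self, player): return False
    def get_player_location(self, player): return (3, 3)
    def get_legal_moves(self, player):
        return [(1, 2), (2, 1), (5, 4)] if player == 'me' else [(5, 4), (6, 5)]
    def get_opponent(self, player): return 'opp'

print(custom_score_3(FakeBoard(), 'me'))   # 3 - 4*2 + 50*1 = 45.0
```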
ad5656be7921fe524c0908b6a9aaf3c8f0bab780 | Laufire/laufire | dev/laufire/filesys.py | [
"MIT"
] | Python | iterateContent | null | def iterateContent(filePath, width=4096):
r"""Reads the given file as small chunks. This could be used to read large files without buffer overruns.
"""
with open(filePath, 'rb') as file:
for chunk in iter(lambda: file.read(width), b''):
yield chunk | r"""Reads the given file as small chunks. This could be used to read large files without buffer overruns.
| r"""Reads the given file as small chunks. This could be used to read large files without buffer overruns. | [
"r",
"\"",
"\"",
"\"",
"Reads",
"the",
"given",
"file",
"as",
"small",
"chunks",
".",
"This",
"could",
"be",
"used",
"to",
"read",
"large",
"files",
"without",
"buffer",
"overruns",
"."
] | def iterateContent(filePath, width=4096):
with open(filePath, 'rb') as file:
for chunk in iter(lambda: file.read(width), b''):
yield chunk | [
"def",
"iterateContent",
"(",
"filePath",
",",
"width",
"=",
"4096",
")",
":",
"with",
"open",
"(",
"filePath",
",",
"'rb'",
")",
"as",
"file",
":",
"for",
"chunk",
"in",
"iter",
"(",
"lambda",
":",
"file",
".",
"read",
"(",
"width",
")",
",",
"b''",
")",
":",
"yield",
"chunk"
] | r"""Reads the given file as small chunks. | [
"r",
"\"",
"\"",
"\"",
"Reads",
"the",
"given",
"file",
"as",
"small",
"chunks",
"."
] | [
"r\"\"\"Reads the given file as small chunks. This could be used to read large files without buffer overruns.\n\t\"\"\""
] | [
{
"param": "filePath",
"type": null
},
{
"param": "width",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "filePath",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "width",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def iterateContent(filePath, width=4096):
with open(filePath, 'rb') as file:
for chunk in iter(lambda: file.read(width), b''):
yield chunk | 67 | 522 |
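A self-contained demonstration of the chunked reader above, using a small scratch file so the generator's behaviour is visible:

```python
import os
import tempfile

def iterateContent(filePath, width=4096):
    with open(filePath, 'rb') as file:
        for chunk in iter(lambda: file.read(width), b''):
            yield chunk

path = os.path.join(tempfile.gettempdir(), 'iterate_demo.bin')
with open(path, 'wb') as f:
    f.write(b'0123456789')

print(list(iterateContent(path, width=4)))   # [b'0123', b'4567', b'89']
os.remove(path)
```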
c7213c53d170ac541cda11324c75c2eeb51403fb | admariner/ecommercetools | ecommercetools/utilities/metrics.py | [
"MIT"
] | Python | revenue_per_unit | <not_specific> | def revenue_per_unit(total_revenue, total_units):
"""Return the total revenue per unit for the period.
Args:
total_revenue (float): Total revenue generated during the period.
total_units (int): Total units sold during the period.
Returns:
Total revenue per unit during the period.
"""
return total_revenue / total_units | Return the total revenue per unit for the period.
Args:
total_revenue (float): Total revenue generated during the period.
total_units (int): Total units sold during the period.
Returns:
Total revenue per unit during the period.
| Return the total revenue per unit for the period. | [
"Return",
"the",
"total",
"revenue",
"per",
"unit",
"for",
"the",
"period",
"."
] | def revenue_per_unit(total_revenue, total_units):
return total_revenue / total_units | [
"def",
"revenue_per_unit",
"(",
"total_revenue",
",",
"total_units",
")",
":",
"return",
"total_revenue",
"/",
"total_units"
] | Return the total revenue per unit for the period. | [
"Return",
"the",
"total",
"revenue",
"per",
"unit",
"for",
"the",
"period",
"."
] | [
"\"\"\"Return the total revenue per unit for the period.\n\n Args:\n total_revenue (float): Total revenue generated during the period.\n total_units (int): Total units sold during the period.\n\n Returns:\n Total revenue per unit during the period.\n \"\"\""
] | [
{
"param": "total_revenue",
"type": null
},
{
"param": "total_units",
"type": null
}
] | {
"returns": [
{
"docstring": "Total revenue per unit during the period.",
"docstring_tokens": [
"Total",
"revenue",
"per",
"unit",
"during",
"the",
"period",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "total_revenue",
"type": null,
"docstring": "Total revenue generated during the period.",
"docstring_tokens": [
"Total",
"revenue",
"generated",
"during",
"the",
"period",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "total_units",
"type": null,
"docstring": "Total units sold during the period.",
"docstring_tokens": [
"Total",
"units",
"sold",
"during",
"the",
"period",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | def revenue_per_unit(total_revenue, total_units):
return total_revenue / total_units | 68 | 306 |
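A one-line usage example for the metric above; the revenue and unit figures are made up:

```python
def revenue_per_unit(total_revenue, total_units):
    return total_revenue / total_units

print(revenue_per_unit(12500.0, 500))   # 25.0
# Note: total_units == 0 raises ZeroDivisionError, so guard upstream if that can occur.
```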
37516c09b27b0ff3e34b22cbd2e2a1eb1e282a19 | aws-samples/research-pacs-on-aws | python/de-identifier/research_pacs/de_identifier/dicom_tag_path_pattern.py | [
"MIT-0"
] | Python | _get_elem_tag_hexa | <not_specific> | def _get_elem_tag_hexa(elem):
"""
Returns an 8-hexadecimal digit corresponding to the data element tag.
Args:
elem: pydicom Data Element
"""
return hex(elem.tag.group)[2:].upper().zfill(4) + hex(elem.tag.elem)[2:].upper().zfill(4) |
Returns an 8-hexadecimal digit corresponding to the data element tag.
Args:
elem: pydicom Data Element
| Returns an 8-hexadecimal digit corresponding to the data element tag. | [
"Returns",
"an",
"8",
"-",
"hexadecimal",
"digit",
"corresponding",
"to",
"the",
"data",
"element",
"tag",
"."
] | def _get_elem_tag_hexa(elem):
return hex(elem.tag.group)[2:].upper().zfill(4) + hex(elem.tag.elem)[2:].upper().zfill(4) | [
"def",
"_get_elem_tag_hexa",
"(",
"elem",
")",
":",
"return",
"hex",
"(",
"elem",
".",
"tag",
".",
"group",
")",
"[",
"2",
":",
"]",
".",
"upper",
"(",
")",
".",
"zfill",
"(",
"4",
")",
"+",
"hex",
"(",
"elem",
".",
"tag",
".",
"elem",
")",
"[",
"2",
":",
"]",
".",
"upper",
"(",
")",
".",
"zfill",
"(",
"4",
")"
] | Returns an 8-hexadecimal digit corresponding to the data element tag. | [
"Returns",
"an",
"8",
"-",
"hexadecimal",
"digit",
"corresponding",
"to",
"the",
"data",
"element",
"tag",
"."
] | [
"\"\"\"\n Returns an 8-hexadecimal digit corresponding to the data element tag.\n \n Args:\n elem: pydicom Data Element\n \n \"\"\""
] | [
{
"param": "elem",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "elem",
"type": null,
"docstring": "pydicom Data Element",
"docstring_tokens": [
"pydicom",
"Data",
"Element"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _get_elem_tag_hexa(elem):
return hex(elem.tag.group)[2:].upper().zfill(4) + hex(elem.tag.elem)[2:].upper().zfill(4) | 69 | 799 |
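pydicom data elements carry a `tag` with integer `group` and `elem` fields; the tiny stand-ins below mimic that shape so the formatter runs without pydicom installed:

```python
class FakeTag:
    def __init__(self, group, elem):
        self.group, self.elem = group, elem

class FakeElement:
    def __init__(self, group, elem):
        self.tag = FakeTag(group, elem)

def _get_elem_tag_hexa(elem):
    return hex(elem.tag.group)[2:].upper().zfill(4) + hex(elem.tag.elem)[2:].upper().zfill(4)

print(_get_elem_tag_hexa(FakeElement(0x0010, 0x0010)))  # 00100010 (PatientName)
print(_get_elem_tag_hexa(FakeElement(0x7FE0, 0x0010)))  # 7FE00010 (PixelData)
```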
ccb28a6c89695e1895b09241f2c913633c3636f4 | ngageoint/voxel-globe | voxel_globe/visualsfm/tools.py | [
"MIT"
] | Python | writeGcpFile | null | def writeGcpFile(inputs, outputGps):
''' inputs - List of objects, with .filename and .xyz fields, in degree/meters
outputGps - output gps filename '''
with open(outputGps, 'w') as fid:
for input in inputs:
fid.write(input['filename'] +
(' %0.12g'*3) % (input['xyz'][0], input['xyz'][1], input['xyz'][2]) +'\n') | inputs - List of objcets, with .filename and .xyz fields, in degree/meters
outputGps - output gps filename | List of objcets, with .filename and .xyz fields, in degree/meters
outputGps - output gps filename | [
"List",
"of",
"objcets",
"with",
".",
"filename",
"and",
".",
"xyz",
"fields",
"in",
"degree",
"/",
"meters",
"outputGps",
"-",
"output",
"gps",
"filename"
] | def writeGcpFile(inputs, outputGps):
with open(outputGps, 'w') as fid:
for input in inputs:
fid.write(input['filename'] +
(' %0.12g'*3) % (input['xyz'][0], input['xyz'][1], input['xyz'][2]) +'\n') | [
"def",
"writeGcpFile",
"(",
"inputs",
",",
"outputGps",
")",
":",
"with",
"open",
"(",
"outputGps",
",",
"'w'",
")",
"as",
"fid",
":",
"for",
"input",
"in",
"inputs",
":",
"fid",
".",
"write",
"(",
"input",
"[",
"'filename'",
"]",
"+",
"(",
"' %0.12g'",
"*",
"3",
")",
"%",
"(",
"input",
"[",
"'xyz'",
"]",
"[",
"0",
"]",
",",
"input",
"[",
"'xyz'",
"]",
"[",
"1",
"]",
",",
"input",
"[",
"'xyz'",
"]",
"[",
"2",
"]",
")",
"+",
"'\\n'",
")"
] | inputs - List of objects, with .filename and .xyz fields, in degree/meters
outputGps - output gps filename | [
"inputs",
"-",
"List",
"of",
"objects",
"with",
".",
"filename",
"and",
".",
"xyz",
"fields",
"in",
"degree",
"/",
"meters",
"outputGps",
"-",
"output",
"gps",
"filename"
] | [
"''' inputs - List of objects, with .filename and .xyz fields, in degree/meters\n outputGps - output gps filename '''"
] | [
{
"param": "inputs",
"type": null
},
{
"param": "outputGps",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "inputs",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "outputGps",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def writeGcpFile(inputs, outputGps):
with open(outputGps, 'w') as fid:
for input in inputs:
fid.write(input['filename'] +
(' %0.12g'*3) % (input['xyz'][0], input['xyz'][1], input['xyz'][2]) +'\n') | 70 | 724 |
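A runnable sketch of the GCP writer above, pointed at a temporary file with one hypothetical image entry:

```python
import os
import tempfile

def writeGcpFile(inputs, outputGps):
    with open(outputGps, 'w') as fid:
        for input in inputs:
            fid.write(input['filename'] +
                      (' %0.12g' * 3) % (input['xyz'][0], input['xyz'][1], input['xyz'][2]) + '\n')

out = os.path.join(tempfile.gettempdir(), 'demo_gcp.txt')
writeGcpFile([{'filename': 'img_001.jpg', 'xyz': (-93.265, 44.9778, 256.0)}], out)
print(open(out).read().strip())   # img_001.jpg -93.265 44.9778 256
os.remove(out)
```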
b24801507e94d1354b33c33a0e640be2eeb06113 | swipswaps/juriscraper | juriscraper/pacer/utils.py | [
"BSD-2-Clause"
] | Python | reverse_goDLS_function | <not_specific> | def reverse_goDLS_function(s):
"""Extract the arguments from the goDLS JavaScript function.
In: goDLS('/doc1/01712427473','56121','69','','','1','','');return(false);
Out: {
'form_post_url': '/doc1/01712427473',
'caseid': '56121',
'de_seq_num': '69',
'got_receipt': '',
'pdf_header': '',
'pdf_toggle_possible': '1',
'magic_num': '',
'hdr': '',
}
The key names correspond to the form field names in the JavaScript on PACER,
but we don't actually know what each of these values does. Our best
speculation is:
- form_post_url: Where the form is posted to. The HTML 'action' attribute.
- caseid: The internal PACER ID for the case.
- de_seq_num: Unclear. This seems to be the internal ID for the document,
but this field can be omitted without any known issues.
- got_receipt: If set to '1', this will bypass the receipt page and
immediately direct you to the page where the PDF is embedded in an
iframe.
- pdf_header: Can be either 1 or 2. 1: Show the header. 2: No header.
- pdf_toggle_possible: This seems to always be 1. Could be that some courts
do not allow the header to be turned off, but we haven't discovered that
yet.
- magic_num: This is used for the "One free look" downloads.
- hdr: Unclear what HDR stands for but on items that have attachments,
passing this parameter bypasses the download attachment screen and takes
you directly to the PDF that you're trying to download. For an example,
see document 108 from 1:12-cv-00102 in tnmd, which is a free opinion that
has an attachment. Note that the eighth parameter was added some time
after 2010. Dockets older than that date only have seven responses.
"""
args = re.findall("\'(.*?)\'", s)
parts = {
'form_post_url': args[0],
'caseid': args[1],
'de_seq_num': args[2],
'got_receipt': args[3],
'pdf_header': args[4],
'pdf_toggle_possible': args[5],
'magic_num': args[6],
}
try:
parts['hdr'] = args[7]
except IndexError:
# At some point dockets added this eighth parameter. Older ones lack it
parts['hdr'] = None
return parts | Extract the arguments from the goDLS JavaScript function.
In: goDLS('/doc1/01712427473','56121','69','','','1','','');return(false);
Out: {
'form_post_url': '/doc1/01712427473',
'caseid': '56121',
'de_seq_num': '69',
'got_receipt': '',
'pdf_header': '',
'pdf_toggle_possible': '1',
'magic_num': '',
'hdr': '',
}
The key names correspond to the form field names in the JavaScript on PACER,
but we don't actually know what each of these values does. Our best
speculation is:
- form_post_url: Where the form is posted to. The HTML 'action' attribute.
- caseid: The internal PACER ID for the case.
- de_seq_num: Unclear. This seems to be the internal ID for the document,
but this field can be omitted without any known issues.
- got_receipt: If set to '1', this will bypass the receipt page and
immediately direct you to the page where the PDF is embedded in an
iframe.
- pdf_header: Can be either 1 or 2. 1: Show the header. 2: No header.
- pdf_toggle_possible: This seems to always be 1. Could be that some courts
do not allow the header to be turned off, but we haven't discovered that
yet.
- magic_num: This is used for the "One free look" downloads.
- hdr: Unclear what HDR stands for but on items that have attachments,
passing this parameter bypasses the download attachment screen and takes
you directly to the PDF that you're trying to download. For an example,
see document 108 from 1:12-cv-00102 in tnmd, which is a free opinion that
has an attachment. Note that the eighth parameter was added some time
after 2010. Dockets older than that date only have seven responses.
| Extract the arguments from the goDLS JavaScript function.
The key names correspond to the form field names in the JavaScript on PACER,
but we don't actually know what each of these values does. Our best
speculation is.
| [
"Extract",
"the",
"arguments",
"from",
"the",
"goDLS",
"JavaScript",
"function",
".",
"The",
"key",
"names",
"correspond",
"to",
"the",
"form",
"field",
"names",
"in",
"the",
"JavaScript",
"on",
"PACER",
"but",
"we",
"don",
"'",
"t",
"actually",
"know",
"what",
"each",
"of",
"these",
"values",
"does",
".",
"Our",
"best",
"speculation",
"is",
"."
] | def reverse_goDLS_function(s):
args = re.findall("\'(.*?)\'", s)
parts = {
'form_post_url': args[0],
'caseid': args[1],
'de_seq_num': args[2],
'got_receipt': args[3],
'pdf_header': args[4],
'pdf_toggle_possible': args[5],
'magic_num': args[6],
}
try:
parts['hdr'] = args[7]
except IndexError:
parts['hdr'] = None
return parts | [
"def",
"reverse_goDLS_function",
"(",
"s",
")",
":",
"args",
"=",
"re",
".",
"findall",
"(",
"\"\\'(.*?)\\'\"",
",",
"s",
")",
"parts",
"=",
"{",
"'form_post_url'",
":",
"args",
"[",
"0",
"]",
",",
"'caseid'",
":",
"args",
"[",
"1",
"]",
",",
"'de_seq_num'",
":",
"args",
"[",
"2",
"]",
",",
"'got_receipt'",
":",
"args",
"[",
"3",
"]",
",",
"'pdf_header'",
":",
"args",
"[",
"4",
"]",
",",
"'pdf_toggle_possible'",
":",
"args",
"[",
"5",
"]",
",",
"'magic_num'",
":",
"args",
"[",
"6",
"]",
",",
"}",
"try",
":",
"parts",
"[",
"'hdr'",
"]",
"=",
"args",
"[",
"7",
"]",
"except",
"IndexError",
":",
"parts",
"[",
"'hdr'",
"]",
"=",
"None",
"return",
"parts"
] | Extract the arguments from the goDLS JavaScript function. | [
"Extract",
"the",
"arguments",
"from",
"the",
"goDLS",
"JavaScript",
"function",
"."
] | [
"\"\"\"Extract the arguments from the goDLS JavaScript function.\n\n In: goDLS('/doc1/01712427473','56121','69','','','1','','');return(false);\n Out: {\n 'form_post_url': '/doc1/01712427473',\n 'caseid': '56121',\n 'de_seq_num': '69',\n 'got_receipt': '',\n 'pdf_header': '',\n 'pdf_toggle_possible': '1',\n 'magic_num': '',\n 'hdr': '',\n }\n\n The key names correspond to the form field names in the JavaScript on PACER,\n but we don't actually know what each of these values does. Our best\n speculation is:\n\n - form_post_url: Where the form is posted to. The HTML 'action' attribute.\n - caseid: The internal PACER ID for the case.\n - de_seq_num: Unclear. This seems to be the internal ID for the document,\n but this field can be omitted without any known issues.\n - got_receipt: If set to '1', this will bypass the receipt page and\n immediately direct you to the page where the PDF is embedded in an\n iframe.\n - pdf_header: Can be either 1 or 2. 1: Show the header. 2: No header.\n - pdf_toggle_possible: This seems to always be 1. Could be that some courts\n do not allow the header to be turned off, but we haven't discovered that\n yet.\n - magic_num: This is used for the \"One free look\" downloads.\n - hdr: Unclear what HDR stands for but on items that have attachments,\n passing this parameter bypasses the download attachment screen and takes\n you directly to the PDF that you're trying to download. For an example,\n see document 108 from 1:12-cv-00102 in tnmd, which is a free opinion that\n has an attachment. Note that the eighth parameter was added some time\n after 2010. Dockets older than that date only have seven responses.\n \"\"\"",
"# At some point dockets added this eighth parameter. Older ones lack it"
] | [
{
"param": "s",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "s",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import re
def reverse_goDLS_function(s):
args = re.findall("\'(.*?)\'", s)
parts = {
'form_post_url': args[0],
'caseid': args[1],
'de_seq_num': args[2],
'got_receipt': args[3],
'pdf_header': args[4],
'pdf_toggle_possible': args[5],
'magic_num': args[6],
}
try:
parts['hdr'] = args[7]
except IndexError:
parts['hdr'] = None
return parts | 71 | 732 |
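A usage sketch driving the parser above with the exact example string from its docstring:

```python
import re

def reverse_goDLS_function(s):
    args = re.findall("\'(.*?)\'", s)
    parts = {
        'form_post_url': args[0], 'caseid': args[1], 'de_seq_num': args[2],
        'got_receipt': args[3], 'pdf_header': args[4],
        'pdf_toggle_possible': args[5], 'magic_num': args[6],
    }
    try:
        parts['hdr'] = args[7]
    except IndexError:
        parts['hdr'] = None   # dockets older than ~2010 lack the eighth argument
    return parts

js = "goDLS('/doc1/01712427473','56121','69','','','1','','');return(false);"
parts = reverse_goDLS_function(js)
print(parts['form_post_url'], parts['caseid'], repr(parts['hdr']))
# /doc1/01712427473 56121 ''
```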
d0eed2c52c6a7966d44ce7fc7758b0cb58fba734 | mod9-asr/python-sdk | mod9/es/client.py | [
"BSD-2-Clause"
] | Python | index_lines | null | def index_lines(json_lines_by_speaker, es, index, title):
"""Index speaker-separated reply messages passed in as JSON-formatted lines."""
tracks = []
for speaker, json_lines in json_lines_by_speaker.items():
concats = []
for json_line in json_lines:
engine_reply = json.loads(json_line)
if engine_reply.get('final'):
concats += engine_reply.get('phrases', '')
if len(concats) > 0:
tracks.append({'asr': json.dumps(concats), 'speaker': speaker})
if len(tracks) > 0:
document = {'title': title, 'tracks': tracks}
index_id = str(uuid.uuid4())
es.index(index, index_id, document)
logging.info("Indexed audio in index %s with ID %s.", index, index_id)
else:
logging.info('Not indexing; no "phrases" fields found.') | Index speaker-separated reply messages passed in as JSON-formatted lines. | Index speaker-separated reply messages passed in as JSON-formatted lines. | [
"Index",
"speaker",
"-",
"separated",
"reply",
"messages",
"passed",
"in",
"as",
"JSON",
"-",
"formatted",
"lines",
"."
] | def index_lines(json_lines_by_speaker, es, index, title):
tracks = []
for speaker, json_lines in json_lines_by_speaker.items():
concats = []
for json_line in json_lines:
engine_reply = json.loads(json_line)
if engine_reply.get('final'):
concats += engine_reply.get('phrases', '')
if len(concats) > 0:
tracks.append({'asr': json.dumps(concats), 'speaker': speaker})
if len(tracks) > 0:
document = {'title': title, 'tracks': tracks}
index_id = str(uuid.uuid4())
es.index(index, index_id, document)
logging.info("Indexed audio in index %s with ID %s.", index, index_id)
else:
logging.info('Not indexing; no "phrases" fields found.') | [
"def",
"index_lines",
"(",
"json_lines_by_speaker",
",",
"es",
",",
"index",
",",
"title",
")",
":",
"tracks",
"=",
"[",
"]",
"for",
"speaker",
",",
"json_lines",
"in",
"json_lines_by_speaker",
".",
"items",
"(",
")",
":",
"concats",
"=",
"[",
"]",
"for",
"json_line",
"in",
"json_lines",
":",
"engine_reply",
"=",
"json",
".",
"loads",
"(",
"json_line",
")",
"if",
"engine_reply",
".",
"get",
"(",
"'final'",
")",
":",
"concats",
"+=",
"engine_reply",
".",
"get",
"(",
"'phrases'",
",",
"''",
")",
"if",
"len",
"(",
"concats",
")",
">",
"0",
":",
"tracks",
".",
"append",
"(",
"{",
"'asr'",
":",
"json",
".",
"dumps",
"(",
"concats",
")",
",",
"'speaker'",
":",
"speaker",
"}",
")",
"if",
"len",
"(",
"tracks",
")",
">",
"0",
":",
"document",
"=",
"{",
"'title'",
":",
"title",
",",
"'tracks'",
":",
"tracks",
"}",
"index_id",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"es",
".",
"index",
"(",
"index",
",",
"index_id",
",",
"document",
")",
"logging",
".",
"info",
"(",
"\"Indexed audio in index %s with ID %s.\"",
",",
"index",
",",
"index_id",
")",
"else",
":",
"logging",
".",
"info",
"(",
"'Not indexing; no \"phrases\" fields found.'",
")"
] | Index speaker-separated reply messages passed in as JSON-formatted lines. | [
"Index",
"speaker",
"-",
"separated",
"reply",
"messages",
"passed",
"in",
"as",
"JSON",
"-",
"formatted",
"lines",
"."
] | [
"\"\"\"Index speaker-separated reply messages passed in as JSON-formatted lines.\"\"\""
] | [
{
"param": "json_lines_by_speaker",
"type": null
},
{
"param": "es",
"type": null
},
{
"param": "index",
"type": null
},
{
"param": "title",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "json_lines_by_speaker",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "es",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "index",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "title",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import logging
import uuid
import json
def index_lines(json_lines_by_speaker, es, index, title):
tracks = []
for speaker, json_lines in json_lines_by_speaker.items():
concats = []
for json_line in json_lines:
engine_reply = json.loads(json_line)
if engine_reply.get('final'):
concats += engine_reply.get('phrases', '')
if len(concats) > 0:
tracks.append({'asr': json.dumps(concats), 'speaker': speaker})
if len(tracks) > 0:
document = {'title': title, 'tracks': tracks}
index_id = str(uuid.uuid4())
es.index(index, index_id, document)
logging.info("Indexed audio in index %s with ID %s.", index, index_id)
else:
logging.info('Not indexing; no "phrases" fields found.') | 72 | 508 |
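`index_lines` accepts any object with an `index(index, id, document)` method; `FakeES` below is an offline stand-in for the Elasticsearch client, and the reply lines are hypothetical engine output:

```python
import json
import logging
import uuid

def index_lines(json_lines_by_speaker, es, index, title):
    tracks = []
    for speaker, json_lines in json_lines_by_speaker.items():
        concats = []
        for json_line in json_lines:
            engine_reply = json.loads(json_line)
            if engine_reply.get('final'):
                concats += engine_reply.get('phrases', '')
        if len(concats) > 0:
            tracks.append({'asr': json.dumps(concats), 'speaker': speaker})
    if len(tracks) > 0:
        document = {'title': title, 'tracks': tracks}
        index_id = str(uuid.uuid4())
        es.index(index, index_id, document)
        logging.info("Indexed audio in index %s with ID %s.", index, index_id)
    else:
        logging.info('Not indexing; no "phrases" fields found.')

class FakeES:
    """Offline stand-in: just show what would have been indexed."""
    def index(self, index, index_id, document):
        print(index, document['title'], len(document['tracks']), 'track(s)')

replies = {'spk0': ['{"final": true, "phrases": ["hello there"]}', '{"final": false}'],
           'spk1': ['{"final": true, "phrases": ["general kenobi"]}']}
index_lines(replies, FakeES(), 'asr-demo', 'Sample call')   # asr-demo Sample call 2 track(s)
```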
ea1c7d1974b1bf929f94f1dd2e097291279992db | topliceanu/learn | cci-book/ed6/linked_lists.py | [
"MIT"
] | Python | delete_middle | null | def delete_middle(middle):
""" 2.3 Delete Middle Node: Implement an algorithm to delete a node in the
middle (i.e., any node but the first and last node, not necessarily the exact
middle) of a singly linked list, given only access to that node.
EXAMPLE
Input: the node c from the linked list a->b->c->d->e->f
Result: nothing is returned, but the new linked list looks like a->b->d->e->f
"""
middle.value = middle.next.value
middle.next = middle.next.next | 2.3 Delete Middle Node: Implement an algorithm to delete a node in the
middle (i.e., any node but the first and last node, not necessarily the exact
middle) of a singly linked list, given only access to that node.
EXAMPLE
Input: the node c from the linked list a->b->c->d->e->f
Result: nothing is returned, but the new linked list looks like a->b->d->e->f
| 2.3 Delete Middle Node: Implement an algorithm to delete a node in the
middle of a singly linked list, given only access to that node. | [
"2",
".",
"3",
"Delete",
"Middle",
"Node",
":",
"Implement",
"an",
"algorithm",
"to",
"delete",
"a",
"node",
"in",
"the",
"middle",
"of",
"a",
"singly",
"linked",
"list",
"given",
"only",
"access",
"to",
"that",
"node",
"."
] | def delete_middle(middle):
middle.value = middle.next.value
middle.next = middle.next.next | [
"def",
"delete_middle",
"(",
"middle",
")",
":",
"middle",
".",
"value",
"=",
"middle",
".",
"next",
".",
"value",
"middle",
".",
"next",
"=",
"middle",
".",
"next",
".",
"next"
] | 2.3 Delete Middle Node: Implement an algorithm to delete a node in the
middle (i.e., any node but the first and last node, not necessarily the exact
middle) of a singly linked list, given only access to that node. | [
"2",
".",
"3",
"Delete",
"Middle",
"Node",
":",
"Implement",
"an",
"algorithm",
"to",
"delete",
"a",
"node",
"in",
"the",
"middle",
"(",
"i",
".",
"e",
".",
"any",
"node",
"but",
"the",
"first",
"and",
"last",
"node",
"not",
"necessarily",
"the",
"exact",
"middle",
")",
"of",
"a",
"singly",
"linked",
"list",
"given",
"only",
"access",
"to",
"that",
"node",
"."
] | [
"\"\"\" 2.3 Delete Middle Node: Implement an algorithm to delete a node in the\n middle (i.e., any node but the first and last node, not necessarily the exact\n middle) of a singly linked list, given only access to that node.\n EXAMPLE\n Input: the node c from the linked list a->b->c->d->e->f\n Result: nothing is returned, but the new linked list looks like a->b->d->e->f\n \"\"\""
] | [
{
"param": "middle",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "middle",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def delete_middle(middle):
middle.value = middle.next.value
middle.next = middle.next.next | 73 | 120 |
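A complete round trip for the trick above: build the docstring's example list, delete `c` with access to only that node, and print the result:

```python
class Node:
    def __init__(self, value, next=None):
        self.value, self.next = value, next

def delete_middle(middle):
    # Copy the successor's value into this node, then splice the successor out.
    middle.value = middle.next.value
    middle.next = middle.next.next

f = Node('f'); e = Node('e', f); d = Node('d', e)
c = Node('c', d); b = Node('b', c); a = Node('a', b)
delete_middle(c)

node, out = a, []
while node:
    out.append(node.value)
    node = node.next
print(out)   # ['a', 'b', 'd', 'e', 'f']
```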
a7ecc0642cc7dddb24b80b7ae5e01c9c6672b47b | nageshlop/cligj | cligj/features.py | [
"BSD-3-Clause"
] | Python | coords_from_query | <not_specific> | def coords_from_query(query):
"""Transform a query line into a (lng, lat) pair of coordinates."""
try:
coords = json.loads(query)
except ValueError:
query = query.replace(',', ' ')
vals = query.split()
coords = [float(v) for v in vals]
return tuple(coords[:2]) | Transform a query line into a (lng, lat) pair of coordinates. | Transform a query line into a (lng, lat) pair of coordinates. | [
"Transform",
"a",
"query",
"line",
"into",
"a",
"(",
"lng",
"lat",
")",
"pair",
"of",
"coordinates",
"."
] | def coords_from_query(query):
try:
coords = json.loads(query)
except ValueError:
query = query.replace(',', ' ')
vals = query.split()
coords = [float(v) for v in vals]
return tuple(coords[:2]) | [
"def",
"coords_from_query",
"(",
"query",
")",
":",
"try",
":",
"coords",
"=",
"json",
".",
"loads",
"(",
"query",
")",
"except",
"ValueError",
":",
"query",
"=",
"query",
".",
"replace",
"(",
"','",
",",
"' '",
")",
"vals",
"=",
"query",
".",
"split",
"(",
")",
"coords",
"=",
"[",
"float",
"(",
"v",
")",
"for",
"v",
"in",
"vals",
"]",
"return",
"tuple",
"(",
"coords",
"[",
":",
"2",
"]",
")"
] | Transform a query line into a (lng, lat) pair of coordinates. | [
"Transform",
"a",
"query",
"line",
"into",
"a",
"(",
"lng",
"lat",
")",
"pair",
"of",
"coordinates",
"."
] | [
"\"\"\"Transform a query line into a (lng, lat) pair of coordinates.\"\"\""
] | [
{
"param": "query",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "query",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import json
def coords_from_query(query):
try:
coords = json.loads(query)
except ValueError:
query = query.replace(',', ' ')
vals = query.split()
coords = [float(v) for v in vals]
return tuple(coords[:2]) | 74 | 678 |
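The parser above accepts either a JSON array or plain comma/space-delimited text; a few illustrative queries:

```python
import json

def coords_from_query(query):
    try:
        coords = json.loads(query)          # handles '[lng, lat]' JSON input
    except ValueError:
        query = query.replace(',', ' ')     # fall back to plain-text numbers
        vals = query.split()
        coords = [float(v) for v in vals]
    return tuple(coords[:2])

print(coords_from_query('[-122.4194, 37.7749]'))    # (-122.4194, 37.7749)
print(coords_from_query('-122.4194, 37.7749'))      # (-122.4194, 37.7749)
print(coords_from_query('-122.4194 37.7749 10.0'))  # values beyond lng/lat are dropped
```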
de247152d7a7ab4beefa2476dfebbb053561bdbd | hugadams/scikit-spectra | skspec/nptools/haiss.py | [
"BSD-3-Clause"
] | Python | _haiss_m1 | <not_specific> | def _haiss_m1(lambda_spr):
''' Return diameter from wavelength at spr maximum of absorbance curve.
Parameters:
-----------
lambda_spr: the wavelength at spr absorbance maximum.
Notes:
-----------
this method is not accurate for particles with diameter less than 25nm.
'''
lambda_0=512
L1=6.53
L2=0.0216
d=(math.log((lambda_spr-lambda_0)/L1))/L2
return d | Return diameter from wavelength at spr maximum of absorbance curve.
Parameters:
-----------
lambda_spr: the wavelength at spr absorbance maximum.
Notes:
-----------
this method is not accurate for particles with diameter less than 25nm.
| Return diameter from wavelength at spr maximum of absorbance curve.
Parameters.
the wavelength at spr absorbance maximum.
this method is not accurate for particles with diameter less than 25nm. | [
"Return",
"diameter",
"from",
"wavelength",
"at",
"spr",
"maximum",
"of",
"absorbance",
"curve",
".",
"Parameters",
".",
"the",
"wavelength",
"at",
"spr",
"absorbance",
"maximum",
".",
"this",
"method",
"is",
"not",
"accurate",
"for",
"particles",
"with",
"diameter",
"less",
"than",
"25nm",
"."
] | def _haiss_m1(lambda_spr):
lambda_0=512
L1=6.53
L2=0.0216
d=(math.log((lambda_spr-lambda_0)/L1))/L2
return d | [
"def",
"_haiss_m1",
"(",
"lambda_spr",
")",
":",
"lambda_0",
"=",
"512",
"L1",
"=",
"6.53",
"L2",
"=",
"0.0216",
"d",
"=",
"(",
"math",
".",
"log",
"(",
"(",
"lambda_spr",
"-",
"lambda_0",
")",
"/",
"L1",
")",
")",
"/",
"L2",
"return",
"d"
] | Return diameter from wavelength at spr maximum of absorbance curve. | [
"Return",
"diameter",
"from",
"wavelength",
"at",
"spr",
"maximum",
"of",
"absorbance",
"curve",
"."
] | [
"''' Return diameter from wavelength at spr maximum of absorbance curve.\n\n Parameters:\n -----------\n lambda_spr: the wavelength at spr absorbance maximum.\n\n Notes:\n -----------\n this method is not accurate for particles with diameter less than 25nm.\n\n '''"
] | [
{
"param": "lambda_spr",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "lambda_spr",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import math
def _haiss_m1(lambda_spr):
lambda_0=512
L1=6.53
L2=0.0216
d=(math.log((lambda_spr-lambda_0)/L1))/L2
return d | 75 | 30 |
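A worked call of the sizing formula above (fit constants as in the code; the example wavelength is hypothetical). Peaks at or below 512 nm raise `ValueError` from `math.log`, consistent with the documented >25 nm validity caveat:

```python
import math

def _haiss_m1(lambda_spr):
    lambda_0 = 512
    L1 = 6.53
    L2 = 0.0216
    return (math.log((lambda_spr - lambda_0) / L1)) / L2

print(round(_haiss_m1(525.0), 1))   # 31.9 -- estimated diameter (nm) for an SPR peak at 525 nm
```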
94cd0211858390df4d0f2154cb32db76c59352c6 | Tasignotas/topographica_mirror | topo/tests/reference/topo_or_defs.py | [
"BSD-3-Clause"
] | Python | half_odd_array | <not_specific> | def half_odd_array(r):
"""Return r that ends in .5"""
int_width = int(2*r)
# force odd width
if int_width%2==0:
int_width+=1
new_r = int_width/2.0
assert new_r-0.5==int(new_r)
return new_r | Return r that ends in .5 | Return r that ends in .5 | [
"Return",
"r",
"that",
"ends",
"in",
".",
"5"
] | def half_odd_array(r):
int_width = int(2*r)
if int_width%2==0:
int_width+=1
new_r = int_width/2.0
assert new_r-0.5==int(new_r)
return new_r | [
"def",
"half_odd_array",
"(",
"r",
")",
":",
"int_width",
"=",
"int",
"(",
"2",
"*",
"r",
")",
"if",
"int_width",
"%",
"2",
"==",
"0",
":",
"int_width",
"+=",
"1",
"new_r",
"=",
"int_width",
"/",
"2.0",
"assert",
"new_r",
"-",
"0.5",
"==",
"int",
"(",
"new_r",
")",
"return",
"new_r"
] | Return r that ends in .5 | [
"Return",
"r",
"that",
"ends",
"in",
".",
"5"
] | [
"\"\"\"Return r that ends in .5\"\"\"",
"# force odd width"
] | [
{
"param": "r",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "r",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def half_odd_array(r):
int_width = int(2*r)
if int_width%2==0:
int_width+=1
new_r = int_width/2.0
assert new_r-0.5==int(new_r)
return new_r | 76 | 267 |
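Two calls showing both branches of the rounding rule above:

```python
def half_odd_array(r):
    int_width = int(2 * r)
    if int_width % 2 == 0:   # force an odd integer width
        int_width += 1
    new_r = int_width / 2.0
    assert new_r - 0.5 == int(new_r)
    return new_r

print(half_odd_array(3.0))   # 3.5 (even width 6 is bumped to 7)
print(half_odd_array(3.5))   # 3.5 (width 7 is already odd)
```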
075536ccf1ce622f39db77fd8e219f4b9b0d8e67 | trsvchn/captum | captum/attr/_utils/lrp_rules.py | [
"BSD-3-Clause"
] | Python | backward_hook_activation | <not_specific> | def backward_hook_activation(module, grad_input, grad_output):
"""Backward hook to propagate relevance over non-linear activations."""
if (
isinstance(grad_input, tuple)
and isinstance(grad_output, tuple)
and len(grad_input) > len(grad_output)
):
# Adds any additional elements of grad_input if applicable
# This occurs when registering a backward hook on nn.Dropout
# modules, which has an additional element of None in
# grad_input
return grad_output + grad_input[len(grad_output) :]
return grad_output | Backward hook to propagate relevance over non-linear activations. | Backward hook to propagate relevance over non-linear activations. | [
"Backward",
"hook",
"to",
"propagate",
"relevance",
"over",
"non",
"-",
"linear",
"activations",
"."
] | def backward_hook_activation(module, grad_input, grad_output):
if (
isinstance(grad_input, tuple)
and isinstance(grad_output, tuple)
and len(grad_input) > len(grad_output)
):
return grad_output + grad_input[len(grad_output) :]
return grad_output | [
"def",
"backward_hook_activation",
"(",
"module",
",",
"grad_input",
",",
"grad_output",
")",
":",
"if",
"(",
"isinstance",
"(",
"grad_input",
",",
"tuple",
")",
"and",
"isinstance",
"(",
"grad_output",
",",
"tuple",
")",
"and",
"len",
"(",
"grad_input",
")",
">",
"len",
"(",
"grad_output",
")",
")",
":",
"return",
"grad_output",
"+",
"grad_input",
"[",
"len",
"(",
"grad_output",
")",
":",
"]",
"return",
"grad_output"
] | Backward hook to propagate relevance over non-linear activations. | [
"Backward",
"hook",
"to",
"propagate",
"relevance",
"over",
"non",
"-",
"linear",
"activations",
"."
] | [
"\"\"\"Backward hook to propagate relevance over non-linear activations.\"\"\"",
"# Adds any additional elements of grad_input if applicable",
"# This occurs when registering a backward hook on nn.Dropout",
"# modules, which has an additional element of None in",
"# grad_input"
] | [
{
"param": "module",
"type": null
},
{
"param": "grad_input",
"type": null
},
{
"param": "grad_output",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "module",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "grad_input",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "grad_output",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def backward_hook_activation(module, grad_input, grad_output):
if (
isinstance(grad_input, tuple)
and isinstance(grad_output, tuple)
and len(grad_input) > len(grad_output)
):
return grad_output + grad_input[len(grad_output) :]
return grad_output | 77 | 546 |
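Because the hook only manipulates tuples, plain tuples can stand in for gradient tensors in a sketch; in real use PyTorch supplies `module` and the gradient tuples when the hook is registered on a layer:

```python
def backward_hook_activation(module, grad_input, grad_output):
    if (isinstance(grad_input, tuple) and isinstance(grad_output, tuple)
            and len(grad_input) > len(grad_output)):
        # e.g. nn.Dropout: grad_input carries an extra trailing element to pass through
        return grad_output + grad_input[len(grad_output):]
    return grad_output

print(backward_hook_activation(None, ('g_in',), ('g_out',)))        # ('g_out',)
print(backward_hook_activation(None, ('g_in', None), ('g_out',)))   # ('g_out', None)
```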
0cd2c8c8242dd34c4b2d40629282346a342b4d29 | jwcarr/eyek | eyekit/measure.py | [
"MIT"
] | Python | number_of_regressions_in | <not_specific> | def number_of_regressions_in(interest_area, fixation_sequence):
"""
Given an interest area and fixation sequence, return the number of
regressions back to that interest area after the interest area was read
for the first time. In other words, find the first fixation to exit the
interest area and then count how many times the reader returns to the
interest area from the right (or from the left in the case of
right-to-left text).
"""
entered_interest_area = False
first_exit_index = None
for fixation in fixation_sequence.iter_without_discards():
if fixation in interest_area:
entered_interest_area = True
elif entered_interest_area:
first_exit_index = fixation.index
break
if first_exit_index is None:
return 0 # IA was never exited, so there can't be any regressions back to it
count = 0
for prev_fix, curr_fix in fixation_sequence.iter_pairs(include_discards=False):
if prev_fix.index < first_exit_index:
continue
if prev_fix not in interest_area and curr_fix in interest_area:
if interest_area.right_to_left:
if curr_fix.x > prev_fix.x:
count += 1
else:
if curr_fix.x < prev_fix.x:
count += 1
return count |
Given an interest area and fixation sequence, return the number of
regressions back to that interest area after the interest area was read
for the first time. In other words, find the first fixation to exit the
interest area and then count how many times the reader returns to the
interest area from the right (or from the left in the case of
right-to-left text).
| Given an interest area and fixation sequence, return the number of
regressions back to that interest area after the interest area was read
for the first time. In other words, find the first fixation to exit the
interest area and then count how many times the reader returns to the
interest area from the right (or from the left in the case of
right-to-left text). | [
"Given",
"an",
"interest",
"area",
"and",
"fixation",
"sequence",
"return",
"the",
"number",
"of",
"regressions",
"back",
"to",
"that",
"interest",
"area",
"after",
"the",
"interest",
"area",
"was",
"read",
"for",
"the",
"first",
"time",
".",
"In",
"other",
"words",
"find",
"the",
"first",
"fixation",
"to",
"exit",
"the",
"interest",
"area",
"and",
"then",
"count",
"how",
"many",
"times",
"the",
"reader",
"returns",
"to",
"the",
"interest",
"area",
"from",
"the",
"right",
"(",
"or",
"from",
"the",
"left",
"in",
"the",
"case",
"of",
"right",
"-",
"to",
"-",
"left",
"text",
")",
"."
] | def number_of_regressions_in(interest_area, fixation_sequence):
entered_interest_area = False
first_exit_index = None
for fixation in fixation_sequence.iter_without_discards():
if fixation in interest_area:
entered_interest_area = True
elif entered_interest_area:
first_exit_index = fixation.index
break
if first_exit_index is None:
return 0
count = 0
for prev_fix, curr_fix in fixation_sequence.iter_pairs(include_discards=False):
if prev_fix.index < first_exit_index:
continue
if prev_fix not in interest_area and curr_fix in interest_area:
if interest_area.right_to_left:
if curr_fix.x > prev_fix.x:
count += 1
else:
if curr_fix.x < prev_fix.x:
count += 1
return count | [
"def",
"number_of_regressions_in",
"(",
"interest_area",
",",
"fixation_sequence",
")",
":",
"entered_interest_area",
"=",
"False",
"first_exit_index",
"=",
"None",
"for",
"fixation",
"in",
"fixation_sequence",
".",
"iter_without_discards",
"(",
")",
":",
"if",
"fixation",
"in",
"interest_area",
":",
"entered_interest_area",
"=",
"True",
"elif",
"entered_interest_area",
":",
"first_exit_index",
"=",
"fixation",
".",
"index",
"break",
"if",
"first_exit_index",
"is",
"None",
":",
"return",
"0",
"count",
"=",
"0",
"for",
"prev_fix",
",",
"curr_fix",
"in",
"fixation_sequence",
".",
"iter_pairs",
"(",
"include_discards",
"=",
"False",
")",
":",
"if",
"prev_fix",
".",
"index",
"<",
"first_exit_index",
":",
"continue",
"if",
"prev_fix",
"not",
"in",
"interest_area",
"and",
"curr_fix",
"in",
"interest_area",
":",
"if",
"interest_area",
".",
"right_to_left",
":",
"if",
"curr_fix",
".",
"x",
">",
"prev_fix",
".",
"x",
":",
"count",
"+=",
"1",
"else",
":",
"if",
"curr_fix",
".",
"x",
"<",
"prev_fix",
".",
"x",
":",
"count",
"+=",
"1",
"return",
"count"
] | Given an interest area and fixation sequence, return the number of
regressions back to that interest area after the interest area was read
for the first time. | [
"Given",
"an",
"interest",
"area",
"and",
"fixation",
"sequence",
"return",
"the",
"number",
"of",
"regressions",
"back",
"to",
"that",
"interest",
"area",
"after",
"the",
"interest",
"area",
"was",
"read",
"for",
"the",
"first",
"time",
"."
] | [
"\"\"\"\n Given an interest area and fixation sequence, return the number of\n regressions back to that interest area after the interest area was read\n for the first time. In other words, find the first fixation to exit the\n interest area and then count how many times the reader returns to the\n interest area from the right (or from the left in the case of\n right-to-left text).\n \"\"\"",
"# IA was never exited, so there can't be any regressions back to it"
] | [
{
"param": "interest_area",
"type": null
},
{
"param": "fixation_sequence",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "interest_area",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "fixation_sequence",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def number_of_regressions_in(interest_area, fixation_sequence):
entered_interest_area = False
first_exit_index = None
for fixation in fixation_sequence.iter_without_discards():
if fixation in interest_area:
entered_interest_area = True
elif entered_interest_area:
first_exit_index = fixation.index
break
if first_exit_index is None:
return 0
count = 0
for prev_fix, curr_fix in fixation_sequence.iter_pairs(include_discards=False):
if prev_fix.index < first_exit_index:
continue
if prev_fix not in interest_area and curr_fix in interest_area:
if interest_area.right_to_left:
if curr_fix.x > prev_fix.x:
count += 1
else:
if curr_fix.x < prev_fix.x:
count += 1
return count | 78 | 272 |
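A minimal sketch of exercising the regression counter above. The real interest-area and fixation objects come from an eye-tracking package; the Stub classes below are hypothetical stand-ins that implement only the duck-typed surface the function touches (membership tests, iter_without_discards, iter_pairs, .index, .x, .right_to_left).

    class StubFixation:
        def __init__(self, index, x):
            self.index = index  # ordinal position in the scanpath
            self.x = x          # horizontal coordinate

    class StubInterestArea:
        def __init__(self, x_min, x_max, right_to_left=False):
            self.x_min, self.x_max = x_min, x_max
            self.right_to_left = right_to_left
        def __contains__(self, fixation):
            return self.x_min <= fixation.x <= self.x_max

    class StubSequence:
        def __init__(self, fixations):
            self._fixations = fixations
        def iter_without_discards(self):
            return iter(self._fixations)
        def iter_pairs(self, include_discards=False):
            return zip(self._fixations, self._fixations[1:])

    # Reader lands in the IA (x 100-200), exits to the right, then regresses back once.
    seq = StubSequence([StubFixation(i, x)
                        for i, x in enumerate([120, 180, 250, 300, 150, 320])])
    print(number_of_regressions_in(StubInterestArea(100, 200), seq))  # -> 1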
2025358bc8a3b62cb83c283914ff2a1bf457c1c0 | lietu/connquality | connquality/graph.py | [
"BSD-3-Clause"
] | Python | parse_options | <not_specific> | def parse_options(args):
"""
Parse commandline arguments into options for Graph
:param args:
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument("--logfile", default="connection.log",
help="Where is the connection quality data stored")
parser.add_argument("--outfile", default="graph.png",
help="Where to store the generated graph")
parser.add_argument("--dpi", default=100.0,
help="Target DPI for graph")
parser.add_argument("--datapoints", default=None,
help="Limit number of datapoints to show")
parser.add_argument("--start", default=None,
help="Only include entries starting from this datetime")
parser.add_argument("--end", default=None,
help="Only include entries until this datetime")
return parser.parse_args(args) |
Parse commandline arguments into options for Graph
:param args:
:return:
| Parse commandline arguments into options for Graph | [
"Parse",
"commandline",
"arguments",
"into",
"options",
"for",
"Graph"
] | def parse_options(args):
parser = argparse.ArgumentParser()
parser.add_argument("--logfile", default="connection.log",
help="Where is the connection quality data stored")
parser.add_argument("--outfile", default="graph.png",
help="Where to store the generated graph")
parser.add_argument("--dpi", default=100.0,
help="Target DPI for graph")
parser.add_argument("--datapoints", default=None,
help="Limit number of datapoints to show")
parser.add_argument("--start", default=None,
help="Only include entries starting from this datetime")
parser.add_argument("--end", default=None,
help="Only include entries until this datetime")
return parser.parse_args(args) | [
"def",
"parse_options",
"(",
"args",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"\"--logfile\"",
",",
"default",
"=",
"\"connection.log\"",
",",
"help",
"=",
"\"Where is the connection quality data stored\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--outfile\"",
",",
"default",
"=",
"\"graph.png\"",
",",
"help",
"=",
"\"Where to store the generated graph\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--dpi\"",
",",
"default",
"=",
"100.0",
",",
"help",
"=",
"\"Target DPI for graph\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--datapoints\"",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"Limit number of datapoints to show\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--start\"",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"Only include entries starting from this datetime\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--end\"",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"Only include entries until this datetime\"",
")",
"return",
"parser",
".",
"parse_args",
"(",
"args",
")"
] | Parse commandline arguments into options for Graph | [
"Parse",
"commandline",
"arguments",
"into",
"options",
"for",
"Graph"
] | [
"\"\"\"\n Parse commandline arguments into options for Graph\n :param args:\n :return:\n \"\"\""
] | [
{
"param": "args",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "args",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import argparse
def parse_options(args):
parser = argparse.ArgumentParser()
parser.add_argument("--logfile", default="connection.log",
help="Where is the connection quality data stored")
parser.add_argument("--outfile", default="graph.png",
help="Where to store the generated graph")
parser.add_argument("--dpi", default=100.0,
help="Target DPI for graph")
parser.add_argument("--datapoints", default=None,
help="Limit number of datapoints to show")
parser.add_argument("--start", default=None,
help="Only include entries starting from this datetime")
parser.add_argument("--end", default=None,
help="Only include entries until this datetime")
return parser.parse_args(args) | 79 | 849 |
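A quick usage sketch: passing an explicit argv list means no real command line is needed. Note that --datapoints comes back as a string, since that option declares no type=.

    opts = parse_options(["--logfile", "conn.log", "--datapoints", "500"])
    print(opts.logfile, opts.outfile, opts.datapoints)  # conn.log graph.png 500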
b2ac457c81aad66f249904c1bf51af4a546e4401 | azadoks/aiida-core | tests/manage/test_caching_config.py | [
"MIT",
"BSD-3-Clause"
] | Python | configure_caching | <not_specific> | def configure_caching(config_with_profile_factory):
"""
Fixture to set the caching configuration in the test profile to
a specific dictionary. This is done by creating a temporary
caching configuration file.
"""
config = config_with_profile_factory()
@contextlib.contextmanager
def inner(config_dict):
for key, value in config_dict.items():
config.set_option(f'caching.{key}', value)
yield
# reset the configuration
for key in config_dict.keys():
config.unset_option(f'caching.{key}')
return inner |
Fixture to set the caching configuration in the test profile to
a specific dictionary. This is done by creating a temporary
caching configuration file.
| Fixture to set the caching configuration in the test profile to
a specific dictionary. This is done by creating a temporary
caching configuration file. | [
"Fixture",
"to",
"set",
"the",
"caching",
"configuration",
"in",
"the",
"test",
"profile",
"to",
"a",
"specific",
"dictionary",
".",
"This",
"is",
"done",
"by",
"creating",
"a",
"temporary",
"caching",
"configuration",
"file",
"."
] | def configure_caching(config_with_profile_factory):
config = config_with_profile_factory()
@contextlib.contextmanager
def inner(config_dict):
for key, value in config_dict.items():
config.set_option(f'caching.{key}', value)
yield
for key in config_dict.keys():
config.unset_option(f'caching.{key}')
return inner | [
"def",
"configure_caching",
"(",
"config_with_profile_factory",
")",
":",
"config",
"=",
"config_with_profile_factory",
"(",
")",
"@",
"contextlib",
".",
"contextmanager",
"def",
"inner",
"(",
"config_dict",
")",
":",
"for",
"key",
",",
"value",
"in",
"config_dict",
".",
"items",
"(",
")",
":",
"config",
".",
"set_option",
"(",
"f'caching.{key}'",
",",
"value",
")",
"yield",
"for",
"key",
"in",
"config_dict",
".",
"keys",
"(",
")",
":",
"config",
".",
"unset_option",
"(",
"f'caching.{key}'",
")",
"return",
"inner"
] | Fixture to set the caching configuration in the test profile to
a specific dictionary. | [
"Fixture",
"to",
"set",
"the",
"caching",
"configuration",
"in",
"the",
"test",
"profile",
"to",
"a",
"specific",
"dictionary",
"."
] | [
"\"\"\"\n Fixture to set the caching configuration in the test profile to\n a specific dictionary. This is done by creating a temporary\n caching configuration file.\n \"\"\"",
"# reset the configuration"
] | [
{
"param": "config_with_profile_factory",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "config_with_profile_factory",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import contextlib
def configure_caching(config_with_profile_factory):
config = config_with_profile_factory()
@contextlib.contextmanager
def inner(config_dict):
for key, value in config_dict.items():
config.set_option(f'caching.{key}', value)
yield
for key in config_dict.keys():
config.unset_option(f'caching.{key}')
return inner | 80 | 407 |
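A dependency-free illustration of the pattern the fixture relies on: a factory returning a context manager that sets options on entry and unsets them on exit. FakeConfig and the option name are hypothetical stand-ins for the real profile config. Note the bare yield: an exception inside the with-block would skip the cleanup loop, which a try/finally would make robust.

    import contextlib

    class FakeConfig:
        def __init__(self):
            self.opts = {}
        def set_option(self, key, value):
            self.opts[key] = value
        def unset_option(self, key):
            self.opts.pop(key, None)

    def make_configure(config):
        @contextlib.contextmanager
        def inner(config_dict):
            for key, value in config_dict.items():
                config.set_option(f'caching.{key}', value)
            yield
            for key in config_dict:
                config.unset_option(f'caching.{key}')
        return inner

    cfg = FakeConfig()
    with make_configure(cfg)({'default_enabled': True}):
        print(cfg.opts)  # {'caching.default_enabled': True}
    print(cfg.opts)      # {} - options restored on exit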
462585d4e0f04e68e994e80943e1282299537ded | jon-athon/content | Packs/MISP/Integrations/MISPV3/MISPV3.py | [
"MIT"
] | Python | build_tag_output | null | def build_tag_output(given_object):
"""given_object is attribute or event, depends on the called function"""
if given_object.get('Tag'):
given_object['Tag'] = [
{'Name': tag.get('name'),
'is_galaxy': tag.get('is_galaxy')
} for tag in given_object.get('Tag')
] | given_object is attribute or event, depends on the called function | given_object is attribute or event, depends on the called function | [
"given_object",
"is",
"attribute",
"or",
"event",
"depends",
"on",
"the",
"called",
"function"
] | def build_tag_output(given_object):
if given_object.get('Tag'):
given_object['Tag'] = [
{'Name': tag.get('name'),
'is_galaxy': tag.get('is_galaxy')
} for tag in given_object.get('Tag')
] | [
"def",
"build_tag_output",
"(",
"given_object",
")",
":",
"if",
"given_object",
".",
"get",
"(",
"'Tag'",
")",
":",
"given_object",
"[",
"'Tag'",
"]",
"=",
"[",
"{",
"'Name'",
":",
"tag",
".",
"get",
"(",
"'name'",
")",
",",
"'is_galaxy'",
":",
"tag",
".",
"get",
"(",
"'is_galaxy'",
")",
"}",
"for",
"tag",
"in",
"given_object",
".",
"get",
"(",
"'Tag'",
")",
"]"
] | given_object is attribute or event, depends on the called function | [
"given_object",
"is",
"attribute",
"or",
"event",
"depends",
"on",
"the",
"called",
"function"
] | [
"\"\"\"given_object is attribute or event, depends on the called function\"\"\""
] | [
{
"param": "given_object",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "given_object",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def build_tag_output(given_object):
if given_object.get('Tag'):
given_object['Tag'] = [
{'Name': tag.get('name'),
'is_galaxy': tag.get('is_galaxy')
} for tag in given_object.get('Tag')
] | 81 | 867 |
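Usage sketch with a hypothetical MISP-style dict. The function mutates its argument in place, renames name to Name, and drops any other tag keys; objects without a 'Tag' key are left untouched.

    attr = {'Tag': [{'name': 'tlp:green', 'is_galaxy': False, 'colour': '#33FF00'}]}
    build_tag_output(attr)
    print(attr)  # {'Tag': [{'Name': 'tlp:green', 'is_galaxy': False}]}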
477d3cf9b9e459f83b232fa785d8088adc781aa9 | Phlya/pysradb | pysradb/filter_attrs.py | [
"BSD-3-Clause"
] | Python | guess_tissue_type | <not_specific> | def guess_tissue_type(sample_attribute):
"""Guess tissue type from sample_attribute data.
Parameters
----------
sample_attribute: string
sample_attribute string as in the metadata column
Returns
-------
tissue_type: string
Possible cell type of sample.
Returns None if no match found.
"""
sample_attribute = str(sample_attribute)
tissue_type = None
if "tissue: " in sample_attribute:
x = re.search(r"tissue: \w+", sample_attribute)
tissue_type = re.sub(r"\s+", " ", x.group(0).lstrip("tissue:").lower().strip())
else:
warnings.warn(
"Couldn't parse {} for tissue".format(sample_attribute), UserWarning
)
return tissue_type | Guess tissue type from sample_attribute data.
Parameters
----------
sample_attribute: string
sample_attribute string as in the metadata column
Returns
-------
tissue_type: string
Possible cell type of sample.
Returns None if no match found.
| Guess tissue type from sample_attribute data.
Parameters
string
sample_attribute string as in the metadata column
Returns
string
Possible cell type of sample.
Returns None if no match found. | [
"Guess",
"tissue",
"type",
"from",
"sample_attribute",
"data",
".",
"Parameters",
"string",
"sample_attribute",
"string",
"as",
"in",
"the",
"metadata",
"column",
"Returns",
"string",
"Possible",
"cell",
"type",
"of",
"sample",
".",
"Returns",
"None",
"if",
"no",
"match",
"found",
"."
] | def guess_tissue_type(sample_attribute):
sample_attribute = str(sample_attribute)
tissue_type = None
if "tissue: " in sample_attribute:
x = re.search(r"tissue: \w+", sample_attribute)
tissue_type = re.sub(r"\s+", " ", x.group(0).lstrip("tissue:").lower().strip())
else:
warnings.warn(
"Couldn't parse {} for tissue".format(sample_attribute), UserWarning
)
return tissue_type | [
"def",
"guess_tissue_type",
"(",
"sample_attribute",
")",
":",
"sample_attribute",
"=",
"str",
"(",
"sample_attribute",
")",
"tissue_type",
"=",
"None",
"if",
"\"tissue: \"",
"in",
"sample_attribute",
":",
"x",
"=",
"re",
".",
"search",
"(",
"r\"tissue: \\w+\"",
",",
"sample_attribute",
")",
"tissue_type",
"=",
"re",
".",
"sub",
"(",
"r\"\\s+\"",
",",
"\" \"",
",",
"x",
".",
"group",
"(",
"0",
")",
".",
"lstrip",
"(",
"\"tissue:\"",
")",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"Couldn't parse {} for tissue\"",
".",
"format",
"(",
"sample_attribute",
")",
",",
"UserWarning",
")",
"return",
"tissue_type"
] | Guess tissue type from sample_attribute data. | [
"Guess",
"tissue",
"type",
"from",
"sample_attribute",
"data",
"."
] | [
"\"\"\"Guess tissue type from sample_attribute data.\n\n Parameters\n ----------\n sample_attribute: string\n sample_attribute string as in the metadata column\n\n Returns\n -------\n tissue_type: string\n Possible cell type of sample.\n Returns None if no match found.\n \"\"\""
] | [
{
"param": "sample_attribute",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "sample_attribute",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import warnings
import re
def guess_tissue_type(sample_attribute):
sample_attribute = str(sample_attribute)
tissue_type = None
if "tissue: " in sample_attribute:
x = re.search(r"tissue: \w+", sample_attribute)
tissue_type = re.sub(r"\s+", " ", x.group(0).lstrip("tissue:").lower().strip())
else:
warnings.warn(
"Couldn't parse {} for tissue".format(sample_attribute), UserWarning
)
return tissue_type | 82 | 186 |
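Usage sketch with hypothetical SRA-style attribute strings. Because the pattern is tissue: \w+, only the first word after the colon is kept, so multi-word tissues such as "tissue: bone marrow" come back as just "bone".

    print(guess_tissue_type("strain: C57BL/6 || tissue: liver || age: 8 weeks"))  # liver
    print(guess_tissue_type("cell line: HeLa"))  # emits a UserWarning and returns None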
eb550db30fae881fd7dc4ae147163ec908b5cae7 | lsbloo/GeradorHorariosUfpb | algortimoGA/virtualenv/lib/python3.8/site-packages/jaraco/functools.py | [
"MIT"
] | Python | assign_params | <not_specific> | def assign_params(func, namespace):
"""
Assign parameters from namespace where func solicits.
>>> def func(x, y=3):
... print(x, y)
>>> assigned = assign_params(func, dict(x=2, z=4))
>>> assigned()
2 3
The usual errors are raised if a function doesn't receive
its required parameters:
>>> assigned = assign_params(func, dict(y=3, z=4))
>>> assigned()
Traceback (most recent call last):
TypeError: func() ...argument...
It even works on methods:
>>> class Handler:
... def meth(self, arg):
... print(arg)
>>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
crystal
"""
sig = inspect.signature(func)
params = sig.parameters.keys()
call_ns = {k: namespace[k] for k in params if k in namespace}
return functools.partial(func, **call_ns) |
Assign parameters from namespace where func solicits.
>>> def func(x, y=3):
... print(x, y)
>>> assigned = assign_params(func, dict(x=2, z=4))
>>> assigned()
2 3
The usual errors are raised if a function doesn't receive
its required parameters:
>>> assigned = assign_params(func, dict(y=3, z=4))
>>> assigned()
Traceback (most recent call last):
TypeError: func() ...argument...
It even works on methods:
>>> class Handler:
... def meth(self, arg):
... print(arg)
>>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
crystal
| Assign parameters from namespace where func solicits.
The usual errors are raised if a function doesn't receive
its required parameters.
It even works on methods.
| [
"Assign",
"parameters",
"from",
"namespace",
"where",
"func",
"solicits",
".",
"The",
"usual",
"errors",
"are",
"raised",
"if",
"a",
"function",
"doesn",
"'",
"t",
"receive",
"its",
"required",
"parameters",
".",
"It",
"even",
"works",
"on",
"methods",
"."
] | def assign_params(func, namespace):
sig = inspect.signature(func)
params = sig.parameters.keys()
call_ns = {k: namespace[k] for k in params if k in namespace}
return functools.partial(func, **call_ns) | [
"def",
"assign_params",
"(",
"func",
",",
"namespace",
")",
":",
"sig",
"=",
"inspect",
".",
"signature",
"(",
"func",
")",
"params",
"=",
"sig",
".",
"parameters",
".",
"keys",
"(",
")",
"call_ns",
"=",
"{",
"k",
":",
"namespace",
"[",
"k",
"]",
"for",
"k",
"in",
"params",
"if",
"k",
"in",
"namespace",
"}",
"return",
"functools",
".",
"partial",
"(",
"func",
",",
"**",
"call_ns",
")"
] | Assign parameters from namespace where func solicits. | [
"Assign",
"parameters",
"from",
"namespace",
"where",
"func",
"solicits",
"."
] | [
"\"\"\"\n Assign parameters from namespace where func solicits.\n\n >>> def func(x, y=3):\n ... print(x, y)\n >>> assigned = assign_params(func, dict(x=2, z=4))\n >>> assigned()\n 2 3\n\n The usual errors are raised if a function doesn't receive\n its required parameters:\n\n >>> assigned = assign_params(func, dict(y=3, z=4))\n >>> assigned()\n Traceback (most recent call last):\n TypeError: func() ...argument...\n\n It even works on methods:\n\n >>> class Handler:\n ... def meth(self, arg):\n ... print(arg)\n >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()\n crystal\n \"\"\""
] | [
{
"param": "func",
"type": null
},
{
"param": "namespace",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "func",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "namespace",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import inspect
import functools
def assign_params(func, namespace):
sig = inspect.signature(func)
params = sig.parameters.keys()
call_ns = {k: namespace[k] for k in params if k in namespace}
return functools.partial(func, **call_ns) | 83 | 504 |
7cf20fd0161c0d248819f73cd81dba7e24ab50e0 | prizm1/bartsummarizer | fairseq/tasks/sentence_pair_classification_task.py | [
"BSD-3-Clause"
] | Python | add_args | null | def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', help='path to data directory')
parser.add_argument('--raw-text', default=False, action='store_true',
help='load raw text dataset')
parser.add_argument('--num-labels', type=int, default=3,
help='number of labels')
parser.add_argument('--concat-sentences-mode', default='none',
help='concat sentences in the dataset. none = dont concat, eos = eos concat, unk = unk concat')
parser.add_argument('--use-bos', default=False, action='store_true',
help='if true, uses a separate bos tokens to indicate beginning of string') | Add task-specific arguments to the parser. | Add task-specific arguments to the parser. | [
"Add",
"task",
"-",
"specific",
"arguments",
"to",
"the",
"parser",
"."
] | def add_args(parser):
parser.add_argument('data', help='path to data directory')
parser.add_argument('--raw-text', default=False, action='store_true',
help='load raw text dataset')
parser.add_argument('--num-labels', type=int, default=3,
help='number of labels')
parser.add_argument('--concat-sentences-mode', default='none',
help='concat sentences in the dataset. none = dont concat, eos = eos concat, unk = unk concat')
parser.add_argument('--use-bos', default=False, action='store_true',
help='if true, uses a separate bos tokens to indicate beginning of string') | [
"def",
"add_args",
"(",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"'data'",
",",
"help",
"=",
"'path to data directory'",
")",
"parser",
".",
"add_argument",
"(",
"'--raw-text'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'load raw text dataset'",
")",
"parser",
".",
"add_argument",
"(",
"'--num-labels'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"3",
",",
"help",
"=",
"'number of labels'",
")",
"parser",
".",
"add_argument",
"(",
"'--concat-sentences-mode'",
",",
"default",
"=",
"'none'",
",",
"help",
"=",
"'concat sentences in the dataset. none = dont concat, eos = eos concat, unk = unk concat'",
")",
"parser",
".",
"add_argument",
"(",
"'--use-bos'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'if true, uses a separate bos tokens to indicate beginning of string'",
")"
] | Add task-specific arguments to the parser. | [
"Add",
"task",
"-",
"specific",
"arguments",
"to",
"the",
"parser",
"."
] | [
"\"\"\"Add task-specific arguments to the parser.\"\"\""
] | [
{
"param": "parser",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "parser",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def add_args(parser):
parser.add_argument('data', help='path to data directory')
parser.add_argument('--raw-text', default=False, action='store_true',
help='load raw text dataset')
parser.add_argument('--num-labels', type=int, default=3,
help='number of labels')
parser.add_argument('--concat-sentences-mode', default='none',
help='concat sentences in the dataset. none = dont concat, eos = eos concat, unk = unk concat')
parser.add_argument('--use-bos', default=False, action='store_true',
help='if true, uses a separate bos tokens to indicate beginning of string') | 84 | 849 |
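Wiring sketch: the function expects an existing parser, so it can be tried standalone with argparse (the data path here is hypothetical). Hyphenated flags land on underscored attributes.

    import argparse

    parser = argparse.ArgumentParser()
    add_args(parser)
    ns = parser.parse_args(['/data/snli', '--num-labels', '2', '--raw-text'])
    print(ns.data, ns.num_labels, ns.raw_text)  # /data/snli 2 True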
adb638de9aa86bd0333d454f873d42a7e0db5a9c | izikeros/sequence_to_sequence_translation | preporcessing.py | [
"MIT"
] | Python | create_mapping | <not_specific> | def create_mapping(vocab):
"""Creating an array of words from the vocabulary set,
use this array as index-to-word dictionary
"""
idx_to_word = [word[0] for word in vocab]
# Adding the word "ZERO" to the beginning of the array
idx_to_word.insert(0, 'ZERO')
# Adding the word 'UNK' to the end of the array (stands for UNKNOWN words)
idx_to_word.append('UNK')
# Creating the word-to-index dictionary from the array created above
word_to_idx = {word: idx for idx, word in enumerate(idx_to_word)}
return word_to_idx, idx_to_word | Creating an array of words from the vocabulary set,
use this array as index-to-word dictionary
| Creating an array of words from the vocabulary set,
use this array as index-to-word dictionary | [
"Creating",
"an",
"array",
"of",
"words",
"from",
"the",
"vocabulary",
"set",
"use",
"this",
"array",
"as",
"index",
"-",
"to",
"-",
"word",
"dictionary"
] | def create_mapping(vocab):
idx_to_word = [word[0] for word in vocab]
idx_to_word.insert(0, 'ZERO')
idx_to_word.append('UNK')
word_to_idx = {word: idx for idx, word in enumerate(idx_to_word)}
return word_to_idx, idx_to_word | [
"def",
"create_mapping",
"(",
"vocab",
")",
":",
"idx_to_word",
"=",
"[",
"word",
"[",
"0",
"]",
"for",
"word",
"in",
"vocab",
"]",
"idx_to_word",
".",
"insert",
"(",
"0",
",",
"'ZERO'",
")",
"idx_to_word",
".",
"append",
"(",
"'UNK'",
")",
"word_to_idx",
"=",
"{",
"word",
":",
"idx",
"for",
"idx",
",",
"word",
"in",
"enumerate",
"(",
"idx_to_word",
")",
"}",
"return",
"word_to_idx",
",",
"idx_to_word"
] | Creating an array of words from the vocabulary set,
use this array as index-to-word dictionary | [
"Creating",
"an",
"array",
"of",
"words",
"from",
"the",
"vocabulary",
"set",
"use",
"this",
"array",
"as",
"index",
"-",
"to",
"-",
"word",
"dictionary"
] | [
"\"\"\"Creating an array of words from the vocabulary set,\r\n\r\n use this array as index-to-word dictionary\r\n \"\"\"",
"# Adding the word \"ZERO\" to the beginning of the array\r",
"# Adding the word 'UNK' to the end of the array (stands for UNKNOWN words)\r",
"# Creating the word-to-index dictionary from the array created above\r"
] | [
{
"param": "vocab",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "vocab",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def create_mapping(vocab):
idx_to_word = [word[0] for word in vocab]
idx_to_word.insert(0, 'ZERO')
idx_to_word.append('UNK')
word_to_idx = {word: idx for idx, word in enumerate(idx_to_word)}
return word_to_idx, idx_to_word | 85 | 961 |
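Usage sketch with a hypothetical (word, count) vocabulary, as produced by e.g. a frequency counter. Index 0 is reserved for the padding word 'ZERO' and the last index for 'UNK'.

    vocab = [("the", 1200), ("of", 900), ("and", 850)]
    word_to_idx, idx_to_word = create_mapping(vocab)
    print(idx_to_word)         # ['ZERO', 'the', 'of', 'and', 'UNK']
    print(word_to_idx["UNK"])  # 4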
f501502e7424fef95f2c2b66c1aa33b1a59481ea | WarwickRSE/SOUP | 004_Recursion/list_combination.py | [
"BSD-3-Clause"
] | Python | combine_lists | <not_specific> | def combine_lists(list_of_lists):
"""Produce all combinations of one word from each list in input list of lists
The key principle is to think only about combining this list with all previous ones,
assuming those have already been dealt with. I.e. an inductive solution.
"""
list_size = len(list_of_lists)
tmp = []
output = list_of_lists[0]
total= len(output)
for i in range(1, list_size):
list=list_of_lists[i]
total*=len(list)
for element in list:
for i in range(len(output)):
tmp.append(output[i]+ element)
output = tmp
tmp = []
return output | Produce all combinations of one word from each list in input list of lists
The key principle is to think only about combining this list with all previous ones,
assuming those have already been dealt with. I.e. an inductive solution.
| Produce all combinations of one word from each list in input list of lists
The key principle is to think only about combining this list with all previous ones,
assuming those have already been dealt with. | [
"Produce",
"all",
"combinations",
"of",
"one",
"word",
"from",
"each",
"list",
"in",
"input",
"list",
"of",
"lists",
"The",
"key",
"principle",
"is",
"to",
"think",
"only",
"about",
"combining",
"this",
"list",
"with",
"all",
"previous",
"ones",
"assuming",
"those",
"have",
"already",
"been",
"dealt",
"with",
"."
] | def combine_lists(list_of_lists):
list_size = len(list_of_lists)
tmp = []
output = list_of_lists[0]
total= len(output)
for i in range(1, list_size):
list=list_of_lists[i]
total*=len(list)
for element in list:
for i in range(len(output)):
tmp.append(output[i]+ element)
output = tmp
tmp = []
return output | [
"def",
"combine_lists",
"(",
"list_of_lists",
")",
":",
"list_size",
"=",
"len",
"(",
"list_of_lists",
")",
"tmp",
"=",
"[",
"]",
"output",
"=",
"list_of_lists",
"[",
"0",
"]",
"total",
"=",
"len",
"(",
"output",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"list_size",
")",
":",
"list",
"=",
"list_of_lists",
"[",
"i",
"]",
"total",
"*=",
"len",
"(",
"list",
")",
"for",
"element",
"in",
"list",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"output",
")",
")",
":",
"tmp",
".",
"append",
"(",
"output",
"[",
"i",
"]",
"+",
"element",
")",
"output",
"=",
"tmp",
"tmp",
"=",
"[",
"]",
"return",
"output"
] | Produce all combinations of one word from each list in input list of lists
The key principle is to think only about combining this list with all previous ones,
assuming those have already been dealt with. | [
"Produce",
"all",
"combinations",
"of",
"one",
"word",
"from",
"each",
"list",
"in",
"input",
"list",
"of",
"lists",
"The",
"key",
"principle",
"is",
"to",
"think",
"only",
"about",
"combining",
"this",
"list",
"with",
"all",
"previous",
"ones",
"assuming",
"those",
"have",
"already",
"been",
"dealt",
"with",
"."
] | [
"\"\"\"Produce all combinations of one word from each list in input list of lists\n The key principle is to think only about combining this list with all previous ones,\n assuming those have already been dealt with. I.e. an inductive solution.\n\n \"\"\""
] | [
{
"param": "list_of_lists",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "list_of_lists",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def combine_lists(list_of_lists):
list_size = len(list_of_lists)
tmp = []
output = list_of_lists[0]
total= len(output)
for i in range(1, list_size):
list=list_of_lists[i]
total*=len(list)
for element in list:
for i in range(len(output)):
tmp.append(output[i]+ element)
output = tmp
tmp = []
return output | 86 | 98 |
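Worked example. The result matches [''.join(t) for t in itertools.product(*lists)] up to ordering; this version iterates each newly added list's elements in the outer loop. Note also that the inner variable shadows the built-in list, and total is computed but never used.

    lists = [["a", "b"], ["1", "2"], ["x"]]
    print(combine_lists(lists))  # ['a1x', 'b1x', 'a2x', 'b2x']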
fe9544d5782ec48c533757f0ef15c32050542ed7 | MikeOMa/DriftMLP | driftmlp/drifter_indexing/driftiter/__init__.py | [
"MIT"
] | Python | drifter_meta | <not_specific> | def drifter_meta(drifter_id_group, variables):
"""
Simple function to grab metadata from the h5py group.
Input:
drifter_id_group: The group corresponding to one id from the h5py file.
Output:
dict_out: with variables as the keys.
"""
dict_out = {key: drifter_id_group.attrs[key][:] for key in variables}
return dict_out |
Simple function to grab metadata from the h5py group.
Input:
drifter_id_group: The group corresponding to one id from the h5py file.
Output:
dict_out: with variables as the keys.
| Simple function to grab metadata from the h5py group.
Input:
drifter_id_group: The group corresponding to one id from the h5py file.
Output:
dict_out: with variables as the keys. | [
"Simple",
"function",
"to",
"grab",
"metadata",
"from",
"the",
"h5py",
"group",
".",
"Input",
":",
"drifter_id_group",
":",
"The",
"group",
"corresponding",
"to",
"one",
"id",
"from",
"the",
"h5py",
"file",
".",
"Output",
":",
"dict_out",
":",
"with",
"variables",
"as",
"the",
"keys",
"."
] | def drifter_meta(drifter_id_group, variables):
dict_out = {key: drifter_id_group.attrs[key][:] for key in variables}
return dict_out | [
"def",
"drifter_meta",
"(",
"drifter_id_group",
",",
"variables",
")",
":",
"dict_out",
"=",
"{",
"key",
":",
"drifter_id_group",
".",
"attrs",
"[",
"key",
"]",
"[",
":",
"]",
"for",
"key",
"in",
"variables",
"}",
"return",
"dict_out"
] | Simple function to grab metadata from the h5py group. | [
"Simple",
"function",
"to",
"grab",
"metadata",
"from",
"the",
"h5py",
"group",
"."
] | [
"\"\"\"\n Simple function to grab metadata from the h5py group.\n Input:\n drifter_id_group: The group corresponding to one id from the h5py file.\n Output:\n dict_out: with variables as the keys.\n \"\"\""
] | [
{
"param": "drifter_id_group",
"type": null
},
{
"param": "variables",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "drifter_id_group",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "variables",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def drifter_meta(drifter_id_group, variables):
dict_out = {key: drifter_id_group.attrs[key][:] for key in variables}
return dict_out | 87 | 592 |
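Usage sketch without h5py: a stand-in object exposing .attrs the way an h5py group does. The attribute names and values here are hypothetical; the [:] slice copies each value.

    class FakeGroup:
        attrs = {"deploy_lat": [26.5], "deploy_lon": [-80.1]}

    print(drifter_meta(FakeGroup(), ["deploy_lat"]))  # {'deploy_lat': [26.5]}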
449492c6887e5d02f66a26e88e759e39fd8995e1 | Ranga123/test1 | build/run_tests.py | [
"Apache-2.0"
] | Python | _get_term_width | <not_specific> | def _get_term_width():
'Attempt to discern the width of the terminal'
# This may not work on all platforms, in which case the default of 80
# characters is used. Improvements welcomed.
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = None
if not cr:
try:
cr = (os.environ['SVN_MAKE_CHECK_LINES'],
os.environ['SVN_MAKE_CHECK_COLUMNS'])
except:
cr = None
if not cr:
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
cr = None
if not cr:
# Default
if sys.platform == 'win32':
cr = (25, 79)
else:
cr = (25, 80)
return int(cr[1]) | Attempt to discern the width of the terminal | Attempt to discern the width of the terminal | [
"Attempt",
"to",
"discern",
"the",
"width",
"of",
"the",
"terminal"
] | def _get_term_width():
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = None
if not cr:
try:
cr = (os.environ['SVN_MAKE_CHECK_LINES'],
os.environ['SVN_MAKE_CHECK_COLUMNS'])
except:
cr = None
if not cr:
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
cr = None
if not cr:
if sys.platform == 'win32':
cr = (25, 79)
else:
cr = (25, 80)
return int(cr[1]) | [
"def",
"_get_term_width",
"(",
")",
":",
"def",
"ioctl_GWINSZ",
"(",
"fd",
")",
":",
"try",
":",
"import",
"fcntl",
",",
"termios",
",",
"struct",
",",
"os",
"cr",
"=",
"struct",
".",
"unpack",
"(",
"'hh'",
",",
"fcntl",
".",
"ioctl",
"(",
"fd",
",",
"termios",
".",
"TIOCGWINSZ",
",",
"'1234'",
")",
")",
"except",
":",
"return",
"None",
"return",
"cr",
"cr",
"=",
"None",
"if",
"not",
"cr",
":",
"try",
":",
"cr",
"=",
"(",
"os",
".",
"environ",
"[",
"'SVN_MAKE_CHECK_LINES'",
"]",
",",
"os",
".",
"environ",
"[",
"'SVN_MAKE_CHECK_COLUMNS'",
"]",
")",
"except",
":",
"cr",
"=",
"None",
"if",
"not",
"cr",
":",
"cr",
"=",
"ioctl_GWINSZ",
"(",
"0",
")",
"or",
"ioctl_GWINSZ",
"(",
"1",
")",
"or",
"ioctl_GWINSZ",
"(",
"2",
")",
"if",
"not",
"cr",
":",
"try",
":",
"fd",
"=",
"os",
".",
"open",
"(",
"os",
".",
"ctermid",
"(",
")",
",",
"os",
".",
"O_RDONLY",
")",
"cr",
"=",
"ioctl_GWINSZ",
"(",
"fd",
")",
"os",
".",
"close",
"(",
"fd",
")",
"except",
":",
"pass",
"if",
"not",
"cr",
":",
"try",
":",
"cr",
"=",
"(",
"os",
".",
"environ",
"[",
"'LINES'",
"]",
",",
"os",
".",
"environ",
"[",
"'COLUMNS'",
"]",
")",
"except",
":",
"cr",
"=",
"None",
"if",
"not",
"cr",
":",
"if",
"sys",
".",
"platform",
"==",
"'win32'",
":",
"cr",
"=",
"(",
"25",
",",
"79",
")",
"else",
":",
"cr",
"=",
"(",
"25",
",",
"80",
")",
"return",
"int",
"(",
"cr",
"[",
"1",
"]",
")"
] | Attempt to discern the width of the terminal | [
"Attempt",
"to",
"discern",
"the",
"width",
"of",
"the",
"terminal"
] | [
"'Attempt to discern the width of the terminal'",
"# This may not work on all platforms, in which case the default of 80",
"# characters is used. Improvements welcomed.",
"# Default"
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | import fcntl
import sys
import struct
import os
import termios
def _get_term_width():
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = None
if not cr:
try:
cr = (os.environ['SVN_MAKE_CHECK_LINES'],
os.environ['SVN_MAKE_CHECK_COLUMNS'])
except:
cr = None
if not cr:
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
cr = None
if not cr:
if sys.platform == 'win32':
cr = (25, 79)
else:
cr = (25, 80)
return int(cr[1]) | 88 | 31 |
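Quick check, plus the stdlib equivalent available since Python 3.3. The function consults the SVN_MAKE_CHECK_* variables first, then the TIOCGWINSZ ioctl, then LINES/COLUMNS, before falling back to 79 or 80 columns.

    import shutil

    print(_get_term_width())
    print(shutil.get_terminal_size(fallback=(80, 24)).columns)  # modern replacement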
3a04cc0858ea1984ea4e383ce4809c47e34ed64d | jkrukoff/Exercism | python/pangram/pangram.py | [
"MIT"
] | Python | is_pangram | bool | def is_pangram(sentence: str) -> bool:
"""
Determine if a given string contains all the characters from a to z.
sentence -- Any string.
returns -- true/false for if string contains all letters from a to z.
"""
letters = set(string.ascii_lowercase)
return letters.issubset(sentence.lower()) |
Determine if a given string contains all the characters from a to z.
sentence -- Any string.
returns -- true/false for if string contains all letters from a to z.
| Determine if a given string contains all the characters from a to z.
sentence -- Any string.
returns -- true/false for if string contains all letters from a to z. | [
"Determine",
"if",
"a",
"given",
"string",
"contains",
"all",
"the",
"characters",
"from",
"a",
"to",
"z",
".",
"sentence",
"--",
"Any",
"string",
".",
"returns",
"--",
"true",
"/",
"false",
"for",
"if",
"string",
"contains",
"all",
"letters",
"from",
"a",
"to",
"z",
"."
] | def is_pangram(sentence: str) -> bool:
letters = set(string.ascii_lowercase)
return letters.issubset(sentence.lower()) | [
"def",
"is_pangram",
"(",
"sentence",
":",
"str",
")",
"->",
"bool",
":",
"letters",
"=",
"set",
"(",
"string",
".",
"ascii_lowercase",
")",
"return",
"letters",
".",
"issubset",
"(",
"sentence",
".",
"lower",
"(",
")",
")"
] | Determine if a given string contains all the characters from a to z.
sentence -- Any string. | [
"Determine",
"if",
"a",
"given",
"string",
"contains",
"all",
"the",
"characters",
"from",
"a",
"to",
"z",
".",
"sentence",
"--",
"Any",
"string",
"."
] | [
"\"\"\"\n Determine if a given string contains all the characters from a to z.\n\n sentence -- Any string.\n returns -- true/false for if string contains all letters from a to z.\n \"\"\""
] | [
{
"param": "sentence",
"type": "str"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "sentence",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import string
def is_pangram(sentence: str) -> bool:
letters = set(string.ascii_lowercase)
return letters.issubset(sentence.lower()) | 89 | 646 |
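Usage sketch; case and punctuation are irrelevant because the check lowercases the input and tests set inclusion only.

    print(is_pangram("The quick brown fox jumps over the lazy dog."))  # True
    print(is_pangram("Hello, world"))                                  # False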
cfec1b5ed01d104807ec2c5af1d813be7e96110d | CSeq/manticore | manticore/core/cpu/x86.py | [
"Apache-2.0"
] | Python | TEST | null | def TEST(cpu, src1, src2):
'''
Logical compare.
Computes the bit-wise logical AND of first operand (source 1 operand)
and the second operand (source 2 operand) and sets the SF, ZF, and PF
status flags according to the result. The result is then discarded::
TEMP = SRC1 AND SRC2;
SF = MSB(TEMP);
IF TEMP = 0
THEN ZF = 1;
ELSE ZF = 0;
FI:
PF = BitwiseXNOR(TEMP[0:7]);
CF = 0;
OF = 0;
(*AF is Undefined*)
:param cpu: current CPU.
:param src1: first operand.
:param src2: second operand.
'''
#Defined Flags: szp
temp = src1.read() & src2.read();
cpu.SF = (temp & (1 << (src1.size-1))) != 0
cpu.ZF = temp == 0
cpu.PF = cpu._calculate_parity_flag(temp)
cpu.CF = False
cpu.OF = False |
Logical compare.
Computes the bit-wise logical AND of first operand (source 1 operand)
and the second operand (source 2 operand) and sets the SF, ZF, and PF
status flags according to the result. The result is then discarded::
TEMP = SRC1 AND SRC2;
SF = MSB(TEMP);
IF TEMP = 0
THEN ZF = 1;
ELSE ZF = 0;
FI:
PF = BitwiseXNOR(TEMP[0:7]);
CF = 0;
OF = 0;
(*AF is Undefined*)
:param cpu: current CPU.
:param src1: first operand.
:param src2: second operand.
| Logical compare.
Computes the bit-wise logical AND of first operand (source 1 operand)
and the second operand (source 2 operand) and sets the SF, ZF, and PF
status flags according to the result. The result is then discarded:.
| [
"Logical",
"compare",
".",
"Computes",
"the",
"bit",
"-",
"wise",
"logical",
"AND",
"of",
"first",
"operand",
"(",
"source",
"1",
"operand",
")",
"and",
"the",
"second",
"operand",
"(",
"source",
"2",
"operand",
")",
"and",
"sets",
"the",
"SF",
"ZF",
"and",
"PF",
"status",
"flags",
"according",
"to",
"the",
"result",
".",
"The",
"result",
"is",
"then",
"discarded",
":",
"."
] | def TEST(cpu, src1, src2):
temp = src1.read() & src2.read();
cpu.SF = (temp & (1 << (src1.size-1))) != 0
cpu.ZF = temp == 0
cpu.PF = cpu._calculate_parity_flag(temp)
cpu.CF = False
cpu.OF = False | [
"def",
"TEST",
"(",
"cpu",
",",
"src1",
",",
"src2",
")",
":",
"temp",
"=",
"src1",
".",
"read",
"(",
")",
"&",
"src2",
".",
"read",
"(",
")",
";",
"cpu",
".",
"SF",
"=",
"(",
"temp",
"&",
"(",
"1",
"<<",
"(",
"src1",
".",
"size",
"-",
"1",
")",
")",
")",
"!=",
"0",
"cpu",
".",
"ZF",
"=",
"temp",
"==",
"0",
"cpu",
".",
"PF",
"=",
"cpu",
".",
"_calculate_parity_flag",
"(",
"temp",
")",
"cpu",
".",
"CF",
"=",
"False",
"cpu",
".",
"OF",
"=",
"False"
] | Logical compare. | [
"Logical",
"compare",
"."
] | [
"'''\n Logical compare. \n \n Computes the bit-wise logical AND of first operand (source 1 operand) \n and the second operand (source 2 operand) and sets the SF, ZF, and PF \n status flags according to the result. The result is then discarded::\n\n TEMP = SRC1 AND SRC2;\n SF = MSB(TEMP);\n IF TEMP = 0\n THEN ZF = 1;\n ELSE ZF = 0;\n FI:\n PF = BitwiseXNOR(TEMP[0:7]);\n CF = 0;\n OF = 0;\n (*AF is Undefined*)\n \n :param cpu: current CPU. \n :param src1: first operand.\n :param src2: second operand.\n '''",
"#Defined Flags: szp"
] | [
{
"param": "cpu",
"type": null
},
{
"param": "src1",
"type": null
},
{
"param": "src2",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cpu",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "src1",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "src2",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def TEST(cpu, src1, src2):
temp = src1.read() & src2.read();
cpu.SF = (temp & (1 << (src1.size-1))) != 0
cpu.ZF = temp == 0
cpu.PF = cpu._calculate_parity_flag(temp)
cpu.CF = False
cpu.OF = False | 90 | 237 |
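Emulator-free sketch: hypothetical operand and CPU stubs standing in for the real emulator objects, with an assumed even-parity helper mirroring x86 PF semantics (PF is set when the low byte has an even number of 1 bits).

    class Op:
        def __init__(self, value, size):
            self._value, self.size = value, size
        def read(self):
            return self._value

    class CPU:
        def _calculate_parity_flag(self, value):
            return bin(value & 0xFF).count('1') % 2 == 0

    cpu = CPU()
    TEST(cpu, Op(0xF0, 8), Op(0x0F, 8))    # 0xF0 & 0x0F == 0
    print(cpu.ZF, cpu.SF, cpu.PF, cpu.CF)  # True False True False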
cceb367304af50013cb611d35ef35ece567c7c9d | jennyfong/pythonbits | file_groups.py | [
"MIT"
] | Python | numRange | <not_specific> | def numRange(numList):
"""
Get the first and last number in the list
"""
start = numList[0]
end = numList[-1]
if start != end:
return "{}-{}".format(start, end)
else:
return "{}".format(start) |
Get the first and last number in the list
| Get the first and last number in the list | [
"Get",
"the",
"first",
"and",
"last",
"number",
"in",
"the",
"list"
] | def numRange(numList):
start = numList[0]
end = numList[-1]
if start != end:
return "{}-{}".format(start, end)
else:
return "{}".format(start) | [
"def",
"numRange",
"(",
"numList",
")",
":",
"start",
"=",
"numList",
"[",
"0",
"]",
"end",
"=",
"numList",
"[",
"-",
"1",
"]",
"if",
"start",
"!=",
"end",
":",
"return",
"\"{}-{}\"",
".",
"format",
"(",
"start",
",",
"end",
")",
"else",
":",
"return",
"\"{}\"",
".",
"format",
"(",
"start",
")"
] | Get the first and last number in the list | [
"Get",
"the",
"first",
"and",
"last",
"number",
"in",
"the",
"list"
] | [
"\"\"\"\n Get the first and last number in the list\n \"\"\""
] | [
{
"param": "numList",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "numList",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def numRange(numList):
start = numList[0]
end = numList[-1]
if start != end:
return "{}-{}".format(start, end)
else:
return "{}".format(start) | 91 | 603 |
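Usage sketch. Only the endpoints are inspected, so the list is assumed sorted; an unsorted input such as [5, 1, 9] would still print as 5-9.

    print(numRange([3, 4, 5, 6]))  # 3-6
    print(numRange([7]))           # 7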
a3c8d79203071fbd0a3231ff242144dce321cc3a | akapila011/Text-to-Image | text_to_image/utilities.py | [
"MIT"
] | Python | convert_char_to_int | <not_specific> | def convert_char_to_int(char, limit=256):
"""
Take a character and return an integer value while ensuring the values returned do not go above the limit.
:param str char: A single character.
:param int limit: An integer representing the largest value which will start requiring reducing char value.
(default=256).
:return int: A number between 1 (0=NULL) and the limit that represents the int value of the char.
"""
value = ord(char) % limit
value = 1 if value == 0 else value
return value |
Take a character and return an integer value while ensuring the values returned do not go above the limit.
:param str char: A single character.
:param int limit: An integer representing the largest value which will start requiring reducing char value.
(default=256).
:return int: A number between 1 (0=NULL) and the limit that represents the int value of the char.
| Take a character and return an integer value while ensuring the values returned do not go above the limit. | [
"Take",
"a",
"character",
"and",
"return",
"an",
"integer",
"value",
"while",
"ensuring",
"the",
"values",
"returned",
"do",
"not",
"go",
"above",
"the",
"limit",
"."
] | def convert_char_to_int(char, limit=256):
value = ord(char) % limit
value = 1 if value == 0 else value
return value | [
"def",
"convert_char_to_int",
"(",
"char",
",",
"limit",
"=",
"256",
")",
":",
"value",
"=",
"ord",
"(",
"char",
")",
"%",
"limit",
"value",
"=",
"1",
"if",
"value",
"==",
"0",
"else",
"value",
"return",
"value"
] | Take a character and return an integer value while ensuring the values returned do not go above the limit. | [
"Take",
"a",
"character",
"and",
"return",
"an",
"integer",
"value",
"while",
"ensuring",
"the",
"values",
"returned",
"do",
"not",
"go",
"above",
"the",
"limit",
"."
] | [
"\"\"\"\n Take a character and return an integer value while ensuring the values returned do not go above the limit.\n :param str char: A single character.\n :param int limit: An integer representing the largest value which will start requiring reducing char value.\n (default=256).\n :return int: A number between 1 (0=NULL) and the limit that represents the int value of the char.\n \"\"\""
] | [
{
"param": "char",
"type": null
},
{
"param": "limit",
"type": null
}
] | {
"returns": [
{
"docstring": "A number between 1 (0=NULL) and the limit that represents the int value of the char.",
"docstring_tokens": [
"A",
"number",
"between",
"1",
"(",
"0",
"=",
"NULL",
")",
"and",
"the",
"limit",
"that",
"represents",
"the",
"int",
"value",
"of",
"the",
"char",
"."
],
"type": "int"
}
],
"raises": [],
"params": [
{
"identifier": "char",
"type": null,
"docstring": "A single character.",
"docstring_tokens": [
"A",
"single",
"character",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "limit",
"type": null,
"docstring": "An integer representing the largest value which will start requiring reducing char value.",
"docstring_tokens": [
"An",
"integer",
"representing",
"the",
"largest",
"value",
"which",
"will",
"start",
"requiring",
"reducing",
"char",
"value",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | def convert_char_to_int(char, limit=256):
value = ord(char) % limit
value = 1 if value == 0 else value
return value | 92 | 384 |
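Usage sketch. Values that fold to 0 under the modulus are remapped to 1 so that 0 stays reserved for NULL, per the docstring.

    print(convert_char_to_int('A'))       # 65
    print(convert_char_to_int(chr(256)))  # 1, since 256 % 256 == 0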
d73c866df2d6c0c82eec69683313d6c98803488a | mtholder/peyotl | peyotl/__init__.py | [
"BSD-2-Clause"
] | Python | iter_tree | null | def iter_tree(nex_obj):
"""Generator over each tree object in the NexSON object."""
for tb in nex_obj.get('trees', []):
for tree in tb.get('tree', []):
yield tree | Generator over each tree object in the NexSON object. | Generator over each tree object in the NexSON object. | [
"Generator",
"over",
"each",
"tree",
"object",
"in",
"the",
"NexSON",
"object",
"."
] | def iter_tree(nex_obj):
for tb in nex_obj.get('trees', []):
for tree in tb.get('tree', []):
yield tree | [
"def",
"iter_tree",
"(",
"nex_obj",
")",
":",
"for",
"tb",
"in",
"nex_obj",
".",
"get",
"(",
"'trees'",
",",
"[",
"]",
")",
":",
"for",
"tree",
"in",
"tb",
".",
"get",
"(",
"'tree'",
",",
"[",
"]",
")",
":",
"yield",
"tree"
] | Generator over each tree object in the NexSON object. | [
"Generator",
"over",
"each",
"tree",
"object",
"in",
"the",
"NexSON",
"object",
"."
] | [
"\"\"\"Generator over each tree object in the NexSON object.\"\"\""
] | [
{
"param": "nex_obj",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "nex_obj",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def iter_tree(nex_obj):
for tb in nex_obj.get('trees', []):
for tree in tb.get('tree', []):
yield tree | 93 | 706 |
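Usage sketch with a minimal dict shaped like a NexSON object; the '@id' keys are hypothetical. Missing 'trees' or 'tree' keys simply yield nothing.

    nexson = {'trees': [{'tree': [{'@id': 'tree1'}, {'@id': 'tree2'}]}]}
    print([t['@id'] for t in iter_tree(nexson)])  # ['tree1', 'tree2']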
5cc205938fb7dd2eef8c9924bb92b3d1fb712255 | erwanp/pyspeckit | pyspeckit/spectrum/plotters.py | [
"MIT",
"BSD-3-Clause"
] | Python | parse_norm | <not_specific> | def parse_norm(norm):
"""
Expected format: norm = 10E15
"""
try:
base, exp = norm.split('E')
except ValueError:
base, exp = norm.split('e')
if float(base) == 1.0:
norm = '10'
else:
norm = base
norm += '^{%s}' % exp
return norm |
Expected format: norm = 10E15
| Expected format: norm = 10E15 | [
"Expected",
"format",
":",
"norm",
"=",
"10E15"
] | def parse_norm(norm):
try:
base, exp = norm.split('E')
except ValueError:
base, exp = norm.split('e')
if float(base) == 1.0:
norm = '10'
else:
norm = base
norm += '^{%s}' % exp
return norm | [
"def",
"parse_norm",
"(",
"norm",
")",
":",
"try",
":",
"base",
",",
"exp",
"=",
"norm",
".",
"split",
"(",
"'E'",
")",
"except",
"ValueError",
":",
"base",
",",
"exp",
"=",
"norm",
".",
"split",
"(",
"'e'",
")",
"if",
"float",
"(",
"base",
")",
"==",
"1.0",
":",
"norm",
"=",
"'10'",
"else",
":",
"norm",
"=",
"base",
"norm",
"+=",
"'^{%s}'",
"%",
"exp",
"return",
"norm"
] | Expected format: norm = 10E15 | [
"Expected",
"format",
":",
"norm",
"=",
"10E15"
] | [
"\"\"\"\n Expected format: norm = 10E15\n \"\"\""
] | [
{
"param": "norm",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "norm",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def parse_norm(norm):
try:
base, exp = norm.split('E')
except ValueError:
base, exp = norm.split('e')
if float(base) == 1.0:
norm = '10'
else:
norm = base
norm += '^{%s}' % exp
return norm | 94 | 609 |
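Usage sketch. A base of exactly 1 is rewritten to 10; other bases pass through, and either E or e may separate base from exponent.

    print(parse_norm('1E16'))    # 10^{16}
    print(parse_norm('2.5e15'))  # 2.5^{15}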
e9db6a6856a6ea3fe010f6fed54a060e5f1f996e | dialoguemd/pypred | pypred/util.py | [
"BSD-3-Clause"
] | Python | harmonic_mean | <not_specific> | def harmonic_mean(lst):
"Returns the harmonic mean. Will crash if any value is zero."
n = len(lst)
inv_sum = sum((1.0 / x) for x in lst)
return (1.0 / ((1.0 / n) * inv_sum)) | Returns the harmonic mean. Will crash if any value is zero. | Returns the harmonic mean. Will crash if any value is zero. | [
"Returns",
"the",
"harmonic",
"mean",
".",
"Will",
"crash",
"if",
"any",
"value",
"is",
"zero",
"."
] | def harmonic_mean(lst):
n = len(lst)
inv_sum = sum((1.0 / x) for x in lst)
return (1.0 / ((1.0 / n) * inv_sum)) | [
"def",
"harmonic_mean",
"(",
"lst",
")",
":",
"n",
"=",
"len",
"(",
"lst",
")",
"inv_sum",
"=",
"sum",
"(",
"(",
"1.0",
"/",
"x",
")",
"for",
"x",
"in",
"lst",
")",
"return",
"(",
"1.0",
"/",
"(",
"(",
"1.0",
"/",
"n",
")",
"*",
"inv_sum",
")",
")"
] | Returns the harmonic mean. | [
"Returns",
"the",
"harmonic",
"mean",
"."
] | [
"\"Returns the harmonic mean. Will crash if any value is zero.\""
] | [
{
"param": "lst",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "lst",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def harmonic_mean(lst):
n = len(lst)
inv_sum = sum((1.0 / x) for x in lst)
return (1.0 / ((1.0 / n) * inv_sum)) | 96 | 845 |
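Worked example: for two speeds over equal distances, 1 / ((1/2) * (1/40 + 1/60)) = 48.

    print(harmonic_mean([40, 60]))   # 48.0
    print(harmonic_mean([1, 2, 4]))  # 1.714... (12/7)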
2d122810280dbaf3cd0bf286636fcd56a6b6a47a | scivision/asyncio-subprocess-examples | src/asyncio_subprocess_examples/__init__.py | [
"MIT"
] | Python | fortran_compiler_ok_sync | typing.Tuple[str, bool] | def fortran_compiler_ok_sync(compiler: str, name: str, src: str) -> typing.Tuple[str, bool]:
"""check that Fortran compiler is able to compile a basic program"""
with tempfile.NamedTemporaryFile("w", suffix=".f90", delete=False) as f:
f.write(src)
logging.debug(f"testing {compiler} with source: {src}")
cmd = [compiler, f.name]
ret = subprocess.run(cmd, stderr=subprocess.DEVNULL)
# this and delete=False is necessary due to Windows no double-open file limitation
os.unlink(f.name)
return name, ret.returncode == 0 | check that Fortran compiler is able to compile a basic program | check that Fortran compiler is able to compile a basic program | [
"check",
"that",
"Fortran",
"compiler",
"is",
"able",
"to",
"compile",
"a",
"basic",
"program"
] | def fortran_compiler_ok_sync(compiler: str, name: str, src: str) -> typing.Tuple[str, bool]:
with tempfile.NamedTemporaryFile("w", suffix=".f90", delete=False) as f:
f.write(src)
logging.debug(f"testing {compiler} with source: {src}")
cmd = [compiler, f.name]
ret = subprocess.run(cmd, stderr=subprocess.DEVNULL)
os.unlink(f.name)
return name, ret.returncode == 0 | [
"def",
"fortran_compiler_ok_sync",
"(",
"compiler",
":",
"str",
",",
"name",
":",
"str",
",",
"src",
":",
"str",
")",
"->",
"typing",
".",
"Tuple",
"[",
"str",
",",
"bool",
"]",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"\"w\"",
",",
"suffix",
"=",
"\".f90\"",
",",
"delete",
"=",
"False",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"src",
")",
"logging",
".",
"debug",
"(",
"f\"testing {compiler} with source: {src}\"",
")",
"cmd",
"=",
"[",
"compiler",
",",
"f",
".",
"name",
"]",
"ret",
"=",
"subprocess",
".",
"run",
"(",
"cmd",
",",
"stderr",
"=",
"subprocess",
".",
"DEVNULL",
")",
"os",
".",
"unlink",
"(",
"f",
".",
"name",
")",
"return",
"name",
",",
"ret",
".",
"returncode",
"==",
"0"
] | check that Fortran compiler is able to compile a basic program | [
"check",
"that",
"Fortran",
"compiler",
"is",
"able",
"to",
"compile",
"a",
"basic",
"program"
] | [
"\"\"\"check that Fortran compiler is able to compile a basic program\"\"\"",
"# this and delete=False is necessary due to Windows no double-open file limitation"
] | [
{
"param": "compiler",
"type": "str"
},
{
"param": "name",
"type": "str"
},
{
"param": "src",
"type": "str"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "compiler",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "name",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "src",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import subprocess
import tempfile
import logging
import os
import typing
def fortran_compiler_ok_sync(compiler: str, name: str, src: str) -> typing.Tuple[str, bool]:
with tempfile.NamedTemporaryFile("w", suffix=".f90", delete=False) as f:
f.write(src)
logging.debug(f"testing {compiler} with source: {src}")
cmd = [compiler, f.name]
ret = subprocess.run(cmd, stderr=subprocess.DEVNULL)
os.unlink(f.name)
return name, ret.returncode == 0 | 97 | 926 |
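Usage sketch, assuming gfortran is on PATH; subprocess.run raises FileNotFoundError for a missing compiler, and a successful compile leaves the compiler's default output file (e.g. a.out) in the working directory.

    src = "program hello\nprint *, 'hi'\nend program hello\n"
    print(fortran_compiler_ok_sync("gfortran", "gfortran", src))  # ('gfortran', True)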
7a4b77500026829e09347d9b3969e9a4c978a15e | liam-i/firebase-ios-sdk | Firestore/Protos/build_protos.py | [
"Apache-2.0"
] | Python | cpp_rename_in | <not_specific> | def cpp_rename_in(lines):
"""Renames an IN symbol to IN_.
If a proto uses a enum member named 'IN', protobuf happily uses that in the
message definition. This conflicts with the IN parameter annotation macro in
windows.h.
Args:
lines: The lines to fix.
Returns:
The lines, fixed.
"""
in_macro = re.compile(r'\bIN\b')
return [in_macro.sub('IN_', line) for line in lines] | Renames an IN symbol to IN_.
If a proto uses a enum member named 'IN', protobuf happily uses that in the
message definition. This conflicts with the IN parameter annotation macro in
windows.h.
Args:
lines: The lines to fix.
Returns:
The lines, fixed.
| Renames an IN symbol to IN_.
If a proto uses a enum member named 'IN', protobuf happily uses that in the
message definition. This conflicts with the IN parameter annotation macro in
windows.h. | [
"Renames",
"an",
"IN",
"symbol",
"to",
"IN_",
".",
"If",
"a",
"proto",
"uses",
"a",
"enum",
"member",
"named",
"'",
"IN",
"'",
"protobuf",
"happily",
"uses",
"that",
"in",
"the",
"message",
"definition",
".",
"This",
"conflicts",
"with",
"the",
"IN",
"parameter",
"annotation",
"macro",
"in",
"windows",
".",
"h",
"."
] | def cpp_rename_in(lines):
in_macro = re.compile(r'\bIN\b')
return [in_macro.sub('IN_', line) for line in lines] | [
"def",
"cpp_rename_in",
"(",
"lines",
")",
":",
"in_macro",
"=",
"re",
".",
"compile",
"(",
"r'\\bIN\\b'",
")",
"return",
"[",
"in_macro",
".",
"sub",
"(",
"'IN_'",
",",
"line",
")",
"for",
"line",
"in",
"lines",
"]"
] | Renames an IN symbol to IN_. | [
"Renames",
"an",
"IN",
"symbol",
"to",
"IN_",
"."
] | [
"\"\"\"Renames an IN symbol to IN_.\n\n If a proto uses a enum member named 'IN', protobuf happily uses that in the\n message definition. This conflicts with the IN parameter annotation macro in\n windows.h.\n\n Args:\n lines: The lines to fix.\n\n Returns:\n The lines, fixed.\n \"\"\""
] | [
{
"param": "lines",
"type": null
}
] | {
"returns": [
{
"docstring": "The lines, fixed.",
"docstring_tokens": [
"The",
"lines",
"fixed",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "lines",
"type": null,
"docstring": "The lines to fix.",
"docstring_tokens": [
"The",
"lines",
"to",
"fix",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import re
def cpp_rename_in(lines):
in_macro = re.compile(r'\bIN\b')
return [in_macro.sub('IN_', line) for line in lines] | 98 | 618 |
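Usage sketch: the \b word boundaries rename only the standalone token, leaving identifiers that merely contain IN untouched.

    lines = ["enum Dir { IN = 0, OUT = 1 };", "int INDEX;"]
    print(cpp_rename_in(lines))  # ['enum Dir { IN_ = 0, OUT = 1 };', 'int INDEX;']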
9e4e5a5a2028afec1fb905195a7b2b3b874ff7ca | EsoNes0/dork | dork/types.py | [
"MIT"
] | Python | use | <not_specific> | def use(target, name):
"""Useless use method
This method is called by filler items to be used
Concrete unusable item class use method
Args:
target (Player): passes Player as target to be used on
name (str): passed as name of item used
returns:
blurb (str): returns "You find no use of this item"
"""
return "You find no use of this item" | Useless use method
This method is called by filler items to be used
Concrete unusable item class use method
Args:
target (Player): passes Player as target to be used on
name (str): passed as name of item used
returns:
blurb (str): returns "You find no use of this item"
| Useless use method
This method is called by filler items to be used
Concrete unusable item class use method | [
"Useless",
"use",
"method",
"This",
"method",
"is",
"called",
"by",
"filler",
"items",
"to",
"be",
"used",
"Concrete",
"unusable",
"item",
"class",
"use",
"method"
] | def use(target, name):
return "You find no use of this item" | [
"def",
"use",
"(",
"target",
",",
"name",
")",
":",
"return",
"\"You find no use of this item\""
] | Useless use method
This method is called by filler items to be used | [
"Useless",
"use",
"method",
"This",
"method",
"is",
"called",
"by",
"filler",
"items",
"to",
"be",
"used"
] | [
"\"\"\"Useless use method\n\n This method is called by filler items to be used\n\n Concrete unusable item class use method\n\n Args:\n target (Player): passes Player as target to be used on\n name (str): passed as name of item used\n\n returns:\n blurb (str): returns \"You find no use of this item\"\n\n \"\"\""
] | [
{
"param": "target",
"type": null
},
{
"param": "name",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "target",
"type": null,
"docstring": "passes Player as target to be used on",
"docstring_tokens": [
"passes",
"Player",
"as",
"target",
"to",
"be",
"used",
"on"
],
"default": null,
"is_optional": false
},
{
"identifier": "name",
"type": null,
"docstring": "passed as name of item used",
"docstring_tokens": [
"passed",
"as",
"name",
"of",
"item",
"used"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | def use(target, name):
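    # Deliberate no-op for filler items: both parameters are accepted but never used.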
return "You find no use of this item" | 99 | 1,006 |
74b245113943f48daaed839f4beea4a217a7a9fe | c3e/gridpy | gridpy.py | [
"MIT"
] | Python | split_in_2bytes | <not_specific> | def split_in_2bytes(value):
"""
    Returns a tuple with 2 integers (upper, lower) matching the corresponding bytes
    The AMG88 usually uses 2 bytes to store 12bit values.
    """
    upper = value >> 8
lower = 0b011111111 & value
return (upper, lower) |
Returns a tuple with 2 integers (upper, lower) matching the corresponding bytes
The AMG88 usually uses 2 bytes to store 12bit values.
| Returns a tuple with 2 integers (upper, lower) matching the corresponding bytes
The AMG88 usually uses 2 bytes to store 12bit values. | [
"Returns",
"a",
"tuple",
"with",
"2",
"integers",
"(",
"upper",
"lower",
")",
"matching",
"the",
"according",
"bytes",
"The",
"AMG88",
"usually",
"uses",
"2",
"byte",
"to",
"store",
"12bit",
"values",
"."
] | def split_in_2bytes(value):
    upper = value >> 8
lower = 0b011111111 & value
return (upper, lower) | [
"def",
"split_in_2bytes",
"(",
"value",
")",
":",
"upper",
"=",
"value",
">>",
"9",
"lower",
"=",
"0b011111111",
"&",
"value",
"return",
"(",
"upper",
",",
"lower",
")"
] | Returns a tuple with 2 integers (upper, lower) matching the corresponding bytes
The AMG88 usually uses 2 bytes to store 12bit values. | [
"Returns",
"a",
"tuple",
"with",
"2",
"integers",
"(",
"upper",
"lower",
")",
"matching",
"the",
"according",
"bytes",
"The",
"AMG88",
"usually",
"uses",
"2",
"byte",
"to",
"store",
"12bit",
"values",
"."
] | [
"\"\"\"\n Returns a tuple with 2 integers (upper,lower) matching the according bytes\n The AMG88 usually uses 2 byte to store 12bit values.\n \"\"\""
] | [
{
"param": "value",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "value",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def split_in_2bytes(value):
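    # High byte: bits 8 and up; low byte: the bottom 8 bits (0b011111111 == 0xFF).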
    upper = value >> 8
lower = 0b011111111 & value
return (upper, lower) | 100 | 20 |
2e09acbff09afa7a8f17170f3121237522f9568d | aidan-lane/GMFinalProject | server/main.py | [
"MIT"
] | Python | cardinal_direction | <not_specific> | def cardinal_direction(b):
""" Calculate the cardinal direction for a given bearing.
    Ex: 0 is 'North'
Args:
b: bearing (in degrees)
Returns:
A string representing cardinal direction
"""
dirs = ["North", "North-East", "East", "South-East", "South", "South-West", "West",
"North-West"]
degree = 337.5
for dir in dirs:
if b >= degree and b < degree + 45:
return dir
if degree + 45 >= 360:
degree = 22.5
else:
degree += 45
    return "North" | Calculate the cardinal direction for a given bearing.
Ex: 0 is 'North'
Args:
b: bearing (in degrees)
Returns:
A string representing cardinal direction
| Calculate the cardinal direction for a given bearing.
Ex: 0 is 'North' | [
"Calculate",
"the",
"cardinal",
"direction",
"for",
"a",
"given",
"bearing",
".",
"Ex",
":",
"0",
"is",
"'",
"North"
] | def cardinal_direction(b):
dirs = ["North", "North-East", "East", "South-East", "South", "South-West", "West",
"North-West"]
degree = 337.5
for dir in dirs:
if b >= degree and b < degree + 45:
return dir
if degree + 45 >= 360:
degree = 22.5
else:
degree += 45
    return "North" | [
"def",
"cardinal_direction",
"(",
"b",
")",
":",
"dirs",
"=",
"[",
"\"North\"",
",",
"\"North-East\"",
",",
"\"East\"",
",",
"\"South-East\"",
",",
"\"South\"",
",",
"\"South-West\"",
",",
"\"West\"",
",",
"\"North-West\"",
"]",
"degree",
"=",
"337.5",
"for",
"dir",
"in",
"dirs",
":",
"if",
"b",
">=",
"degree",
"and",
"b",
"<",
"degree",
"+",
"45",
":",
"return",
"dir",
"if",
"degree",
"+",
"45",
">=",
"360",
":",
"degree",
"=",
"22.5",
"else",
":",
"degree",
"+=",
"45",
"return",
"None"
] | Calculate the cardinal direction for a given bearing. | [
"Calculate",
"the",
"cardinal",
"direction",
"for",
"a",
"given",
"bearing",
"."
] | [
"\"\"\" Calculate the cardinal direction for a given bearing.\n\n Ex: 0 is 'North\n\n Args:\n b: bearing (in degrees)\n\n Returns:\n A string representing cardinal direction\n \"\"\""
] | [
{
"param": "b",
"type": null
}
] | {
"returns": [
{
"docstring": "A string representing cardinal direction",
"docstring_tokens": [
"A",
"string",
"representing",
"cardinal",
"direction"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "b",
"type": null,
"docstring": "bearing (in degrees)",
"docstring_tokens": [
"bearing",
"(",
"in",
"degrees",
")"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def cardinal_direction(b):
dirs = ["North", "North-East", "East", "South-East", "South", "South-West", "West",
"North-West"]
degree = 337.5
for dir in dirs:
if b >= degree and b < degree + 45:
return dir
if degree + 45 >= 360:
degree = 22.5
else:
degree += 45
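    # Bearings below 22.5 fall through the loop: they belong to North's sector, which wraps past 360.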
    return "North" | 101 | 29
8c8ef114651c3d6d6990d715e2a9605ca646ee43 | avcopan/elstruct-interface | elstruct/writer/storage/__init__.py | [
"Apache-2.0"
] | Python | single_val_writer | <not_specific> | def single_val_writer(file_name, output_string):
""" Writes a single float value to the file.
"""
with open(file_name, 'w') as info_file:
        info_file.write(output_string)
return None | Writes a single float value to the file.
| Writes a single float value to the file. | [
"Writes",
"a",
"single",
"float",
"value",
"to",
"the",
"file",
"."
] | def single_val_writer(file_name, output_string):
with open(file_name, 'w') as info_file:
        info_file.write(output_string)
return None | [
"def",
"single_val_writer",
"(",
"file_name",
",",
"output_string",
")",
":",
"with",
"open",
"(",
"file_name",
",",
"'w'",
")",
"as",
"info_file",
":",
"info_file",
".",
"write",
"(",
")",
"return",
"None"
] | Writes a single float value to the file. | [
"Writes",
"a",
"single",
"float",
"value",
"to",
"the",
"file",
"."
] | [
"\"\"\" Writes a single float value to the file.\n \"\"\""
] | [
{
"param": "file_name",
"type": null
},
{
"param": "output_string",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "file_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "output_string",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def single_val_writer(file_name, output_string):
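    # Opening with 'w' truncates the file, so the written value becomes its only contents.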
with open(file_name, 'w') as info_file:
        info_file.write(output_string)
return None | 102 | 389 |
de2877a0a841293fa84498df50935e88f30a931d | wbazant/kneaddata | kneaddata/read_count_table_concat_pairs.py | [
"MIT"
] | Python | parse_arguments | <not_specific> | def parse_arguments(args):
""" Parse the arguments from the user"""
    parser = argparse.ArgumentParser(
description="Count paired/orphan reads from kneaddata run on a set of paired end reads concatenated into a single file.")
parser.add_argument("--input", help="The folder of kneaddata output files for all samples", required=True)
parser.add_argument("--output", help="The output file of reads to write.", required=True)
return parser.parse_args() | Parse the arguments from the user | Parse the arguments from the user | [
"Parse",
"the",
"arguments",
"from",
"the",
"user"
] | def parse_arguments(args):
    parser = argparse.ArgumentParser(
description="Count paired/orphan reads from kneaddata run on a set of paired end reads concatenated into a single file.")
parser.add_argument("--input", help="The folder of kneaddata output files for all samples", required=True)
parser.add_argument("--output", help="The output file of reads to write.", required=True)
return parser.parse_args() | [
"def",
"parse_arguments",
"(",
"args",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Count paired/orphan reads from kneaddata run on a set of paired end reads concatenated into a single file.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--input\"",
",",
"help",
"=",
"\"The folder of kneaddata output files for all samples\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--output\"",
",",
"help",
"=",
"\"The output file of reads to write.\"",
",",
"required",
"=",
"True",
")",
"return",
"parser",
".",
"parse_args",
"(",
")"
] | Parse the arguments from the user | [
"Parse",
"the",
"arguments",
"from",
"the",
"user"
] | [
"\"\"\" Parse the arguments from the user\"\"\""
] | [
{
"param": "args",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "args",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import argparse
def parse_arguments(args):
    parser = argparse.ArgumentParser(
description="Count paired/orphan reads from kneaddata run on a set of paired end reads concatenated into a single file.")
parser.add_argument("--input", help="The folder of kneaddata output files for all samples", required=True)
parser.add_argument("--output", help="The output file of reads to write.", required=True)
return parser.parse_args() | 103 | 116 |
6693540ea470157df5566140677a3ebb2745f363 | delvor64/practices-of-the-python-pro | ch03/ex_01_greeter.py | [
"MIT"
] | Python | part_of_day | <not_specific> | def part_of_day():
'''
Determine which part of the day to greet the customer with.
'''
hour = datetime.datetime.now().hour
if hour < 12:
part = 'Morning'
elif hour < 17:
part = 'Afternoon'
else:
part = 'Evening'
return part |
Determine which part of the day to greet the customer with.
| Determine which part of the day to greet the customer with. | [
"Determine",
"which",
"part",
"of",
"the",
"day",
"to",
"greet",
"the",
"customer",
"with",
"."
] | def part_of_day():
hour = datetime.datetime.now().hour
if hour < 12:
part = 'Morning'
elif hour < 17:
part = 'Afternoon'
else:
part = 'Evening'
return part | [
"def",
"part_of_day",
"(",
")",
":",
"hour",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"hour",
"if",
"hour",
"<",
"12",
":",
"part",
"=",
"'Morning'",
"elif",
"hour",
"<",
"17",
":",
"part",
"=",
"'Afternoon'",
"else",
":",
"part",
"=",
"'Evening'",
"return",
"part"
] | Determine which part of the day to greet the customer with. | [
"Determine",
"which",
"part",
"of",
"the",
"day",
"to",
"greet",
"the",
"customer",
"with",
"."
] | [
"'''\n Determine which part of the day to greet the customer with.\n '''"
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | import datetime
def part_of_day():
hour = datetime.datetime.now().hour
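    # 0-11 is morning, 12-16 afternoon, and 17 onward evening, based on the local clock.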
if hour < 12:
part = 'Morning'
elif hour < 17:
part = 'Afternoon'
else:
part = 'Evening'
return part | 104 | 126 |
3155603a10a0d361d6a7ad339ad45f7b79ae0247 | bernhardreiter/OpenSlides | server/openslides/core/migrations/0002_misc_features.py | [
"MIT"
] | Python | remove_old_countdowns_messages | null | def remove_old_countdowns_messages(apps, schema_editor):
"""
Remove old countdowns and messages created by 2.0 from projector elements which are unusable in 2.1.
"""
Projector = apps.get_model("core", "Projector")
projector = Projector.objects.get(pk=1)
projector_config = projector.config
for key, value in list(projector.config.items()):
if value.get("name") in ("core/countdown", "core/message"):
del projector_config[key]
projector.config = projector_config
projector.save(skip_autoupdate=True) |
Remove old countdowns and messages created by 2.0 from projector elements which are unusable in 2.1.
| Remove old countdowns and messages created by 2.0 from projector elements which are unusable in 2.1. | [
"Remove",
"old",
"countdowns",
"and",
"messages",
"created",
"by",
"2",
".",
"0",
"from",
"projector",
"elements",
"which",
"are",
"unusable",
"in",
"2",
".",
"1",
"."
] | def remove_old_countdowns_messages(apps, schema_editor):
Projector = apps.get_model("core", "Projector")
projector = Projector.objects.get(pk=1)
projector_config = projector.config
for key, value in list(projector.config.items()):
if value.get("name") in ("core/countdown", "core/message"):
del projector_config[key]
projector.config = projector_config
projector.save(skip_autoupdate=True) | [
"def",
"remove_old_countdowns_messages",
"(",
"apps",
",",
"schema_editor",
")",
":",
"Projector",
"=",
"apps",
".",
"get_model",
"(",
"\"core\"",
",",
"\"Projector\"",
")",
"projector",
"=",
"Projector",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"1",
")",
"projector_config",
"=",
"projector",
".",
"config",
"for",
"key",
",",
"value",
"in",
"list",
"(",
"projector",
".",
"config",
".",
"items",
"(",
")",
")",
":",
"if",
"value",
".",
"get",
"(",
"\"name\"",
")",
"in",
"(",
"\"core/countdown\"",
",",
"\"core/message\"",
")",
":",
"del",
"projector_config",
"[",
"key",
"]",
"projector",
".",
"config",
"=",
"projector_config",
"projector",
".",
"save",
"(",
"skip_autoupdate",
"=",
"True",
")"
] | Remove old countdowns and messages created by 2.0 from projector elements which are unusable in 2.1. | [
"Remove",
"old",
"countdowns",
"and",
"messages",
"created",
"by",
"2",
".",
"0",
"from",
"projector",
"elements",
"which",
"are",
"unusable",
"in",
"2",
".",
"1",
"."
] | [
"\"\"\"\n Remove old countdowns and messages created by 2.0 from projector elements which are unusable in 2.1.\n \"\"\""
] | [
{
"param": "apps",
"type": null
},
{
"param": "schema_editor",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "apps",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "schema_editor",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def remove_old_countdowns_messages(apps, schema_editor):
Projector = apps.get_model("core", "Projector")
projector = Projector.objects.get(pk=1)
projector_config = projector.config
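    # Materialize the items with list() so keys can be deleted from the dict while iterating.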
for key, value in list(projector.config.items()):
if value.get("name") in ("core/countdown", "core/message"):
del projector_config[key]
projector.config = projector_config
projector.save(skip_autoupdate=True) | 105 | 453 |
a7230b83935edfcbd4f7b042fd3841ef68ade640 | flothesof/rois-maudits-carte-interactive | make_map.py | [
"BSD-3-Clause"
] | Python | make_snippet | <not_specific> | def make_snippet(snippets, location):
"""Makes a colored html snippet."""
output = "<br>".join(sentence.replace(
location, f'<i style="background-color: yellow;">{location}</i>') for sentence in snippets)
return output | Makes a colored html snippet. | Makes a colored html snippet. | [
"Makes",
"a",
"colored",
"html",
"snippet",
"."
] | def make_snippet(snippets, location):
output = "<br>".join(sentence.replace(
location, f'<i style="background-color: yellow;">{location}</i>') for sentence in snippets)
return output | [
"def",
"make_snippet",
"(",
"snippets",
",",
"location",
")",
":",
"output",
"=",
"\"<br>\"",
".",
"join",
"(",
"sentence",
".",
"replace",
"(",
"location",
",",
"f'<i style=\"background-color: yellow;\">{location}</i>'",
")",
"for",
"sentence",
"in",
"snippets",
")",
"return",
"output"
] | Makes a colored html snippet. | [
"Makes",
"a",
"colored",
"html",
"snippet",
"."
] | [
"\"\"\"Makes a colored html snippet.\"\"\""
] | [
{
"param": "snippets",
"type": null
},
{
"param": "location",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "snippets",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "location",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def make_snippet(snippets, location):
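    # Wrap each occurrence of the location in a yellow highlight, then join the sentences with <br> tags.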
output = "<br>".join(sentence.replace(
location, f'<i style="background-color: yellow;">{location}</i>') for sentence in snippets)
return output | 106 | 731 |
2602983113e7dc0d48af4bf30da65e854db2b336 | lboyd93/arcgis-runtime-samples-dotnet | tools/metadata_tools/process_metadata.py | [
"Apache-2.0"
] | Python | write_samples_toc | null | def write_samples_toc(platform_dir, relative_path_to_samples, samples_in_categories):
'''
    samples_in_categories is a dictionary of categories, each value is a list of sample_metadata
platform_dir is where the readme.md file should be written
'''
readme_text = "# Table of contents\n\n"
for category in samples_in_categories.keys():
readme_text += f"## {category}\n\n"
for sample in samples_in_categories[category]:
entry_url = f"{relative_path_to_samples}/{sample.category}/{sample.formal_name}/readme.md"
entry_url = urllib.parse.quote(entry_url)
readme_text += f"* [{sample.friendly_name}]({entry_url}) - {sample.description}\n"
readme_text += "\n"
readme_path = os.path.join(platform_dir, "../..", "readme.md")
with open(readme_path, 'w+') as file:
file.write(readme_text) |
samples_in_categories is a dictionary of categories, each value is a list of sample_metadata
platform_dir is where the readme.md file should be written
| samples_in_categories is a dictionary of categories, each value is a list of sample_metadata
platform_dir is where the readme.md file should be written | [
"sample_in_categories",
"is",
"a",
"dictionary",
"of",
"categories",
"each",
"key",
"is",
"a",
"list",
"of",
"sample_metadata",
"platform_dir",
"is",
"where",
"the",
"readme",
".",
"md",
"file",
"should",
"be",
"written"
] | def write_samples_toc(platform_dir, relative_path_to_samples, samples_in_categories):
readme_text = "# Table of contents\n\n"
for category in samples_in_categories.keys():
readme_text += f"## {category}\n\n"
for sample in samples_in_categories[category]:
entry_url = f"{relative_path_to_samples}/{sample.category}/{sample.formal_name}/readme.md"
entry_url = urllib.parse.quote(entry_url)
readme_text += f"* [{sample.friendly_name}]({entry_url}) - {sample.description}\n"
readme_text += "\n"
readme_path = os.path.join(platform_dir, "../..", "readme.md")
with open(readme_path, 'w+') as file:
file.write(readme_text) | [
"def",
"write_samples_toc",
"(",
"platform_dir",
",",
"relative_path_to_samples",
",",
"samples_in_categories",
")",
":",
"readme_text",
"=",
"\"# Table of contents\\n\\n\"",
"for",
"category",
"in",
"samples_in_categories",
".",
"keys",
"(",
")",
":",
"readme_text",
"+=",
"f\"## {category}\\n\\n\"",
"for",
"sample",
"in",
"samples_in_categories",
"[",
"category",
"]",
":",
"entry_url",
"=",
"f\"{relative_path_to_samples}/{sample.category}/{sample.formal_name}/readme.md\"",
"entry_url",
"=",
"urllib",
".",
"parse",
".",
"quote",
"(",
"entry_url",
")",
"readme_text",
"+=",
"f\"* [{sample.friendly_name}]({entry_url}) - {sample.description}\\n\"",
"readme_text",
"+=",
"\"\\n\"",
"readme_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"platform_dir",
",",
"\"../..\"",
",",
"\"readme.md\"",
")",
"with",
"open",
"(",
"readme_path",
",",
"'w+'",
")",
"as",
"file",
":",
"file",
".",
"write",
"(",
"readme_text",
")"
] | samples_in_categories is a dictionary of categories, each value is a list of sample_metadata
platform_dir is where the readme.md file should be written | [
"sample_in_categories",
"is",
"a",
"dictionary",
"of",
"categories",
"each",
"key",
"is",
"a",
"list",
"of",
"sample_metadata",
"platform_dir",
"is",
"where",
"the",
"readme",
".",
"md",
"file",
"should",
"be",
"written"
] | [
"'''\n sample_in_categories is a dictionary of categories, each key is a list of sample_metadata\n platform_dir is where the readme.md file should be written\n '''"
] | [
{
"param": "platform_dir",
"type": null
},
{
"param": "relative_path_to_samples",
"type": null
},
{
"param": "samples_in_categories",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "platform_dir",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "relative_path_to_samples",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "samples_in_categories",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import urllib.parse
import os
def write_samples_toc(platform_dir, relative_path_to_samples, samples_in_categories):
readme_text = "# Table of contents\n\n"
for category in samples_in_categories.keys():
readme_text += f"## {category}\n\n"
for sample in samples_in_categories[category]:
entry_url = f"{relative_path_to_samples}/{sample.category}/{sample.formal_name}/readme.md"
entry_url = urllib.parse.quote(entry_url)
readme_text += f"* [{sample.friendly_name}]({entry_url}) - {sample.description}\n"
readme_text += "\n"
readme_path = os.path.join(platform_dir, "../..", "readme.md")
with open(readme_path, 'w+') as file:
file.write(readme_text) | 108 | 548 |