Unnamed: 0 (int64, 0–10k) | function (string, lengths 79–138k) | label (string, 20 classes) | info (string, lengths 42–261) |
---|---|---|---|
100 | def to_python(self, value):
    try:
        value = self.conv.to_python(value)
    except __HOLE__:
        return None
    else:
        if value is not None:
            return self.query.filter_by(id=value).first() | ValidationError | dataset/ETHPy150Open SmartTeleMax/iktomi/iktomi/unstable/forms/convs.py/ModelChoice.to_python |
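In each row the `__HOLE__` token masks the exception name in the row's `except` clause, and the label column supplies the masked name. Restoring the label for row 100 gives the clause below; this is an illustration of how a row reads, not part of the stored row:

```python
try:
    value = self.conv.to_python(value)
except ValidationError:  # filled in from the row's label column
    return None
```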
101 | def get_object_label(self, obj):
    label = OptionLabel(getattr(obj, self.title_field))
    try:
        label.published = obj.publish
    except __HOLE__:
        pass
    return label | AttributeError | dataset/ETHPy150Open SmartTeleMax/iktomi/iktomi/unstable/forms/convs.py/ModelChoice.get_object_label |
102 | def memstr_to_kbytes(text):
    """ Convert a memory text to its value in kilobytes.
    """
    kilo = 1024
    units = dict(K=1, M=kilo, G=kilo ** 2)
    try:
        size = int(units[text[-1]] * float(text[:-1]))
    except (KeyError, __HOLE__):
        raise ValueError(
            "Invalid literal for size given: %s (type %s) should be "
            "like '10G', '500M', '50K'." % (text, type(text))
        )
    return size | ValueError | dataset/ETHPy150Open scikit-learn/scikit-learn/sklearn/externals/joblib/disk.py/memstr_to_kbytes |
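A usage sketch for the function above, assuming the labeled `ValueError` fills `__HOLE__`; the unit suffix selects a kilobyte multiplier, and a missing suffix surfaces as the re-raised `ValueError`:

```python
assert memstr_to_kbytes('2K') == 2
assert memstr_to_kbytes('500M') == 500 * 1024      # 512000 KB
assert memstr_to_kbytes('10G') == 10 * 1024 ** 2   # 10485760 KB
try:
    memstr_to_kbytes('10')  # no unit suffix -> KeyError on units['0'] -> ValueError
except ValueError as exc:
    print(exc)
```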
103 | def mkdirp(d):
    """Ensure directory d exists (like mkdir -p on Unix)
    No guarantee that the directory is writable.
    """
    try:
        os.makedirs(d)
    except __HOLE__ as e:
        if e.errno != errno.EEXIST:
            raise
# if a rmtree operation fails in rm_subdirs, wait for this much time (in secs),
# then retry once. if it still fails, raise the exception | OSError | dataset/ETHPy150Open scikit-learn/scikit-learn/sklearn/externals/joblib/disk.py/mkdirp |
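`mkdirp` swallows only the "already exists" errno, so repeated calls are safe while real failures (e.g. permission denied) still propagate. A minimal self-contained sketch, with the labeled `OSError` filled in:

```python
import errno
import os

def mkdirp(d):
    try:
        os.makedirs(d)
    except OSError as e:              # the labeled exception
        if e.errno != errno.EEXIST:   # re-raise anything but "already exists"
            raise

mkdirp('/tmp/a/b/c')
mkdirp('/tmp/a/b/c')  # second call is a no-op instead of raising
```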
104 | def _python_shell_default(python_shell, __pub_jid):
    '''
    Set python_shell default based on remote execution and __opts__['cmd_safe']
    '''
    try:
        # Default to python_shell=True when run directly from remote execution
        # system. Cross-module calls won't have a jid.
        if __pub_jid and python_shell is None:
            return True
        elif __opts__.get('cmd_safe', True) is False and python_shell is None:
            # Override-switch for python_shell
            return True
    except __HOLE__:
        pass
    return python_shell | NameError | dataset/ETHPy150Open saltstack/salt/salt/modules/cmdmod.py/_python_shell_default |
105 | def _chroot_pids(chroot):
    pids = []
    for root in glob.glob('/proc/[0-9]*/root'):
        try:
            link = os.path.realpath(root)
            if link.startswith(chroot):
                pids.append(int(os.path.basename(
                    os.path.dirname(root)
                )))
        except __HOLE__:
            pass
    return pids | OSError | dataset/ETHPy150Open saltstack/salt/salt/modules/cmdmod.py/_chroot_pids |
106 | def _check_loglevel(level='info', quiet=False):
    '''
    Retrieve the level code for use in logging.Logger.log().
    '''
    def _bad_level(level):
        log.error(
            'Invalid output_loglevel \'{0}\'. Valid levels are: {1}. Falling '
            'back to \'info\'.'
            .format(
                level,
                ', '.join(
                    sorted(LOG_LEVELS, key=LOG_LEVELS.get, reverse=True)
                )
            )
        )
        return LOG_LEVELS['info']
    if salt.utils.is_true(quiet) or str(level).lower() == 'quiet':
        return None
    try:
        level = level.lower()
        if level not in LOG_LEVELS:
            return _bad_level(level)
    except __HOLE__:
        return _bad_level(level)
    return LOG_LEVELS[level] | AttributeError | dataset/ETHPy150Open saltstack/salt/salt/modules/cmdmod.py/_check_loglevel |
107 | def _run(cmd,
         cwd=None,
         stdin=None,
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE,
         output_loglevel='debug',
         log_callback=None,
         runas=None,
         shell=DEFAULT_SHELL,
         python_shell=False,
         env=None,
         clean_env=False,
         rstrip=True,
         template=None,
         umask=None,
         timeout=None,
         with_communicate=True,
         reset_system_locale=True,
         ignore_retcode=False,
         saltenv='base',
         pillarenv=None,
         pillar_override=None,
         use_vt=False,
         password=None,
         bg=False,
         encoded_cmd=False,
         **kwargs):
    '''
    Do the DRY thing and only call subprocess.Popen() once
    '''
    if _is_valid_shell(shell) is False:
        log.warning(
            'Attempt to run a shell command with what may be an invalid shell! '
            'Check to ensure that the shell <{0}> is valid for this user.'
            .format(shell))
    log_callback = _check_cb(log_callback)
    # Set the default working directory to the home directory of the user
    # salt-minion is running as. Defaults to home directory of user under which
    # the minion is running.
    if not cwd:
        cwd = os.path.expanduser('~{0}'.format('' if not runas else runas))
        # make sure we can access the cwd
        # when run from sudo or another environment where the euid is
        # changed ~ will expand to the home of the original uid and
        # the euid might not have access to it. See issue #1844
        if not os.access(cwd, os.R_OK):
            cwd = '/'
            if salt.utils.is_windows():
                cwd = os.tempnam()[:3]
    else:
        # Handle edge cases where numeric/other input is entered, and would be
        # yaml-ified into non-string types
        cwd = str(cwd)
    if not salt.utils.is_windows():
        if not os.path.isfile(shell) or not os.access(shell, os.X_OK):
            msg = 'The shell {0} is not available'.format(shell)
            raise CommandExecutionError(msg)
    if salt.utils.is_windows() and use_vt:  # Memoization so not much overhead
        raise CommandExecutionError('VT not available on windows')
    if shell.lower().strip() == 'powershell':
        # If we were called by script(), then fakeout the Windows
        # shell to run a Powershell script.
        # Else just run a Powershell command.
        stack = traceback.extract_stack(limit=2)
        # extract_stack() returns a list of tuples.
        # The last item in the list [-1] is the current method.
        # The third item[2] in each tuple is the name of that method.
        if stack[-2][2] == 'script':
            cmd = 'Powershell -NonInteractive -ExecutionPolicy Bypass -File ' + cmd
        elif encoded_cmd:
            cmd = 'Powershell -NonInteractive -EncodedCommand {0}'.format(cmd)
        else:
            cmd = 'Powershell -NonInteractive "{0}"'.format(cmd.replace('"', '\\"'))
    # munge the cmd and cwd through the template
    (cmd, cwd) = _render_cmd(cmd, cwd, template, saltenv, pillarenv, pillar_override)
    ret = {}
    env = _parse_env(env)
    for bad_env_key in (x for x, y in six.iteritems(env) if y is None):
        log.error('Environment variable \'{0}\' passed without a value. '
                  'Setting value to an empty string'.format(bad_env_key))
        env[bad_env_key] = ''
    if runas and salt.utils.is_windows():
        if not password:
            msg = 'password is a required argument for runas on Windows'
            raise CommandExecutionError(msg)
        if not HAS_WIN_RUNAS:
            msg = 'missing salt/utils/win_runas.py'
            raise CommandExecutionError(msg)
        if not isinstance(cmd, list):
            cmd = salt.utils.shlex_split(cmd, posix=False)
        cmd = ' '.join(cmd)
        return win_runas(cmd, runas, password, cwd)
    if runas:
        # Save the original command before munging it
        try:
            pwd.getpwnam(runas)
        except KeyError:
            raise CommandExecutionError(
                'User \'{0}\' is not available'.format(runas)
            )
        try:
            # Getting the environment for the runas user
            # There must be a better way to do this.
            py_code = (
                'import sys, os, itertools; '
                'sys.stdout.write(\"\\0\".join(itertools.chain(*os.environ.items())))'
            )
            if __grains__['os'] in ['MacOS', 'Darwin']:
                env_cmd = ('sudo', '-i', '-u', runas, '--',
                           sys.executable)
            elif __grains__['os'] in ['FreeBSD']:
                env_cmd = ('su', '-', runas, '-c',
                           "{0} -c {1}".format(shell, sys.executable))
            elif __grains__['os_family'] in ['Solaris']:
                env_cmd = ('su', '-', runas, '-c', sys.executable)
            elif __grains__['os_family'] in ['AIX']:
                env_cmd = ('su', runas, '-c', sys.executable)
            else:
                env_cmd = ('su', '-s', shell, '-', runas, '-c', sys.executable)
            env_encoded = subprocess.Popen(
                env_cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE
            ).communicate(py_code)[0]
            import itertools
            env_runas = dict(itertools.izip(*[iter(env_encoded.split(b'\0'))]*2))
            env_runas.update(env)
            env = env_runas
            # Encode unicode kwargs to filesystem encoding to avoid a
            # UnicodeEncodeError when the subprocess is invoked.
            fse = sys.getfilesystemencoding()
            for key, val in six.iteritems(env):
                if isinstance(val, six.text_type):
                    env[key] = val.encode(fse)
        except ValueError:
            raise CommandExecutionError(
                'Environment could not be retrieved for User \'{0}\''.format(
                    runas
                )
            )
    if _check_loglevel(output_loglevel) is not None:
        # Always log the shell commands at INFO unless quiet logging is
        # requested. The command output is what will be controlled by the
        # 'loglevel' parameter.
        msg = (
            'Executing command {0}{1}{0} {2}in directory \'{3}\'{4}'.format(
                '\'' if not isinstance(cmd, list) else '',
                cmd,
                'as user \'{0}\' '.format(runas) if runas else '',
                cwd,
                ' in the background, no output will be logged' if bg else ''
            )
        )
        log.info(log_callback(msg))
    if reset_system_locale is True:
        if not salt.utils.is_windows():
            # Default to C!
            # Salt only knows how to parse English words
            # Don't override if the user has passed LC_ALL
            env.setdefault('LC_CTYPE', 'C')
            env.setdefault('LC_NUMERIC', 'C')
            env.setdefault('LC_TIME', 'C')
            env.setdefault('LC_COLLATE', 'C')
            env.setdefault('LC_MONETARY', 'C')
            env.setdefault('LC_MESSAGES', 'C')
            env.setdefault('LC_PAPER', 'C')
            env.setdefault('LC_NAME', 'C')
            env.setdefault('LC_ADDRESS', 'C')
            env.setdefault('LC_TELEPHONE', 'C')
            env.setdefault('LC_MEASUREMENT', 'C')
            env.setdefault('LC_IDENTIFICATION', 'C')
        else:
            # On Windows set the codepage to US English.
            if python_shell:
                cmd = 'chcp 437 > nul & ' + cmd
    if clean_env:
        run_env = env
    else:
        run_env = os.environ.copy()
        run_env.update(env)
    if python_shell is None:
        python_shell = False
    kwargs = {'cwd': cwd,
              'shell': python_shell,
              'env': run_env,
              'stdin': str(stdin) if stdin is not None else stdin,
              'stdout': stdout,
              'stderr': stderr,
              'with_communicate': with_communicate,
              'timeout': timeout,
              'bg': bg,
              }
    if umask is not None:
        _umask = str(umask).lstrip('0')
        if _umask == '':
            msg = 'Zero umask is not allowed.'
            raise CommandExecutionError(msg)
        try:
            _umask = int(_umask, 8)
        except ValueError:
            msg = 'Invalid umask: \'{0}\''.format(umask)
            raise CommandExecutionError(msg)
    else:
        _umask = None
    if runas or umask:
        kwargs['preexec_fn'] = functools.partial(
            salt.utils.chugid_and_umask,
            runas,
            _umask)
    if not salt.utils.is_windows():
        # close_fds is not supported on Windows platforms if you redirect
        # stdin/stdout/stderr
        if kwargs['shell'] is True:
            kwargs['executable'] = shell
        kwargs['close_fds'] = True
    if not os.path.isabs(cwd) or not os.path.isdir(cwd):
        raise CommandExecutionError(
            'Specified cwd \'{0}\' either not absolute or does not exist'
            .format(cwd)
        )
    if python_shell is not True and not isinstance(cmd, list):
        posix = True
        if salt.utils.is_windows():
            posix = False
        cmd = salt.utils.shlex_split(cmd, posix=posix)
    if not use_vt:
        # This is where the magic happens
        try:
            proc = salt.utils.timed_subprocess.TimedProc(cmd, **kwargs)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                'Unable to run command \'{0}\' with the context \'{1}\', '
                'reason: {2}'.format(cmd, kwargs, exc)
            )
        try:
            proc.run()
        except TimedProcTimeoutError as exc:
            ret['stdout'] = str(exc)
            ret['stderr'] = ''
            ret['retcode'] = None
            ret['pid'] = proc.process.pid
            # ok return code for timeouts?
            ret['retcode'] = 1
            return ret
        out, err = proc.stdout, proc.stderr
        if rstrip:
            if out is not None:
                out = salt.utils.to_str(out).rstrip()
            if err is not None:
                err = salt.utils.to_str(err).rstrip()
        ret['pid'] = proc.process.pid
        ret['retcode'] = proc.process.returncode
        ret['stdout'] = out
        ret['stderr'] = err
    else:
        to = ''
        if timeout:
            to = ' (timeout: {0}s)'.format(timeout)
        if _check_loglevel(output_loglevel) is not None:
            msg = 'Running {0} in VT{1}'.format(cmd, to)
            log.debug(log_callback(msg))
        stdout, stderr = '', ''
        now = time.time()
        if timeout:
            will_timeout = now + timeout
        else:
            will_timeout = -1
        try:
            proc = vt.Terminal(cmd,
                               shell=True,
                               log_stdout=True,
                               log_stderr=True,
                               cwd=cwd,
                               preexec_fn=kwargs.get('preexec_fn', None),
                               env=run_env,
                               log_stdin_level=output_loglevel,
                               log_stdout_level=output_loglevel,
                               log_stderr_level=output_loglevel,
                               stream_stdout=True,
                               stream_stderr=True)
            ret['pid'] = proc.pid
            while proc.has_unread_data:
                try:
                    try:
                        time.sleep(0.5)
                        try:
                            cstdout, cstderr = proc.recv()
                        except __HOLE__:
                            cstdout, cstderr = '', ''
                        if cstdout:
                            stdout += cstdout
                        else:
                            cstdout = ''
                        if cstderr:
                            stderr += cstderr
                        else:
                            cstderr = ''
                        if timeout and (time.time() > will_timeout):
                            ret['stderr'] = (
                                'SALT: Timeout after {0}s\n{1}').format(
                                    timeout, stderr)
                            ret['retcode'] = None
                            break
                    except KeyboardInterrupt:
                        ret['stderr'] = 'SALT: User break\n{0}'.format(stderr)
                        ret['retcode'] = 1
                        break
                except vt.TerminalException as exc:
                    log.error(
                        'VT: {0}'.format(exc),
                        exc_info_on_loglevel=logging.DEBUG)
                    ret = {'retcode': 1, 'pid': '2'}
                    break
            # only set stdout on success as we already mangled in other
            # cases
            ret['stdout'] = stdout
            if not proc.isalive():
                # Process terminated, i.e., not canceled by the user or by
                # the timeout
                ret['stderr'] = stderr
                ret['retcode'] = proc.exitstatus
            ret['pid'] = proc.pid
        finally:
            proc.close(terminate=True, kill=True)
    try:
        if ignore_retcode:
            __context__['retcode'] = 0
        else:
            __context__['retcode'] = ret['retcode']
    except NameError:
        # Ignore the context error during grain generation
        pass
    return ret | IOError | dataset/ETHPy150Open saltstack/salt/salt/modules/cmdmod.py/_run |
108 | def run(cmd,
        cwd=None,
        stdin=None,
        runas=None,
        shell=DEFAULT_SHELL,
        python_shell=None,
        env=None,
        clean_env=False,
        template=None,
        rstrip=True,
        umask=None,
        output_loglevel='debug',
        log_callback=None,
        timeout=None,
        reset_system_locale=True,
        ignore_retcode=False,
        saltenv='base',
        use_vt=False,
        bg=False,
        encoded_cmd=False,
        **kwargs):
    r'''
    Execute the passed command and return the output as a string

    Note that ``env`` represents the environment variables for the command, and
    should be formatted as a dict, or a YAML string which resolves to a dict.

    :param str cmd: The command to run. ex: 'ls -lart /home'

    :param str cwd: The current working directory to execute the command in,
        defaults to `/root` (`C:\` in windows)

    :param str stdin: A string of standard input can be specified for the
        command to be run using the ``stdin`` parameter. This can be useful in
        cases where sensitive information must be read from standard input.

    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password

    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's

        .. versionadded:: 2016.3.0

    :param str shell: Shell to execute under. Defaults to the system default
        shell.

    :param bool python_shell: If False, let python handle the positional
        arguments. Set to True to use shell features, such as pipes or
        redirection

    :param bool bg: If True, run command in background and do not await or
        deliver its results

    :param list env: A list of environment variables to be set prior to
        execution.

        Example:

        .. code-block:: yaml

            salt://scripts/foo.sh:
              cmd.script:
                - env:
                  - BATCH: 'yes'

        .. warning::

            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :doc:`here
            </topics/troubleshooting/yaml_idiosyncrasies>`.

        Variables as values are not evaluated. So $PATH in the following
        example is a literal '$PATH':

        .. code-block:: yaml

            salt://scripts/bar.sh:
              cmd.script:
                - env: "PATH=/some/path:$PATH"

        One can still use the existing $PATH by using a bit of Jinja:

        .. code-block:: yaml

            {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}

            mycommand:
              cmd.run:
                - name: ls -l /
                - env:
                  - PATH: {{ [current_path, '/my/special/bin']|join(':') }}

    :param bool clean_env: Attempt to clean out all other shell environment
        variables and set only those provided in the 'env' argument to this
        function.

    :param str template: If this setting is applied then the named templating
        engine will be used to render the downloaded file. Currently jinja,
        mako, and wempy are supported

    :param bool rstrip: Strip all whitespace off the end of output before it is
        returned.

    :param str umask: The umask (in octal) to use when running the command.

    :param str output_loglevel: Control the loglevel at which the output from
        the command is logged. Note that the command being run will still be
        logged (loglevel: DEBUG) regardless, unless ``quiet`` is used for this
        value.

    :param int timeout: A timeout in seconds for the executed process to
        return.

    :param bool use_vt: Use VT utils (saltstack) to stream the command output
        more interactively to the console and the logs. This is experimental.

    :param bool encoded_cmd: Specify if the supplied command is encoded.
        Only applies to shell 'powershell'.

    .. warning::

        This function does not process commands through a shell
        unless the python_shell flag is set to True. This means that any
        shell-specific functionality such as 'echo' or the use of pipes,
        redirection or &&, should either be migrated to cmd.shell or
        have the python_shell=True flag set here.

        The use of python_shell=True means that the shell will accept _any_
        input including potentially malicious commands such as
        'good_command;rm -rf /'. Be absolutely certain that you have sanitized
        your input prior to using python_shell=True

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.run "ls -l | awk '/foo/{print \\$2}'"

    The template arg can be set to 'jinja' or another supported template
    engine to render the command arguments before execution.
    For example:

    .. code-block:: bash

        salt '*' cmd.run template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"

    Specify an alternate shell with the shell parameter:

    .. code-block:: bash

        salt '*' cmd.run "Get-ChildItem C:\\ " shell='powershell'

    A string of standard input can be specified for the command to be run using
    the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input:

    .. code-block:: bash

        salt '*' cmd.run "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'

    If an equal sign (``=``) appears in an argument to a Salt command it is
    interpreted as a keyword argument in the format ``key=val``. That
    processing can be bypassed in order to pass an equal sign through to the
    remote shell command by manually specifying the kwarg:

    .. code-block:: bash

        salt '*' cmd.run cmd='sed -e s/=/:/g'
    '''
    python_shell = _python_shell_default(python_shell,
                                         kwargs.get('__pub_jid', ''))
    ret = _run(cmd,
               runas=runas,
               shell=shell,
               python_shell=python_shell,
               cwd=cwd,
               stdin=stdin,
               stderr=subprocess.STDOUT,
               env=env,
               clean_env=clean_env,
               template=template,
               rstrip=rstrip,
               umask=umask,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               pillarenv=kwargs.get('pillarenv'),
               pillar_override=kwargs.get('pillar'),
               use_vt=use_vt,
               password=kwargs.get('password', None),
               bg=bg,
               encoded_cmd=encoded_cmd)
    log_callback = _check_cb(log_callback)
    if 'pid' in ret and '__pub_jid' in kwargs:
        # Stuff the child pid in the JID file
        try:
            proc_dir = os.path.join(__opts__['cachedir'], 'proc')
            jid_file = os.path.join(proc_dir, kwargs['__pub_jid'])
            if os.path.isfile(jid_file):
                serial = salt.payload.Serial(__opts__)
                with salt.utils.fopen(jid_file, 'rb') as fn_:
                    jid_dict = serial.load(fn_)
                if 'child_pids' in jid_dict:
                    jid_dict['child_pids'].append(ret['pid'])
                else:
                    jid_dict['child_pids'] = [ret['pid']]
                # Rewrite file
                with salt.utils.fopen(jid_file, 'w+b') as fn_:
                    fn_.write(serial.dumps(jid_dict))
        except (NameError, __HOLE__):
            # Avoids errors from msgpack not being loaded in salt-ssh
            pass
    lvl = _check_loglevel(output_loglevel)
    if lvl is not None:
        if not ignore_retcode and ret['retcode'] != 0:
            if lvl < LOG_LEVELS['error']:
                lvl = LOG_LEVELS['error']
            msg = (
                'Command \'{0}\' failed with return code: {1}'.format(
                    cmd,
                    ret['retcode']
                )
            )
            log.error(log_callback(msg))
        log.log(lvl, 'output: {0}'.format(log_callback(ret['stdout'])))
    return ret['stdout'] | TypeError | dataset/ETHPy150Open saltstack/salt/salt/modules/cmdmod.py/run |
109 | def script(source,
           args=None,
           cwd=None,
           stdin=None,
           runas=None,
           shell=DEFAULT_SHELL,
           python_shell=None,
           env=None,
           template=None,
           umask=None,
           output_loglevel='debug',
           log_callback=None,
           quiet=False,
           timeout=None,
           reset_system_locale=True,
           saltenv='base',
           use_vt=False,
           bg=False,
           **kwargs):
    '''
    Download a script from a remote location and execute the script locally.
    The script can be located on the salt master file server or on an HTTP/FTP
    server.

    The script will be executed directly, so it can be written in any available
    programming language.

    :param str source: The location of the script to download. If the file is
        located on the master in the directory named spam, and is called eggs,
        the source string is salt://spam/eggs

    :param str args: String of command line args to pass to the script. Only
        used if no args are specified as part of the `name` argument. To pass a
        string containing spaces in YAML, you will need to doubly-quote it:
        "arg1 'arg two' arg3"

    :param str cwd: The current working directory to execute the command in,
        defaults to /root

    :param str stdin: A string of standard input can be specified for the
        command to be run using the ``stdin`` parameter. This can be useful in
        cases where sensitive information must be read from standard input.

    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password

    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's

        .. versionadded:: 2016.3.0

    :param str shell: Shell to execute under. Defaults to the system default
        shell.

    :param bool python_shell: If False, let python handle the positional
        arguments. Set to True to use shell features, such as pipes or
        redirection

    :param bool bg: If True, run script in background and do not await or
        deliver its results

    :param list env: A list of environment variables to be set prior to
        execution.

        Example:

        .. code-block:: yaml

            salt://scripts/foo.sh:
              cmd.script:
                - env:
                  - BATCH: 'yes'

        .. warning::

            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :doc:`here
            </topics/troubleshooting/yaml_idiosyncrasies>`.

        Variables as values are not evaluated. So $PATH in the following
        example is a literal '$PATH':

        .. code-block:: yaml

            salt://scripts/bar.sh:
              cmd.script:
                - env: "PATH=/some/path:$PATH"

        One can still use the existing $PATH by using a bit of Jinja:

        .. code-block:: yaml

            {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}

            mycommand:
              cmd.run:
                - name: ls -l /
                - env:
                  - PATH: {{ [current_path, '/my/special/bin']|join(':') }}

    :param str template: If this setting is applied then the named templating
        engine will be used to render the downloaded file. Currently jinja,
        mako, and wempy are supported

    :param str umask: The umask (in octal) to use when running the command.

    :param str output_loglevel: Control the loglevel at which the output from
        the command is logged. Note that the command being run will still be
        logged (loglevel: DEBUG) regardless, unless ``quiet`` is used for this
        value.

    :param bool quiet: The command will be executed quietly, meaning no log
        entries of the actual command or its return data. This is deprecated
        as of the **2014.1.0** release, and is being replaced with
        ``output_loglevel: quiet``.

    :param int timeout: If the command has not terminated after timeout
        seconds, send the subprocess sigterm, and if sigterm is ignored, follow
        up with sigkill

    :param bool use_vt: Use VT utils (saltstack) to stream the command output
        more interactively to the console and the logs. This is experimental.

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.script salt://scripts/runme.sh
        salt '*' cmd.script salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
        salt '*' cmd.script salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell'

    .. code-block:: bash

        salt '*' cmd.script salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    '''
    python_shell = _python_shell_default(python_shell,
                                         kwargs.get('__pub_jid', ''))

    def _cleanup_tempfile(path):
        try:
            os.remove(path)
        except (__HOLE__, OSError) as exc:
            log.error(
                'cmd.script: Unable to clean tempfile \'{0}\': {1}'.format(
                    path,
                    exc
                )
            )
    if '__env__' in kwargs:
        salt.utils.warn_until(
            'Oxygen',
            'Parameter \'__env__\' has been detected in the argument list. This '
            'parameter is no longer used and has been replaced by \'saltenv\' '
            'as of Salt Carbon. This warning will be removed in Salt Oxygen.'
        )
        kwargs.pop('__env__')
    path = salt.utils.mkstemp(dir=cwd, suffix=os.path.splitext(source)[1])
    if template:
        if 'pillarenv' in kwargs or 'pillar' in kwargs:
            pillarenv = kwargs.get('pillarenv', __opts__.get('pillarenv'))
            kwargs['pillar'] = _gather_pillar(pillarenv, kwargs.get('pillar'))
        fn_ = __salt__['cp.get_template'](source,
                                          path,
                                          template,
                                          saltenv,
                                          **kwargs)
        if not fn_:
            _cleanup_tempfile(path)
            return {'pid': 0,
                    'retcode': 1,
                    'stdout': '',
                    'stderr': '',
                    'cache_error': True}
    else:
        fn_ = __salt__['cp.cache_file'](source, saltenv)
        if not fn_:
            _cleanup_tempfile(path)
            return {'pid': 0,
                    'retcode': 1,
                    'stdout': '',
                    'stderr': '',
                    'cache_error': True}
        shutil.copyfile(fn_, path)
    if not salt.utils.is_windows():
        os.chmod(path, 320)
        os.chown(path, __salt__['file.user_to_uid'](runas), -1)
    ret = _run(path + ' ' + str(args) if args else path,
               cwd=cwd,
               stdin=stdin,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               runas=runas,
               shell=shell,
               python_shell=python_shell,
               env=env,
               umask=umask,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               saltenv=saltenv,
               pillarenv=kwargs.get('pillarenv'),
               pillar_override=kwargs.get('pillar'),
               use_vt=use_vt,
               password=kwargs.get('password', None),
               bg=bg)
    _cleanup_tempfile(path)
    return ret | IOError | dataset/ETHPy150Open saltstack/salt/salt/modules/cmdmod.py/script |
110 | def tty(device, echo=None):
    '''
    Echo a string to a specific tty

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.tty tty0 'This is a test'
        salt '*' cmd.tty pts3 'This is a test'
    '''
    if device.startswith('tty'):
        teletype = '/dev/{0}'.format(device)
    elif device.startswith('pts'):
        teletype = '/dev/{0}'.format(device.replace('pts', 'pts/'))
    else:
        return {'Error': 'The specified device is not a valid TTY'}
    try:
        with salt.utils.fopen(teletype, 'wb') as tty_device:
            tty_device.write(echo)
        return {
            'Success': 'Message was successfully echoed to {0}'.format(teletype)
        }
    except __HOLE__:
        return {
            'Error': 'Echoing to {0} returned error'.format(teletype)
        } | IOError | dataset/ETHPy150Open saltstack/salt/salt/modules/cmdmod.py/tty |
111 | def _is_valid_shell(shell):
    '''
    Attempts to search for valid shells on a system and
    see if a given shell is in the list
    '''
    if salt.utils.is_windows():
        return True  # Don't even try this for Windows
    shells = '/etc/shells'
    available_shells = []
    if os.path.exists(shells):
        try:
            with salt.utils.fopen(shells, 'r') as shell_fp:
                lines = shell_fp.read().splitlines()
            for line in lines:
                if line.startswith('#'):
                    continue
                else:
                    available_shells.append(line)
        except __HOLE__:
            return True
    else:
        # No known method of determining available shells
        return None
    if shell in available_shells:
        return True
    else:
        return False | OSError | dataset/ETHPy150Open saltstack/salt/salt/modules/cmdmod.py/_is_valid_shell |
112 | def shells():
    '''
    Lists the valid shells on this system via the /etc/shells file

    .. versionadded:: 2015.5.0

    CLI Example::

        salt '*' cmd.shells
    '''
    shells_fn = '/etc/shells'
    ret = []
    if os.path.exists(shells_fn):
        try:
            with salt.utils.fopen(shells_fn, 'r') as shell_fp:
                lines = shell_fp.read().splitlines()
            for line in lines:
                line = line.strip()
                if line.startswith('#'):
                    continue
                elif not line:
                    continue
                else:
                    ret.append(line)
        except __HOLE__:
            log.error("File '{0}' was not found".format(shells_fn))
    return ret | OSError | dataset/ETHPy150Open saltstack/salt/salt/modules/cmdmod.py/shells |
113 | def __iter__(self):
    try:
        return iter(self._trials)
    except __HOLE__:
        print >> sys.stderr, "You have to refresh before you iterate"
        raise | AttributeError | dataset/ETHPy150Open hyperopt/hyperopt/hyperopt/base.py/Trials.__iter__ |
114 | def __len__(self):
    try:
        return len(self._trials)
    except __HOLE__:
        print >> sys.stderr, "You have to refresh before you compute len"
        raise | AttributeError | dataset/ETHPy150Open hyperopt/hyperopt/hyperopt/base.py/Trials.__len__ |
115 | def evaluate(self, config, ctrl, attach_attachments=True):
    memo = self.memo_from_config(config)
    use_obj_for_literal_in_memo(self.expr, ctrl, Ctrl, memo)
    if self.pass_expr_memo_ctrl:
        rval = self.fn(expr=self.expr, memo=memo, ctrl=ctrl)
    else:
        # -- the "work" of evaluating `config` can be written
        # either into the pyll part (self.expr)
        # or the normal Python part (self.fn)
        pyll_rval = pyll.rec_eval(
            self.expr,
            memo=memo,
            print_node_on_error=self.rec_eval_print_node_on_error)
        rval = self.fn(pyll_rval)
    if isinstance(rval, (float, int, np.number)):
        dict_rval = {'loss': float(rval), 'status': STATUS_OK}
    else:
        dict_rval = dict(rval)
        status = dict_rval['status']
        if status not in STATUS_STRINGS:
            raise InvalidResultStatus(dict_rval)
        if status == STATUS_OK:
            # -- make sure that the loss is present and valid
            try:
                dict_rval['loss'] = float(dict_rval['loss'])
            except (TypeError, __HOLE__):
                raise InvalidLoss(dict_rval)
    if attach_attachments:
        attachments = dict_rval.pop('attachments', {})
        for key, val in attachments.items():
            ctrl.attachments[key] = val
    # -- don't do this here because SON-compatibility is only a requirement
    # for trials destined for a mongodb. In-memory rvals can contain
    # anything.
    # return base.SONify(dict_rval)
    return dict_rval | KeyError | dataset/ETHPy150Open hyperopt/hyperopt/hyperopt/base.py/Domain.evaluate |
116 | def true_loss(self, result, config=None):
    """Return a true loss, in the case that the `loss` is a surrogate"""
    # N.B. don't use get() here, it evaluates self.loss unnecessarily
    try:
        return result['true_loss']
    except __HOLE__:
        return self.loss(result, config=config) | KeyError | dataset/ETHPy150Open hyperopt/hyperopt/hyperopt/base.py/Domain.true_loss |
117 | def cast_arg(t, val):
    """Attempts to call t on val, raising an ArgumentTypeError
    on ValueError.
    If 't' is the special string 'intbool', attempts to cast first
    to an int and then to a bool (i.e., 1=True, 0=False).
    """
    if t == 'intbool':
        return cast_arg(bool, cast_arg(int, val))
    else:
        try:
            return t(val)
        except __HOLE__:
            raise ArgumentTypeError() | ValueError | dataset/ETHPy150Open beetbox/beets/beetsplug/bpd/__init__.py/cast_arg |
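With the labeled `ValueError` in place, `cast_arg` turns MPD argument strings into typed values and maps conversion failures onto the module's protocol-level `ArgumentTypeError`. A hedged sketch of the calling pattern; the stub exception below merely stands in for the bpd module's own class:

```python
class ArgumentTypeError(Exception):
    """Stand-in for the bpd module's exception of the same name."""

def cast_arg(t, val):
    if t == 'intbool':
        # int first, then bool: bool('0') would be True, but int('0') -> 0 -> False
        return cast_arg(bool, cast_arg(int, val))
    try:
        return t(val)
    except ValueError:  # the labeled exception
        raise ArgumentTypeError()

assert cast_arg(int, '42') == 42
assert cast_arg('intbool', '1') is True
assert cast_arg('intbool', '0') is False
try:
    cast_arg(int, 'not-a-number')
except ArgumentTypeError:
    pass  # non-numeric input surfaces as a protocol error
```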
118 | def cmd_delete(self, conn, index):
    """Remove the song at index from the playlist."""
    index = cast_arg(int, index)
    try:
        del(self.playlist[index])
    except __HOLE__:
        raise ArgumentIndexError()
    self.playlist_version += 1
    if self.current_index == index:  # Deleted playing song.
        self.cmd_stop(conn)
    elif index < self.current_index:  # Deleted before playing.
        # Shift playing index down.
        self.current_index -= 1 | IndexError | dataset/ETHPy150Open beetbox/beets/beetsplug/bpd/__init__.py/BaseServer.cmd_delete |
119 | def cmd_move(self, conn, idx_from, idx_to):
    """Move a track in the playlist."""
    idx_from = cast_arg(int, idx_from)
    idx_to = cast_arg(int, idx_to)
    try:
        track = self.playlist.pop(idx_from)
        self.playlist.insert(idx_to, track)
    except __HOLE__:
        raise ArgumentIndexError()
    # Update currently-playing song.
    if idx_from == self.current_index:
        self.current_index = idx_to
    elif idx_from < self.current_index <= idx_to:
        self.current_index -= 1
    elif idx_from > self.current_index >= idx_to:
        self.current_index += 1
    self.playlist_version += 1 | IndexError | dataset/ETHPy150Open beetbox/beets/beetsplug/bpd/__init__.py/BaseServer.cmd_move |
120 | def cmd_swap(self, conn, i, j):
    """Swaps two tracks in the playlist."""
    i = cast_arg(int, i)
    j = cast_arg(int, j)
    try:
        track_i = self.playlist[i]
        track_j = self.playlist[j]
    except __HOLE__:
        raise ArgumentIndexError()
    self.playlist[j] = track_i
    self.playlist[i] = track_j
    # Update currently-playing song.
    if self.current_index == i:
        self.current_index = j
    elif self.current_index == j:
        self.current_index = i
    self.playlist_version += 1 | IndexError | dataset/ETHPy150Open beetbox/beets/beetsplug/bpd/__init__.py/BaseServer.cmd_swap |
121 | def cmd_playlistinfo(self, conn, index=-1):
    """Gives metadata information about the entire playlist or a
    single track, given by its index.
    """
    index = cast_arg(int, index)
    if index == -1:
        for track in self.playlist:
            yield self._item_info(track)
    else:
        try:
            track = self.playlist[index]
        except __HOLE__:
            raise ArgumentIndexError()
        yield self._item_info(track) | IndexError | dataset/ETHPy150Open beetbox/beets/beetsplug/bpd/__init__.py/BaseServer.cmd_playlistinfo |
122 | def __init__(self, library, host, port, password):
    try:
        from beetsplug.bpd import gstplayer
    except __HOLE__ as e:
        # This is a little hacky, but it's the best I know for now.
        if e.args[0].endswith(' gst'):
            raise NoGstreamerError()
        else:
            raise
    super(Server, self).__init__(host, port, password)
    self.lib = library
    self.player = gstplayer.GstPlayer(self.play_finished)
    self.cmd_update(None) | ImportError | dataset/ETHPy150Open beetbox/beets/beetsplug/bpd/__init__.py/Server.__init__ |
123 | def find_cost(room):
    if room and room.size:
        try:
            return exit_cost_map[room.size]
        except __HOLE__:
            exit_cost_map[room.size] = {'action': int(room_stamina * room.size / default_room_size),
                                        'stamina': int(room_action * room.size / default_room_size)}
            return exit_cost_map[room.size] | KeyError | dataset/ETHPy150Open genzgd/Lampost-Mud/lampost/lpmud/env.py/find_cost |
124 | def parse(uri, user=None, port=22):
    """
    parses ssh connection uri-like sentences.
    ex:
        - root@google.com -> (root, google.com, 22)
        - noreply@facebook.com:22 -> (noreply, facebook.com, 22)
        - facebook.com:3306 -> ($USER, facebook.com, 3306)
        - twitter.com -> ($USER, twitter.com, 22)
    default port: 22
    default user: $USER (getpass.getuser())
    """
    uri = uri.strip()
    if not user:
        user = getpass.getuser()
    # get user
    if '@' in uri:
        user = uri.split("@")[0]
    # get port
    if ':' in uri:
        port = uri.split(":")[-1]
        try:
            port = int(port)
        except __HOLE__:
            raise ValueError("port must be numeric.")
    # get host
    uri = re.sub(":.*", "", uri)
    uri = re.sub(".*@", "", uri)
    host = uri
    return (
        user,
        host,
        port,
    ) | ValueError | dataset/ETHPy150Open emre/storm/storm/parsers/ssh_uri_parser.py/parse |
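The parser's behaviour on the docstring's own examples, as a runnable sketch; passing `user=` makes the `getpass.getuser()` fallback deterministic:

```python
assert parse('root@google.com') == ('root', 'google.com', 22)
assert parse('noreply@facebook.com:22') == ('noreply', 'facebook.com', 22)
assert parse('facebook.com:3306', user='deploy') == ('deploy', 'facebook.com', 3306)
try:
    parse('host.example.com:abc')  # non-numeric port -> the labeled ValueError
except ValueError as exc:
    assert str(exc) == 'port must be numeric.'
```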
125 | def _delete_cookie(self, key, path='/', domain=None):
    self.set_cookie(key, max_age=0, path=path, domain=domain,
                    expires='Thu, 01-Jan-1970 00:00:00 GMT')
    try:
        del self.COOKIES[key]
    except __HOLE__:
        pass | KeyError | dataset/ETHPy150Open opps/opps/opps/contrib/mobile/middleware.py/_delete_cookie |
126 | def apply_referrer(self, user, request):
    try:
        referrer = Referrer.objects.get(pk=request.session.pop(settings.SESSION_KEY))
    except __HOLE__:
        pass
    else:
        user_referrer = UserReferrer(user=user, referrer=referrer)
        user_referrer.save() | KeyError | dataset/ETHPy150Open byteweaver/django-referral/referral/models.py/UserReferrerManager.apply_referrer |
127 | def validate(self, signature, timestamp, nonce):
    """Validate request signature.

    :param signature: A string signature parameter sent by weixin.
    :param timestamp: An int timestamp parameter sent by weixin.
    :param nonce: An int nonce parameter sent by weixin.
    """
    if not self.token:
        raise RuntimeError('WEIXIN_TOKEN is missing')
    if self.expires_in:
        try:
            timestamp = int(timestamp)
        except (__HOLE__, TypeError):
            # fake timestamp
            return False
        delta = time.time() - timestamp
        if delta < 0:
            # this is a fake timestamp
            return False
        if delta > self.expires_in:
            # expired timestamp
            return False
    values = [self.token, str(timestamp), str(nonce)]
    s = ''.join(sorted(values))
    hsh = hashlib.sha1(s.encode('utf-8')).hexdigest()
    return signature == hsh | ValueError | dataset/ETHPy150Open lepture/flask-weixin/flask_weixin.py/Weixin.validate |
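The scheme is the standard weixin handshake: sort token, timestamp, and nonce lexicographically, concatenate, and SHA-1 the result. A self-contained sketch of producing a signature that `validate()` would accept (the token value here is made up):

```python
import hashlib
import time

token = 'my-weixin-token'  # hypothetical WEIXIN_TOKEN value
timestamp = str(int(time.time()))
nonce = '12345'

s = ''.join(sorted([token, timestamp, nonce]))
signature = hashlib.sha1(s.encode('utf-8')).hexdigest()
# weixin.validate(signature, timestamp, nonce) would now return True
# (provided the timestamp is within expires_in, when that check is enabled).
```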
128 | def view_func(self):
    """Default view function for Flask app.

    This is a simple implementation for view func, you can add it to
    your Flask app::

        weixin = Weixin(app)
        app.add_url_rule('/', view_func=weixin.view_func)
    """
    if request is None:
        raise RuntimeError('view_func needs Flask to be installed')
    signature = request.args.get('signature')
    timestamp = request.args.get('timestamp')
    nonce = request.args.get('nonce')
    if not self.validate(signature, timestamp, nonce):
        return 'signature failed', 400
    if request.method == 'GET':
        echostr = request.args.get('echostr', '')
        return echostr
    try:
        ret = self.parse(request.data)
    except __HOLE__:
        return 'invalid', 400
    if 'type' not in ret:
        # not a valid message
        return 'invalid', 400
    if ret['type'] == 'text' and ret['content'] in self._registry:
        func = self._registry[ret['content']]
    else:
        ret_set = frozenset(ret.items())
        matched_rules = (
            _func for _func, _limitation in self._registry_without_key
            if _limitation.issubset(ret_set))
        func = next(matched_rules, None)  # first matched rule
    if func is None:
        if '*' in self._registry:
            func = self._registry['*']
        else:
            func = 'failed'
    if callable(func):
        text = func(**ret)
    else:
        # plain text
        text = self.reply(
            username=ret['sender'],
            sender=ret['receiver'],
            content=func,
        )
    return Response(text, content_type='text/xml; charset=utf-8') | ValueError | dataset/ETHPy150Open lepture/flask-weixin/flask_weixin.py/Weixin.view_func |
129 | def do_longs(opts, opt, longopts, args):
    try:
        i = opt.index('=')
    except __HOLE__:
        optarg = None
    else:
        opt, optarg = opt[:i], opt[i+1:]
    has_arg, opt = long_has_args(opt, longopts)
    if has_arg:
        if optarg is None:
            if not args:
                raise GetoptError('option --%s requires argument' % opt, opt)
            optarg, args = args[0], args[1:]
    elif optarg is not None:
        raise GetoptError('option --%s must not have an argument' % opt, opt)
    opts.append(('--' + opt, optarg or ''))
    return opts, args

# Return:
#   has_arg?
#   full option name | ValueError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/getopt.py/do_longs |
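`do_longs` is internal to the stdlib `getopt` module; its behaviour (including the `GetoptError` paths above) is easiest to observe through `getopt.getopt` itself:

```python
import getopt

# '--output' declared as taking an argument (trailing '=' in the long-option list):
opts, args = getopt.getopt(['--output=out.txt', 'positional'], '', ['output='])
assert opts == [('--output', 'out.txt')]
assert args == ['positional']

try:
    getopt.getopt(['--output'], '', ['output='])  # required argument missing
except getopt.GetoptError as exc:
    print(exc)  # "option --output requires argument"
```

The labeled `ValueError` never escapes: it simply signals that no `=` was present in the option text, so any required argument must come from the next token.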
130 | def test_bad(self):
    def ar(s):
        try: SemVer(s)
        except __HOLE__: pass
        else: self.fail("ValueError not raised: '%s'" % s)
    for s in [
            '1.2',
            '1.2.02',
            '1.2.2-',
            '1.2.3b#5',
            '03.3.3',
            'v1.2.2',
            '1.3b',
            '1.4b.0',
            '1v',
            '1v.2.2v',
            '1.2.4b.5', ]:
        ar(s) | ValueError | dataset/ETHPy150Open dvarrazzo/pgxnclient/pgxnclient/tests/test_semver.py/SemVerTestCase.test_bad |
131 | def test_cant_clean(self):
    def ar(s):
        try: SemVer.clean(s)
        except __HOLE__: pass
        else: self.fail("ValueError not raised: '%s'" % s)
    for s in [
            '1.2.0 beta 4',
            '1.2.2-',
            '1.2.3b#5',
            'v1.2.2',
            '1.4b.0',
            '1v.2.2v',
            '1.2.4b.5',
            '1.2.3.4',
            '1.2.3 4',
            '1.2000000000000000.3.4', ]:
        ar(s) | ValueError | dataset/ETHPy150Open dvarrazzo/pgxnclient/pgxnclient/tests/test_semver.py/SemVerTestCase.test_cant_clean |
132 | def load(self):
    """
    We load the data from the key itself instead of fetching from
    some external data store. Opposite of _get_session_key(),
    raises BadSignature if signature fails.
    """
    try:
        return signing.loads(self.session_key,
                             serializer=self.serializer,
                             # This doesn't handle non-default expiry dates, see #19201
                             max_age=settings.SESSION_COOKIE_AGE,
                             salt='django.contrib.sessions.backends.signed_cookies')
    except (signing.BadSignature, __HOLE__):
        self.create()
        return {} | ValueError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/contrib/sessions/backends/signed_cookies.py/SessionStore.load |
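The cookie-backed session store round-trips everything through `django.core.signing`; the underlying primitive looks like this (run inside a configured Django project, since a `SECRET_KEY` is required):

```python
from django.core import signing

payload = signing.dumps({'cart': [3, 7]}, salt='example.salt')
assert signing.loads(payload, salt='example.salt') == {'cart': [3, 7]}

try:
    signing.loads(payload + 'x', salt='example.salt')
except signing.BadSignature:
    pass  # any tampering invalidates the signature; load() above then falls back to create()
```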
133 | @patch('luigi.contrib.external_program.subprocess.Popen')
def test_app_interruption(self, proc):
    def interrupt():
        raise KeyboardInterrupt()
    proc.return_value.wait = interrupt
    try:
        job = TestSparkSubmitTask()
        job.run()
    except __HOLE__:
        pass
    proc.return_value.kill.check_called() | KeyboardInterrupt | dataset/ETHPy150Open spotify/luigi/test/contrib/spark_test.py/SparkSubmitTaskTest.test_app_interruption |
134 | def parse_templates(cfg, templates, repo, next_custom, preview):
    """
    Parse one or more templates, substitute placeholder variables with
    real values and write the result to the file specified in the template.
    If preview is True, then the output will be written to the stdout while
    informative messages will be output to the stderr.
    """
    for t in templates.split(' '):
        tpath = template_path(t)
        if os.path.exists(tpath):
            with open(tpath, 'r') as fp:
                lines = fp.readlines()
            if len(lines) < 2:
                term.err("The template \"" + t + "\" is not valid, aborting.")
                return
            if not lines[0].startswith('#'):
                term.err("The template \"" + t + "\" doesn't define any valid "
                         "output, aborting.")
                return
            output = str(lines[0]).strip(' #\n')
            # resolve relative paths to the project's root
            if not os.path.isabs(output):
                output = os.path.join(PRJ_ROOT, output)
            outdir = os.path.dirname(output)
            if not os.path.exists(outdir):
                term.err("The template output directory \"" + outdir +
                         "\" doesn't exist.")
            term.info("Processing template \"" + bold(t) + "\" for " + output +
                      "...")
            lines = lines[1:]
            xformed = Template("".join(lines))
            vstring = build_version_string(cfg, repo, False, next_custom)
            args = build_format_args(cfg, repo, next_custom)
            keywords = {
                'CURRENT_VERSION': vstring,
                'MAJOR': args['maj'],
                'MINOR': args['min'],
                'PATCH': args['patch'],
                'REV': args['rev'],
                'REV_PREFIX': args['rev_prefix'],
                'BUILD_ID': args['build_id'],
                'FULL_BUILD_ID': args['build_id_full'],
                'COMMIT_COUNT': args['commit_count'],
                'COMMIT_COUNT_STR':
                    str(args['commit_count']) if args['commit_count'] > 0 else '',
                'COMMIT_COUNT_PREFIX': args['commit_count_prefix'],
                'META_PR': args['meta_pr'],
                'META_PR_PREFIX': args['meta_pr_prefix']
            }
            try:
                res = xformed.substitute(keywords)
            except KeyError as e:
                term.err("Unknown key \"" + e.message + "\" found, aborting.")
                sys.exit(1)
            if not preview:
                try:
                    fp = open(output, 'w')
                    fp.write(res)
                    fp.close()
                except __HOLE__:
                    term.err("Couldn't write file \"" + output + "\"")
                    sys.exit(1)
            else:
                term.out(res)
            wrote_bytes = len(res) if preview else os.stat(output).st_size
            term.info("Done, " + str(wrote_bytes) + " bytes written.")
        else:
            term.err("Couldn't find the \"" + t + "\" template")
            sys.exit(1) | IOError | dataset/ETHPy150Open manuelbua/gitver/gitver/commands.py/parse_templates |
135 | def parse_user_next_stable(user):
    """
    Parse the specified user-defined string containing the next stable version
    numbers and returns the discretized matches in a dictionary.
    """
    try:
        data = re.match(user_version_matcher, user).groupdict()
        if len(data) < 3:
            raise AttributeError
    except __HOLE__:
        return False
    return data | AttributeError | dataset/ETHPy150Open manuelbua/gitver/gitver/commands.py/parse_user_next_stable |
136 | def deepcopy(x, memo=None, _nil=[]):
    """Deep copy operation on arbitrary Python objects.
    See the module's __doc__ string for more info.
    """
    if memo is None:
        memo = {}
    d = id(x)
    y = memo.get(d, _nil)
    if y is not _nil:
        return y
    cls = type(x)
    copier = _deepcopy_dispatch.get(cls)
    if copier:
        y = copier(x, memo)
    else:
        try:
            issc = issubclass(cls, type)
        except __HOLE__:  # cls is not a class (old Boost; see SF #502085)
            issc = 0
        if issc:
            y = _deepcopy_atomic(x, memo)
        else:
            copier = getattr(x, "__deepcopy__", None)
            if copier:
                y = copier(memo)
            else:
                reductor = dispatch_table.get(cls)
                if reductor:
                    rv = reductor(x)
                else:
                    reductor = getattr(x, "__reduce_ex__", None)
                    if reductor:
                        rv = reductor(2)
                    else:
                        reductor = getattr(x, "__reduce__", None)
                        if reductor:
                            rv = reductor()
                        else:
                            raise Error(
                                "un(deep)copyable object of type %s" % cls)
                y = _reconstruct(x, rv, 1, memo)
    # If is its own copy, don't memoize.
    if y is not x:
        memo[d] = y
        _keep_alive(x, memo)  # Make sure x lives at least as long as d
    return y | TypeError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/copy.py/deepcopy |
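The `memo` dictionary keyed by `id(x)` is what lets `deepcopy` terminate on self-referential structures; the stdlib entry point shows the effect directly:

```python
import copy

a = [1, 2]
a.append(a)           # a now contains itself

b = copy.deepcopy(a)  # the memo breaks the cycle instead of recursing forever
assert b is not a
assert b[2] is b      # the copy is self-referential with the same shape
```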
137 | def _deepcopy_tuple(x, memo):
    y = []
    for a in x:
        y.append(deepcopy(a, memo))
    # We're not going to put the tuple in the memo, but it's still important we
    # check for it, in case the tuple contains recursive mutable structures.
    try:
        return memo[id(x)]
    except __HOLE__:
        pass
    for i in range(len(x)):
        if x[i] is not y[i]:
            y = tuple(y)
            break
    else:
        y = x
    return y | KeyError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/copy.py/_deepcopy_tuple |
138 | def _keep_alive(x, memo):
    """Keeps a reference to the object x in the memo.
    Because we remember objects by their id, we have
    to assure that possibly temporary objects are kept
    alive by referencing them.
    We store a reference at the id of the memo, which should
    normally not be used unless someone tries to deepcopy
    the memo itself...
    """
    try:
        memo[id(memo)].append(x)
    except __HOLE__:
        # aha, this is the first one :-)
        memo[id(memo)] = [x] | KeyError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/copy.py/_keep_alive |
139 | def delete_queue(self):
    try:
        return self.connection.delete_queue()['msg']
    except __HOLE__:
        return False | HTTPError | dataset/ETHPy150Open Koed00/django-q/django_q/brokers/ironmq.py/IronMQBroker.delete_queue |
140 | def delete(self, task_id):
    try:
        return self.connection.delete(task_id)['msg']
    except __HOLE__:
        return False | HTTPError | dataset/ETHPy150Open Koed00/django-q/django_q/brokers/ironmq.py/IronMQBroker.delete |
141 | def check(store_op_fun):
    def op_checker(store, *args, **kwargs):
        # NOTE(zhiyan): Trigger the hook of updating store
        # dynamic capabilities based on current store status.
        if store.conf.glance_store.store_capabilities_update_min_interval > 0:
            _schedule_capabilities_update(store)
        get_capabilities = [
            BitMasks.READ_ACCESS,
            BitMasks.READ_OFFSET if kwargs.get('offset') else BitMasks.NONE,
            BitMasks.READ_CHUNK if kwargs.get('chunk_size') else BitMasks.NONE
        ]
        op_cap_map = {
            'get': get_capabilities,
            'add': [BitMasks.WRITE_ACCESS],
            'delete': [BitMasks.WRITE_ACCESS]}
        op_exec_map = {
            'get': (exceptions.StoreRandomGetNotSupported
                    if kwargs.get('offset') or kwargs.get('chunk_size') else
                    exceptions.StoreGetNotSupported),
            'add': exceptions.StoreAddDisabled,
            'delete': exceptions.StoreDeleteNotSupported}
        op = store_op_fun.__name__.lower()
        try:
            req_cap = op_cap_map[op]
        except __HOLE__:
            LOG.warning(_LW('The capability of operation "%s" '
                            'could not be checked.'), op)
        else:
            if not store.is_capable(*req_cap):
                kwargs.setdefault('offset', 0)
                kwargs.setdefault('chunk_size', None)
                raise op_exec_map[op](**kwargs)
        return store_op_fun(store, *args, **kwargs)
    return op_checker | KeyError | dataset/ETHPy150Open openstack/glance_store/glance_store/capabilities.py/check |
142 | def bump_version(version):
    try:
        parts = map(int, version.split('.'))
    except __HOLE__:
        fail('Current version is not numeric')
    parts[-1] += 1
    return '.'.join(map(str, parts)) | ValueError | dataset/ETHPy150Open mattupstate/flask-principal/scripts/release.py/bump_version |
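`bump_version` increments only the last numeric component, and it leans on Python 2 semantics, where `map()` returns a list. A Python 3 rendering of the same logic (the `list()` wrapper and the `SystemExit` stand-in for the script's `fail()` helper are assumptions of this sketch):

```python
def bump_version_py3(version):
    try:
        parts = list(map(int, version.split('.')))
    except ValueError:  # the labeled exception
        raise SystemExit('Current version is not numeric')
    parts[-1] += 1
    return '.'.join(map(str, parts))

assert bump_version_py3('1.2.3') == '1.2.4'
assert bump_version_py3('0.9') == '0.10'
```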
143 | def check_for_psutils(self):
    try:
        import psutil  # noqa
    except __HOLE__:
        self.skipTest('Could not import psutil, skipping test.') | ImportError | dataset/ETHPy150Open enthought/pikos/pikos/tests/test_focused_line_memory_monitor.py/TestFocusedLineMemoryMonitor.check_for_psutils |
144 | def opt_maildirdbmdomain(self, domain):
    """generate an SMTP/POP3 virtual domain which saves to \"path\"
    """
    try:
        name, path = domain.split('=')
    except __HOLE__:
        raise usage.UsageError("Argument to --maildirdbmdomain must be of the form 'name=path'")
    self.last_domain = maildir.MaildirDirdbmDomain(self.service, os.path.abspath(path))
    self.service.addDomain(name, self.last_domain) | ValueError | dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/mail/tap.py/Options.opt_maildirdbmdomain |
145 | def opt_user(self, user_pass):
    """add a user/password to the last specified domains
    """
    try:
        user, password = user_pass.split('=', 1)
    except __HOLE__:
        raise usage.UsageError("Argument to --user must be of the form 'user=password'")
    if self.last_domain:
        self.last_domain.addUser(user, password)
    else:
        raise usage.UsageError("Specify a domain before specifying users") | ValueError | dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/mail/tap.py/Options.opt_user |
146 | def _toEndpoint(description, certificate=None):
    """
    Tries to guess whether a description is a bare TCP port or an endpoint. If
    a bare port is specified and a certificate file is present, returns an
    SSL4ServerEndpoint and otherwise returns a TCP4ServerEndpoint.
    """
    from twisted.internet import reactor
    try:
        port = int(description)
    except __HOLE__:
        return endpoints.serverFromString(reactor, description)
    warnings.warn(
        "Specifying plain ports and/or a certificate is deprecated since "
        "Twisted 11.0; use endpoint descriptions instead.",
        category=DeprecationWarning, stacklevel=3)
    if certificate:
        from twisted.internet.ssl import DefaultOpenSSLContextFactory
        ctx = DefaultOpenSSLContextFactory(certificate, certificate)
        return endpoints.SSL4ServerEndpoint(reactor, port, ctx)
    return endpoints.TCP4ServerEndpoint(reactor, port) | ValueError | dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/mail/tap.py/_toEndpoint |
147 | def _run(self):
    # Sanity check and warning if pyqtgraph isn't found
    if self.do_plot:
        try:
            import pyqtgraph as pg
        except __HOLE__ as e:
            binwalk.core.common.warning("Failed to import pyqtgraph module, visual entropy graphing will be disabled")
            self.do_plot = False
    for fp in iter(self.next_file, None):
        if self.display_results:
            self.header()
        self.calculate_file_entropy(fp)
        if self.display_results:
            self.footer()
    if self.do_plot:
        if not self.save_plot:
            from pyqtgraph.Qt import QtGui
            QtGui.QApplication.instance().exec_()
        pg.exit() | ImportError | dataset/ETHPy150Open devttys0/binwalk/src/binwalk/modules/entropy.py/Entropy._run |
148 | def plot_entropy(self, fname):
    try:
        import numpy as np
        import pyqtgraph as pg
        import pyqtgraph.exporters as exporters
    except __HOLE__ as e:
        return
    i = 0
    x = []
    y = []
    plotted_colors = {}
    for r in self.results:
        x.append(r.offset)
        y.append(r.entropy)
    plt = pg.plot(title=fname, clear=True)
    # Disable auto-ranging of the Y (entropy) axis, as it
    # can cause some very un-intuitive graphs, particularly
    # for files with only high-entropy data.
    plt.setYRange(0, 1)
    if self.show_legend and has_key(self.file_markers, fname):
        plt.addLegend(size=(self.max_description_length * 10, 0))
        for (offset, description) in self.file_markers[fname]:
            # If this description has already been plotted at a different offset, we need to
            # use the same color for the marker, but set the description to None to prevent
            # duplicate entries in the graph legend.
            #
            # Else, get the next color and use it to mark descriptions of this type.
            if has_key(plotted_colors, description):
                color = plotted_colors[description]
                description = None
            else:
                color = self.COLORS[i]
                plotted_colors[description] = color
                i += 1
                if i >= len(self.COLORS):
                    i = 0
            plt.plot(x=[offset, offset], y=[0, 1.1], name=description, pen=pg.mkPen(color, width=2.5))
    # Plot data points
    plt.plot(x, y, pen='y')
    # TODO: legend is not displayed properly when saving plots to disk
    if self.save_plot:
        # Save graph to CWD
        out_file = os.path.join(os.getcwd(), os.path.basename(fname))
        # exporters.ImageExporter is different in different versions of pyqtgraph
        try:
            exporter = exporters.ImageExporter(plt.plotItem)
        except TypeError:
            exporter = exporters.ImageExporter.ImageExporter(plt.plotItem)
        exporter.parameters()['width'] = self.FILE_WIDTH
        exporter.export(binwalk.core.common.unique_file_name(out_file, self.FILE_FORMAT))
    else:
        plt.setLabel('left', self.YLABEL, units=self.YUNITS)
        plt.setLabel('bottom', self.XLABEL, units=self.XUNITS) | ImportError | dataset/ETHPy150Open devttys0/binwalk/src/binwalk/modules/entropy.py/Entropy.plot_entropy |
149 | def edit_resource(self, transaction, path):
    """
    Render a POST on an already created resource.

    :param transaction: the transaction
    :return: the transaction
    """
    resource_node = self._parent.root[path]
    transaction.resource = resource_node
    # If-Match
    if transaction.request.if_match:
        if None not in transaction.request.if_match and str(transaction.resource.etag) \
                not in transaction.request.if_match:
            transaction.response.code = defines.Codes.PRECONDITION_FAILED.number
            return transaction
    method = getattr(resource_node, "render_POST", None)
    try:
        resource = method(request=transaction.request)
    except __HOLE__:
        transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
        return transaction
    if isinstance(resource, Resource):
        pass
    elif isinstance(resource, tuple) and len(resource) == 2:
        resource, callback = resource
        resource = self._handle_separate(transaction, callback)
        if not isinstance(resource, Resource):  # pragma: no cover
            transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
            return transaction
    else:  # pragma: no cover
        # Handle error
        transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
        return transaction
    if resource.path is None:
        resource.path = path
    resource.observe_count = resource_node.observe_count
    if resource is resource_node:
        transaction.response.code = defines.Codes.CHANGED.number
    else:
        transaction.response.code = defines.Codes.CREATED.number
    resource.changed = True
    resource.observe_count += 1
    transaction.resource = resource
    assert(isinstance(resource, Resource))
    if resource.etag is not None:
        transaction.response.etag = resource.etag
    transaction.response.location_path = resource.path
    if resource.location_query is not None and len(resource.location_query) > 0:
        transaction.response.location_query = resource.location_query
    transaction.response.payload = None
    self._parent.root[resource.path] = resource
    return transaction | NotImplementedError | dataset/ETHPy150Open Tanganelli/CoAPthon/coapthon/layers/resourcelayer.py/ResourceLayer.edit_resource |
150 | def add_resource(self, transaction, parent_resource, lp):
    """
    Render a POST on a new resource.

    :param request: the request
    :param response: the response
    :param parent_resource: the parent of the resource
    :param lp: the location_path attribute of the resource
    :return: the response
    """
    method = getattr(parent_resource, "render_POST", None)
    try:
        resource = method(request=transaction.request)
    except __HOLE__:
        transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
        return transaction
    if isinstance(resource, Resource):
        pass
    elif isinstance(resource, tuple) and len(resource) == 2:
        resource, callback = resource
        resource = self._handle_separate(transaction, callback)
        if not isinstance(resource, Resource):  # pragma: no cover
            transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
            return transaction
    else:  # pragma: no cover
        # Handle error
        transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
        return transaction
    resource.path = lp
    if resource.etag is not None:
        transaction.response.etag = resource.etag
    transaction.response.location_path = resource.path
    if resource.location_query is not None and len(resource.location_query) > 0:
        transaction.response.location_query = resource.location_query
    transaction.response.code = defines.Codes.CREATED.number
    transaction.response.payload = None
    resource.changed = True
    transaction.resource = resource
    self._parent.root[resource.path] = resource
    return transaction | NotImplementedError | dataset/ETHPy150Open Tanganelli/CoAPthon/coapthon/layers/resourcelayer.py/ResourceLayer.add_resource |
151 | def update_resource(self, transaction):
    """
    Render a PUT request.

    :param request: the request
    :param response: the response
    :param resource: the resource
    :return: the response
    """
    # If-Match
    if transaction.request.if_match:
        if None not in transaction.request.if_match and str(transaction.resource.etag) \
                not in transaction.request.if_match:
            transaction.response.code = defines.Codes.PRECONDITION_FAILED.number
            return transaction
    # If-None-Match
    if transaction.request.if_none_match:
        transaction.response.code = defines.Codes.PRECONDITION_FAILED.number
        return transaction
    method = getattr(transaction.resource, "render_PUT", None)
    try:
        resource = method(request=transaction.request)
    except __HOLE__:
        transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
        return transaction
    if isinstance(resource, Resource):
        pass
    elif isinstance(resource, tuple) and len(resource) == 2:
        resource, callback = resource
        resource = self._handle_separate(transaction, callback)
        if not isinstance(resource, Resource):  # pragma: no cover
            transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
            return transaction
    else:  # pragma: no cover
        # Handle error
        transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
        return transaction
    if resource.etag is not None:
        transaction.response.etag = resource.etag
    transaction.response.code = defines.Codes.CHANGED.number
    transaction.response.payload = None
    resource.changed = True
    resource.observe_count += 1
    transaction.resource = resource
    return transaction | NotImplementedError | dataset/ETHPy150Open Tanganelli/CoAPthon/coapthon/layers/resourcelayer.py/ResourceLayer.update_resource |
152 | def delete_resource(self, transaction, path):
"""
Render a DELETE request.
:param request: the request
:param response: the response
:param path: the path
:return: the response
"""
resource = transaction.resource
method = getattr(resource, 'render_DELETE', None)
try:
ret = method(request=transaction.request)
except __HOLE__:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(ret, bool):
pass
elif isinstance(ret, tuple) and len(ret) == 2:
resource, callback = ret
ret = self._handle_separate(transaction, callback)
if not isinstance(ret, bool): # pragma: no cover
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else: # pragma: no cover
# Handle error
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
if ret:
del self._parent.root[path]
transaction.response.code = defines.Codes.DELETED.number
transaction.response.payload = None
transaction.resource.deleted = True
return transaction
else: # pragma: no cover
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction | NotImplementedError | dataset/ETHPy150Open Tanganelli/CoAPthon/coapthon/layers/resourcelayer.py/ResourceLayer.delete_resource |
153 | def get_resource(self, transaction):
"""
Render a GET request.
:param transaction: the transaction
:return: the transaction
"""
method = getattr(transaction.resource, 'render_GET', None)
transaction.resource.actual_content_type = None
# Accept
if transaction.request.accept is not None:
transaction.resource.actual_content_type = transaction.request.accept
# Render_GET
try:
resource = method(request=transaction.request)
except __HOLE__:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif isinstance(resource, tuple) and len(resource) == 2:
resource, callback = resource
resource = self._handle_separate(transaction, callback)
if not isinstance(resource, Resource): # pragma: no cover
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else: # pragma: no cover
# Handle error
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
if resource.etag in transaction.request.etag:
transaction.response.code = defines.Codes.VALID.number
else:
transaction.response.code = defines.Codes.CONTENT.number
try:
transaction.response.payload = resource.payload
if resource.actual_content_type is not None \
and resource.actual_content_type != defines.Content_types["text/plain"]:
transaction.response.content_type = resource.actual_content_type
except KeyError:
transaction.response.code = defines.Codes.NOT_ACCEPTABLE.number
return transaction
assert(isinstance(resource, Resource))
if resource.etag is not None:
transaction.response.etag = resource.etag
if resource.max_age is not None:
transaction.response.max_age = resource.max_age
transaction.resource = resource
return transaction | NotImplementedError | dataset/ETHPy150Open Tanganelli/CoAPthon/coapthon/layers/resourcelayer.py/ResourceLayer.get_resource |
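A minimal sketch (not from the CoAPthon source) of a resource handler that satisfies the two return shapes the layer above accepts: a Resource directly for the synchronous case, or a (resource, callback) pair that is routed through _handle_separate for a separate response. The class name and callback are assumptions.

from coapthon.resources.resource import Resource  # as used by the layer above

class EchoResource(Resource):  # hypothetical resource
    def render_GET(self, request):
        # Synchronous case: hand the resource straight back.
        return self

    def render_PUT(self, request):
        # Separate-response case: the layer unpacks the pair and
        # invokes the callback via _handle_separate.
        def deferred(request):
            self.payload = request.payload
            return self
        return self, deferred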
154 | def get_response_and_time(self, key, default=(None, None)):
""" Retrieves response and timestamp for `key` if it's stored in cache,
otherwise returns `default`
:param key: key of resource
:param default: return this if `key` not found in cache
:returns: tuple (response, datetime)
.. note:: Response is restored after unpickling with :meth:`restore_response`
"""
try:
if key not in self.responses:
key = self.keys_map[key]
response, timestamp = self.responses[key]
except __HOLE__:
return default
return self.restore_response(response), timestamp | KeyError | dataset/ETHPy150Open reclosedev/requests-cache/requests_cache/backends/base.py/BaseCache.get_response_and_time |
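A usage sketch of the lookup above; `cache` (a BaseCache subclass instance) and `key` (as produced by cache.create_key) are assumptions.

response, timestamp = cache.get_response_and_time(key)
if response is None:
    pass  # cache miss: fall back to a live HTTP request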
155 | def delete(self, key):
""" Delete `key` from cache. Also deletes all responses from response history
"""
try:
if key in self.responses:
response, _ = self.responses[key]
del self.responses[key]
else:
response, _ = self.responses[self.keys_map[key]]
del self.keys_map[key]
for r in response.history:
del self.keys_map[self.create_key(r.request)]
except __HOLE__:
pass | KeyError | dataset/ETHPy150Open reclosedev/requests-cache/requests_cache/backends/base.py/BaseCache.delete |
156 | def remove_old_entries(self, created_before):
""" Deletes entries from cache with creation time older than ``created_before``
"""
keys_to_delete = set()
for key in self.responses:
try:
response, created_at = self.responses[key]
except __HOLE__:
continue
if created_at < created_before:
keys_to_delete.add(key)
for key in keys_to_delete:
self.delete(key) | KeyError | dataset/ETHPy150Open reclosedev/requests-cache/requests_cache/backends/base.py/BaseCache.remove_old_entries |
157 | def reduce_response(self, response, seen=None):
""" Reduce response object to make it compatible with ``pickle``
"""
if seen is None:
seen = {}
try:
return seen[id(response)]
except __HOLE__:
pass
result = _Store()
# prefetch
response.content
for field in self._response_attrs:
setattr(result, field, self._picklable_field(response, field))
seen[id(response)] = result
result.history = tuple(self.reduce_response(r, seen) for r in response.history)
return result | KeyError | dataset/ETHPy150Open reclosedev/requests-cache/requests_cache/backends/base.py/BaseCache.reduce_response |
158 | def restore_response(self, response, seen=None):
""" Restore response object after unpickling
"""
if seen is None:
seen = {}
try:
return seen[id(response)]
except __HOLE__:
pass
result = requests.Response()
for field in self._response_attrs:
setattr(result, field, getattr(response, field, None))
result.raw._cached_content_ = result.content
seen[id(response)] = result
result.history = tuple(self.restore_response(r, seen) for r in response.history)
return result | KeyError | dataset/ETHPy150Open reclosedev/requests-cache/requests_cache/backends/base.py/BaseCache.restore_response |
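The two methods above are designed as a round trip; a sketch under the assumption that `cache` and `original` (a requests.Response) already exist.

import pickle

blob = pickle.dumps(cache.reduce_response(original))   # picklable _Store tree
restored = cache.restore_response(pickle.loads(blob))  # requests.Response again
assert restored.content == original.content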
159 | def _get_translation_object(self, queryset, language_code):
try:
return queryset.filter(
Q(language_code__iexact=language_code)
| Q(language_code__iexact=short_language_code(language_code))
).order_by('-language_code')[0]
except IndexError:
try:
return queryset.filter(
Q(language_code__istartswith=settings.LANGUAGE_CODE)
| Q(language_code__istartswith=short_language_code(
settings.LANGUAGE_CODE))
).order_by('-language_code')[0]
except __HOLE__:
try:
return queryset.all()[0]
except IndexError:
raise queryset.model.DoesNotExist | IndexError | dataset/ETHPy150Open feincms/feincms/feincms/translations.py/TranslatedObjectMixin._get_translation_object |
160 | def get_translation(self, language_code=None):
if not language_code:
language_code = translation.get_language()
key = self.get_translation_cache_key(language_code)
trans = cache.get(key)
if trans is None:
try:
trans = self._get_translation_object(
self.translations.all(), language_code)
except __HOLE__:
trans = _NoTranslation
cache.set(key, trans)
if trans is _NoTranslation:
return None
# Assign self to prevent additional database queries
trans.parent = self
return trans | ObjectDoesNotExist | dataset/ETHPy150Open feincms/feincms/feincms/translations.py/TranslatedObjectMixin.get_translation |
161 | def __str__(self):
try:
translation = self.translation
except __HOLE__:
return self.__class__.__name__
if translation:
return '%s' % translation
return self.__class__.__name__ | ObjectDoesNotExist | dataset/ETHPy150Open feincms/feincms/feincms/translations.py/TranslatedObjectMixin.__str__ |
162 | def purge_translation_cache(self):
cache.delete(self.get_translation_cache_key())
for lang in self.available_translations:
cache.delete(self.get_translation_cache_key(lang))
try:
del self._cached_translation
except __HOLE__:
pass | AttributeError | dataset/ETHPy150Open feincms/feincms/feincms/translations.py/TranslatedObjectMixin.purge_translation_cache |
163 | def testGetDataDict(self):
"""Test _GetDataDict()."""
try:
self.bdo._GetDataDict()
self.fail('NotImplementedError not raised')
except __HOLE__:
pass | NotImplementedError | dataset/ETHPy150Open google/simian/src/tests/simian/auth/x509_test.py/BaseDataObjectTest.testGetDataDict |
164 | def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except __HOLE__:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst))) | OSError | dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py/_samefile |
165 | def copyfile(src, dst):
"""Copy data from src to dst"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except __HOLE__:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst) | OSError | dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py/copyfile |
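A usage sketch for copyfile; the file names are hypothetical.

try:
    copyfile('settings.cfg', 'settings.cfg.bak')
except Error:
    pass  # src and dst resolve to the same file
except SpecialFileError:
    pass  # src or dst is a named pipe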
166 | def copystat(src, dst):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
if hasattr(os, 'utime'):
os.utime(dst, (st.st_atime, st.st_mtime))
if hasattr(os, 'chmod'):
os.chmod(dst, mode)
if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
try:
os.chflags(dst, st.st_flags)
except __HOLE__ as why:
if (not hasattr(errno, 'EOPNOTSUPP') or
why.errno != errno.EOPNOTSUPP):
raise | OSError | dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py/copystat |
167 | def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
os.symlink(linkto, dstname)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
# otherwise let the copy occur; copy2 will raise an error
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except __HOLE__ as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.extend((src, dst, str(why)))
if errors:
raise Error(errors) | OSError | dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py/copytree |
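A usage sketch of the `ignore` callable described in the docstring above; the directory names and filter are hypothetical.

def ignore_pyc(src, names):
    # called once per visited directory; returns names to skip
    return set(n for n in names if n.endswith('.pyc'))

copytree('project', 'project_copy', symlinks=False, ignore=ignore_pyc)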
168 | def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except __HOLE__:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info()) | OSError | dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py/rmtree |
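A usage sketch of the `onerror` hook above, retrying removal of read-only entries; the path is hypothetical.

import os, stat

def chmod_and_retry(func, path, exc_info):
    # func is os.listdir, os.remove or os.rmdir; retry once as writable
    os.chmod(path, stat.S_IWRITE)
    func(path)

rmtree('build', onerror=chmod_and_retry)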
169 | def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except __HOLE__:
if os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src) | OSError | dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py/move |
170 | def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except __HOLE__:
result = None
if result is not None:
return result[2]
return None | KeyError | dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py/_get_gid |
171 | def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except __HOLE__:
result = None
if result is not None:
return result[2]
return None | KeyError | dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py/_get_uid |
172 | def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except __HOLE__:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename | ImportError | dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py/_make_zipfile |
173 | def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except __HOLE__:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename | KeyError | dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py/make_archive |
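A usage sketch for make_archive; the paths are hypothetical.

archive = make_archive('/tmp/src-backup', 'gztar',
                       root_dir='project', base_dir='src')
# -> '/tmp/src-backup.tar.gz', containing paths rooted at 'src/'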
174 | def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
try:
import zipfile
except __HOLE__:
raise ReadError('zlib not supported, cannot unpack this archive.')
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
zip = zipfile.ZipFile(filename)
try:
for info in zip.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
_ensure_directory(target)
if not name.endswith('/'):
# file
data = zip.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
finally:
zip.close() | ImportError | dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py/_unpack_zipfile |
175 | def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
`format` is the archive format: one of "zip", "tar", or "gztar". Or any
other registered format. If not provided, unpack_archive will use the
filename extension and see if an unpacker was registered for that
extension.
In case none is found, a ValueError is raised.
"""
if extract_dir is None:
extract_dir = os.getcwd()
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except __HOLE__:
raise ValueError("Unknown unpack format '{0}'".format(format))
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs) | KeyError | dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py/unpack_archive |
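A usage sketch for unpack_archive; the file names are hypothetical.

unpack_archive('src-backup.zip', extract_dir='restored')      # format inferred
unpack_archive('src-backup.bin', extract_dir='restored',
               format='zip')                                  # format forced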
176 | def run_once(self, **kwargs):
tasks = self.get_tasks()
# This is done to make sure we submit one task per node until we've
# submitted all the tasks. This helps ensure we don't hammer a
# single node with monitoring tasks
while True:
working_index = tasks.keys()
if not working_index:
break
for node in working_index:
try:
task_context = tasks[node].pop()
task_id = self.task_id_template.format(**task_context)
task = Task({
'id': task_id,
'context': task_context})
except __HOLE__:
del tasks[node]
continue
self.submit_task(task, **kwargs) | IndexError | dataset/ETHPy150Open cloudtools/nymms/nymms/scheduler/Scheduler.py/Scheduler.run_once |
177 | def parse_tag_content(self):
f = self.f
sound_flags = get_ui8(f)
read_bytes = 1
self.sound_format = (sound_flags & 0xF0) >> 4
self.sound_rate = (sound_flags & 0xC) >> 2
self.sound_size = (sound_flags & 0x2) >> 1
self.sound_type = sound_flags & 0x1
if self.sound_format == SOUND_FORMAT_AAC:
# AAC packets can be sequence headers or raw data.
# The former contain codec information needed by the decoder to be
# able to interpret the rest of the data.
self.aac_packet_type = get_ui8(f)
read_bytes += 1
# AAC always has a sampling rate of 44 kHz
ensure(self.sound_rate, SOUND_RATE_44_KHZ,
"AAC sound format with incorrect sound rate: %d" %
self.sound_rate)
# AAC is always stereo
ensure(self.sound_type, SOUND_TYPE_STEREO,
"AAC sound format with incorrect sound type: %d" %
self.sound_type)
if strict_parser():
try:
sound_format_to_string[self.sound_format]
except KeyError:
raise MalformedFLV("Invalid sound format: %d",
self.sound_format)
try:
(self.aac_packet_type and
aac_packet_type_to_string[self.aac_packet_type])
except __HOLE__:
raise MalformedFLV("Invalid AAC packet type: %d",
self.aac_packet_type)
f.seek(self.size - read_bytes, os.SEEK_CUR) | KeyError | dataset/ETHPy150Open mrknow/filmkodi/plugin.video.specto/resources/lib/libraries/f4mproxy/flvlib/tags.py/AudioTag.parse_tag_content |
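A worked example of the flag-byte decoding above (values follow the FLV spec constants referenced in the code): the byte 0xAF unpacks to AAC, 44 kHz, 16-bit, stereo.

sound_flags = 0xAF                        # 0b10101111
assert (sound_flags & 0xF0) >> 4 == 10    # sound_format: AAC
assert (sound_flags & 0xC) >> 2 == 3      # sound_rate: 44 kHz
assert (sound_flags & 0x2) >> 1 == 1      # sound_size: 16-bit
assert sound_flags & 0x1 == 1             # sound_type: stereo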
178 | def parse_tag_content(self):
f = self.f
video_flags = get_ui8(f)
read_bytes = 1
self.frame_type = (video_flags & 0xF0) >> 4
self.codec_id = video_flags & 0xF
if self.codec_id == CODEC_ID_H264:
# H.264 packets can be sequence headers, NAL units or sequence
# ends.
self.h264_packet_type = get_ui8(f)
read_bytes += 1
if strict_parser():
try:
frame_type_to_string[self.frame_type]
except KeyError:
raise MalformedFLV("Invalid frame type: %d", self.frame_type)
try:
codec_id_to_string[self.codec_id]
except __HOLE__:
raise MalformedFLV("Invalid codec ID: %d", self.codec_id)
try:
(self.h264_packet_type and
h264_packet_type_to_string[self.h264_packet_type])
except KeyError:
raise MalformedFLV("Invalid H.264 packet type: %d",
self.h264_packet_type)
f.seek(self.size - read_bytes, os.SEEK_CUR) | KeyError | dataset/ETHPy150Open mrknow/filmkodi/plugin.video.specto/resources/lib/libraries/f4mproxy/flvlib/tags.py/VideoTag.parse_tag_content |
179 | def tag_type_to_class(self, tag_type):
try:
return tag_to_class[tag_type]
except __HOLE__:
raise MalformedFLV("Invalid tag type: %d", tag_type) | KeyError | dataset/ETHPy150Open mrknow/filmkodi/plugin.video.specto/resources/lib/libraries/f4mproxy/flvlib/tags.py/FLV.tag_type_to_class |
180 | def getPendingTasksForHost(self, host):
try:
return self.host_cache[host]
except __HOLE__:
v = self._getPendingTasksForHost(host)
self.host_cache[host] = v
return v | KeyError | dataset/ETHPy150Open douban/dpark/dpark/job.py/SimpleJob.getPendingTasksForHost |
181 | def test_connection_source_address(self):
try:
# source_address does not exist in Py26-
conn = HTTPConnection('localhost', 12345, source_address='127.0.0.1')
except __HOLE__ as e:
self.fail('HTTPConnection raised TypeError on source_address: %r' % e) | TypeError | dataset/ETHPy150Open shazow/urllib3/test/test_compatibility.py/TestVersionCompatibility.test_connection_source_address |
182 | def guess_zulip_user_from_jira(jira_username, realm):
try:
# Try to find a matching user in Zulip
# We search a user's full name, short name,
# and beginning of email address
user = UserProfile.objects.filter(
Q(full_name__iexact=jira_username) |
Q(short_name__iexact=jira_username) |
Q(email__istartswith=jira_username),
is_active=True,
realm=realm).order_by("id")[0]
return user
except __HOLE__:
return None | IndexError | dataset/ETHPy150Open zulip/zulip/zerver/views/webhooks/jira.py/guess_zulip_user_from_jira |
183 | @api_key_only_webhook_view
def api_jira_webhook(request, user_profile):
try:
payload = ujson.loads(request.body)
except ValueError:
return json_error("Malformed JSON input")
try:
stream = request.GET['stream']
except (AttributeError, KeyError):
stream = 'jira'
def get_in(payload, keys, default=''):
try:
for key in keys:
payload = payload[key]
except (AttributeError, __HOLE__, TypeError):
return default
return payload
event = payload.get('webhookEvent')
author = get_in(payload, ['user', 'displayName'])
issueId = get_in(payload, ['issue', 'key'])
# Guess the URL as it is not specified in the payload
# We assume that there is a /browse/BUG-### page
# from the REST url of the issue itself
baseUrl = re.match(r"(.*)/rest/api/.*", get_in(payload, ['issue', 'self']))
if baseUrl and len(baseUrl.groups()):
issue = "[%s](%s/browse/%s)" % (issueId, baseUrl.group(1), issueId)
else:
issue = issueId
title = get_in(payload, ['issue', 'fields', 'summary'])
priority = get_in(payload, ['issue', 'fields', 'priority', 'name'])
assignee = get_in(payload, ['issue', 'fields', 'assignee', 'displayName'], 'no one')
assignee_email = get_in(payload, ['issue', 'fields', 'assignee', 'emailAddress'], '')
assignee_mention = ''
if assignee_email != '':
try:
assignee_profile = get_user_profile_by_email(assignee_email)
assignee_mention = "@**%s**" % (assignee_profile.full_name,)
except UserProfile.DoesNotExist:
assignee_mention = "**%s**" % (assignee_email,)
subject = "%s: %s" % (issueId, title)
if event == 'jira:issue_created':
content = "%s **created** %s priority %s, assigned to @**%s**:\n\n> %s" % \
(author, issue, priority, assignee, title)
elif event == 'jira:issue_deleted':
content = "%s **deleted** %s!" % \
(author, issue)
elif event == 'jira:issue_updated':
# Reassigned, commented, reopened, and resolved events are all bundled
# into this one 'updated' event type, so we try to extract the meaningful
# event that happened
if assignee_mention != '':
assignee_blurb = " (assigned to %s)" % (assignee_mention,)
else:
assignee_blurb = ''
content = "%s **updated** %s%s:\n\n" % (author, issue, assignee_blurb)
changelog = get_in(payload, ['changelog',])
comment = get_in(payload, ['comment', 'body'])
if changelog != '':
# Use the changelog to display the changes, whitelist types we accept
items = changelog.get('items')
for item in items:
field = item.get('field')
# Convert a user's target to a @-mention if possible
targetFieldString = "**%s**" % (item.get('toString'),)
if field == 'assignee' and assignee_mention != '':
targetFieldString = assignee_mention
fromFieldString = item.get('fromString')
if targetFieldString or fromFieldString:
content += "* Changed %s from **%s** to %s\n" % (field, fromFieldString, targetFieldString)
if comment != '':
comment = convert_jira_markup(comment, user_profile.realm)
content += "\n%s\n" % (comment,)
elif event in ['jira:worklog_updated']:
# We ignore these event types
return json_success()
elif 'transition' in payload:
from_status = get_in(payload, ['transition', 'from_status'])
to_status = get_in(payload, ['transition', 'to_status'])
content = "%s **transitioned** %s from %s to %s" % (author, issue, from_status, to_status)
else:
# Unknown event type
if not settings.TEST_SUITE:
if event is None:
logging.warning("Got JIRA event with None event type: %s" % (payload,))
else:
logging.warning("Got JIRA event type we don't understand: %s" % (event,))
return json_error("Unknown JIRA event type")
check_send_message(user_profile, get_client("ZulipJIRAWebhook"), "stream",
[stream], subject, content)
return json_success() | KeyError | dataset/ETHPy150Open zulip/zulip/zerver/views/webhooks/jira.py/api_jira_webhook |
184 | @staticmethod
def validateInfo(doc):
"""
Makes sure the root field is a valid absolute path and is writable.
It also conveniently updates the root field, expanding an initial
``~`` or ``~user`` component to the home directory of the user
running the server.
"""
doc['root'] = os.path.expanduser(doc['root'])
if not os.path.isabs(doc['root']):
raise ValidationException('You must provide an absolute path '
'for the root directory.', 'root')
try:
mkdir(doc['root'])
except __HOLE__:
msg = 'Could not make directory "%s".' % doc['root']
logger.exception(msg)
raise ValidationException(msg)
if not os.access(doc['root'], os.W_OK):
raise ValidationException(
'Unable to write into directory "%s".' % doc['root']) | OSError | dataset/ETHPy150Open girder/girder/girder/utility/filesystem_assetstore_adapter.py/FilesystemAssetstoreAdapter.validateInfo |
185 | def __init__(self, assetstore):
super(FilesystemAssetstoreAdapter, self).__init__(assetstore)
# If we can't create the temp directory, the assetstore still needs to
# be initialized so that it can be deleted or modified. The validation
# prevents invalid new assetstores from being created, so this only
# happens to existing assetstores that no longer can access their temp
# directories.
self.tempDir = os.path.join(self.assetstore['root'], 'temp')
try:
mkdir(self.tempDir)
except __HOLE__:
self.unavailable = True
logger.exception('Failed to create filesystem assetstore '
'directories %s' % self.tempDir)
if not os.access(self.assetstore['root'], os.W_OK):
self.unavailable = True
logger.error('Could not write to assetstore root: %s',
self.assetstore['root']) | OSError | dataset/ETHPy150Open girder/girder/girder/utility/filesystem_assetstore_adapter.py/FilesystemAssetstoreAdapter.__init__ |
186 | def capacityInfo(self):
"""
For filesystem assetstores, we just need to report the free and total
space on the filesystem where the assetstore lives.
"""
try:
usage = psutil.disk_usage(self.assetstore['root'])
return {'free': usage.free, 'total': usage.total}
except __HOLE__:
logger.exception(
'Failed to get disk usage of %s' % self.assetstore['root'])
# If psutil.disk_usage fails or we can't query the assetstore's root
# directory, just report nothing regarding disk capacity
return { # pragma: no cover
'free': None,
'total': None
} | OSError | dataset/ETHPy150Open girder/girder/girder/utility/filesystem_assetstore_adapter.py/FilesystemAssetstoreAdapter.capacityInfo |
187 | def finalizeUpload(self, upload, file):
"""
Moves the file into its permanent content-addressed location within the
assetstore. Directory hierarchy yields 256^2 buckets.
"""
hash = hash_state.restoreHex(upload['sha512state'],
'sha512').hexdigest()
dir = os.path.join(hash[0:2], hash[2:4])
absdir = os.path.join(self.assetstore['root'], dir)
path = os.path.join(dir, hash)
abspath = os.path.join(self.assetstore['root'], path)
mkdir(absdir)
if os.path.exists(abspath):
# Already have this file stored, just delete temp file.
os.remove(upload['tempFile'])
else:
# Move the temp file to permanent location in the assetstore.
# shutil.move works across filesystems
shutil.move(upload['tempFile'], abspath)
try:
os.chmod(abspath, stat.S_IRUSR | stat.S_IWUSR)
except __HOLE__:
# some filesystems may not support POSIX permissions
pass
file['sha512'] = hash
file['path'] = path
return file | OSError | dataset/ETHPy150Open girder/girder/girder/utility/filesystem_assetstore_adapter.py/FilesystemAssetstoreAdapter.finalizeUpload |
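A worked example of the bucketing above, with a hypothetical digest: the first four hex characters select one of 256^2 directories.

import os

hash = 'ab54' + 'e' * 124                        # hypothetical sha512 hex digest
path = os.path.join(hash[0:2], hash[2:4], hash)
assert path.startswith(os.path.join('ab', '54'))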
188 | def _decode_escape_characters(opt):
# This function attempts to parse special characters (e.g. \n) in a
# command-line option.
if opt:
try:
opt = opt.decode('string_escape')
except __HOLE__:
# We may get a ValueError here if the string has an odd number
# of backslashes. In that case, we just return the original
# string.
pass
return opt | ValueError | dataset/ETHPy150Open memsql/memsql-loader/memsql_loader/cli/load.py/_decode_escape_characters |
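A worked example (Python 2, matching the codec used above): a literal backslash-n from the shell becomes a real newline.

assert '\\n'.decode('string_escape') == '\n'
assert 'col1\\tcol2'.decode('string_escape') == 'col1\tcol2'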
189 | def process_options(self):
""" This function validates the command line options and converts the options
into a spec file. The spec file is then passed through the job schema validator
for further validation."""
if self.options.help:
self.options.subparser.print_help()
sys.exit(0)
log.update_verbosity(debug=self.options.debug)
if self.options.spec:
try:
with open(self.options.spec, 'r') as f:
base_spec = json.loads(f.read())
except JSONDecodeError:
print >>sys.stderr, "Failed to load spec file '%s': invalid JSON" % self.options.spec
sys.exit(1)
except __HOLE__ as e:
print >>sys.stderr, "Unable to open spec file '%s': %s" % (self.options.spec, str(e))
sys.exit(1)
else:
base_spec = {}
self.pre_process_options(self.options, self.logger)
if self.options.password == _PasswordNotSpecified:
password = getpass.getpass('Enter password: ')
self.options.password = password
try:
merged_spec = schema.build_spec(base_spec, self.options)
except schema.InvalidKeyException as e:
self.logger.error(str(e))
sys.exit(1)
# only pull the AWS arguments from the command line if there is
# at least one S3 path. This is more of a UX thing, as having them
# there won't break anything.
try:
paths = merged_spec['source']['paths']
except KeyError:
paths = []
for path in paths:
if path.startswith('s3://'):
schema.DEFAULT_AWS_ACCESS_KEY = os.getenv('AWS_ACCESS_KEY_ID')
schema.DEFAULT_AWS_SECRET_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
break
try:
self.job = Job(merged_spec)
self.logger.debug("Produced spec:\n%s", json.pformat(self.job.spec))
except V.Invalid as err:
if isinstance(err, V.MultipleInvalid):
errors = err.errors
else:
errors = [err]
error_msgs = []
seen_paths = []
if self.options.spec:
er_config_validation = """\
Invalid specification:
%(formatted_spec)s
Error(s):
%(error_msg)s"""
er_msg_fmt = " - Error [%(error_path)s] (or %(error_cmd_line)s on the command line): %(message)s."
else:
er_config_validation = """\
Invalid command line options for load:
%(error_msg)s"""
er_msg_fmt = " - Invalid value for %(error_cmd_line)s: %(message)s."
for e in errors:
extra_key = (e.message == 'extra keys not allowed')
error_path = '.'.join([str(leg) for leg in e.path])
cmd_line_opt = schema.get_command_line_mapping([x for x in e.path])
if cmd_line_opt == 'paths':
cmd_line_opt = "the path argument (positional)"
else:
cmd_line_opt = '--' + cmd_line_opt
if any((error_path in seen_path) for seen_path in seen_paths):
# we do this because voluptuous triggers missing
# key errors for any required key that has a sub-error
continue
seen_paths.append(error_path)
error_msgs.append(
er_msg_fmt % {
'error_path': error_path, 'error_cmd_line': cmd_line_opt,
'message': 'key %s is not allowed' % error_path if extra_key else e.error_message})
self.logger.error(er_config_validation % {
'formatted_spec': json.pformat(merged_spec),
'error_msg': "\n".join(error_msgs)
})
sys.exit(1)
if self.options.print_spec:
print json.pformat(self.job.spec)
sys.exit(0) | IOError | dataset/ETHPy150Open memsql/memsql-loader/memsql_loader/cli/load.py/RunLoad.process_options |
190 | def queue_job(self):
all_keys = list(self.job.get_files(s3_conn=self.s3_conn))
paths = self.job.spec.source.paths
if self.options.dry_run:
print "DRY RUN SUMMARY:"
print "----------------"
if len(all_keys) == 0:
print "Paths %s matched no files" % ([str(p) for p in paths])
else:
print "List of files to load:"
for key in all_keys:
print key.name
print "Example LOAD DATA statement to execute:"
file_id = self.job.get_file_id(all_keys[0])
print load_data.build_example_query(self.job, file_id)
sys.exit(0)
elif len(all_keys) == 0:
self.logger.warning("Paths %s matched no files. Please check your path specification (be careful with relative paths)." % ([str(p) for p in paths]))
self.jobs = None
spec = self.job.spec
try:
self.logger.info('Creating job')
self.jobs = Jobs()
self.jobs.save(self.job)
self.tasks = Tasks()
etags = []
for key in all_keys:
if key.scheme in ['s3', 'hdfs']:
etags.append(key.etag)
if etags and not self.options.force:
database, table = spec.target.database, spec.target.table
host, port = spec.connection.host, spec.connection.port
competing_job_ids = [j.id for j in self.jobs.query_target(host, port, database, table)]
md5_map = self.get_current_tasks_md5_map(etags, competing_job_ids)
else:
# For files loading on the filesystem, we are not going to MD5 files
# for performance reasons. We are also basing this on the assumption
# that filesystem loads are generally a one-time operation.
md5_map = None
if self.options.force:
self.logger.info('Loading all files in this job, regardless of identical files that are currently loading or were previously loaded (because of the --force flag)')
if self.job.spec.options.file_id_column is not None:
self.logger.info('Since you\'re using file_id_column, duplicate records will be checked and avoided')
count = self.submit_files(all_keys, md5_map, self.job, self.options.force)
if count == 0:
self.logger.info('Deleting the job, it has no child tasks')
try:
self.jobs.delete(self.job)
except:
self.logger.error("Rollback failed for job: %s", self.job.id)
else:
self.logger.info("Successfully queued job with id: %s", self.job.id)
if not servers.is_server_running():
self.start_server()
if self.options.sync:
self.wait_for_job()
except (Exception, __HOLE__):
self.logger.error('Failed to submit files, attempting to roll back job creation...')
exc_info = sys.exc_info()
if self.jobs is not None:
try:
self.jobs.delete(self.job)
except:
self.logger.error("Rollback failed for job: %s", self.job.id)
# Have to use this old-style raise because raise just throws
# the last exception that occurred, which could be the one in
# the above try/except block and not the original exception.
raise exc_info[0], exc_info[1], exc_info[2] | AssertionError | dataset/ETHPy150Open memsql/memsql-loader/memsql_loader/cli/load.py/RunLoad.queue_job |
191 | def wait_for_job(self):
self.logger.info("Waiting for job %s to finish..." % self.job.id)
num_unfinished_tasks = -1
unfinished_states = [ shared.TaskState.RUNNING, shared.TaskState.QUEUED ]
predicate = ('job_id = :job_id', { 'job_id': self.job.id })
while num_unfinished_tasks != 0:
try:
time.sleep(0.5)
num_unfinished_tasks = len(self.tasks.get_tasks_in_state(
unfinished_states, extra_predicate=predicate))
except __HOLE__:
self.logger.info(
'Caught Ctrl-C. This load will continue running in the '
'background. You can monitor its progress with '
'memsql-loader job %s' % (self.job.id))
sys.exit(0)
successful_tasks = self.tasks.get_tasks_in_state(
[ shared.TaskState.SUCCESS ], extra_predicate=predicate)
error_tasks = self.tasks.get_tasks_in_state(
[ shared.TaskState.ERROR ], extra_predicate=predicate)
cancelled_tasks = self.tasks.get_tasks_in_state(
[ shared.TaskState.CANCELLED ], extra_predicate=predicate)
self.logger.info("Job %s finished with %s successful tasks, %s cancelled tasks, and %s errored tasks" % (self.job.id, len(successful_tasks), len(cancelled_tasks), len(error_tasks)))
if error_tasks:
self.logger.info("Error messages include: ")
for task in error_tasks[:10]:
if task.data.get('error'):
self.logger.error(task.data['error'])
self.logger.info("To see all error messages, run: memsql-loader tasks %s" % self.job.id)
sys.exit(1)
sys.exit(0) | KeyboardInterrupt | dataset/ETHPy150Open memsql/memsql-loader/memsql_loader/cli/load.py/RunLoad.wait_for_job |
192 | def create_ip_pool(self, subnet, vsm_ip=None):
"""Create a subnet on VSM.
:param subnet: subnet dict
:param vsm_ip: string representing the IP address of the VSM
"""
if subnet['cidr']:
try:
ip = netaddr.IPNetwork(subnet['cidr'])
netmask = str(ip.netmask)
network_address = str(ip.network)
except (__HOLE__, netaddr.AddrFormatError):
msg = _("Invalid input for CIDR")
raise n_exc.InvalidInput(error_message=msg)
else:
netmask = network_address = ""
if subnet['allocation_pools']:
address_range_start = subnet['allocation_pools'][0]['start']
address_range_end = subnet['allocation_pools'][0]['end']
else:
address_range_start = None
address_range_end = None
body = {'addressRangeStart': address_range_start,
'addressRangeEnd': address_range_end,
'ipAddressSubnet': netmask,
'description': subnet['name'],
'gateway': subnet['gateway_ip'],
'dhcp': subnet['enable_dhcp'],
'dnsServersList': subnet['dns_nameservers'],
'networkAddress': network_address,
'netSegmentName': subnet['network_id'],
'id': subnet['id'],
'tenantId': subnet['tenant_id']}
return self._post(self.ip_pool_path % subnet['id'],
body=body, vsm_ip=vsm_ip) | ValueError | dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/plugins/ml2/drivers/cisco/n1kv/n1kv_client.py/Client.create_ip_pool |
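A worked example of the netaddr handling above; the CIDR is illustrative.

import netaddr

ip = netaddr.IPNetwork('10.0.0.0/24')
assert str(ip.netmask) == '255.255.255.0'
assert str(ip.network) == '10.0.0.0'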
193 | def _do_request(self, method, action, body=None,
headers=None, vsm_ip=None):
"""Perform the HTTP request.
The response is in either JSON format or plain text. A GET method
yields a JSON response, while a PUT/POST/DELETE returns a message from
the VSM in plain text format.
An exception is raised when the VSM replies with an INTERNAL SERVER
ERROR HTTP status code (500), i.e. an error has occurred on the VSM,
or with SERVICE UNAVAILABLE (503), i.e. the VSM is not reachable.
:param method: type of the HTTP request. POST, GET, PUT or DELETE
:param action: path to which the client makes request
:param body: dict for arguments which are sent as part of the request
:param headers: header for the HTTP request
:param vsm_ip: vsm_ip for the HTTP request. If not provided then
request will be sent to all VSMs.
:returns: JSON or plain text in HTTP response
"""
action = self.action_prefix + action
if body:
body = jsonutils.dumps(body)
LOG.debug("req: %s", body)
hosts = []
if vsm_ip:
hosts.append(vsm_ip)
else:
hosts = self.vsm_ips
if not headers:
headers = self._get_auth_header()
headers['Content-Type'] = headers['Accept'] = "application/json"
for vsm_ip in hosts:
if netaddr.valid_ipv6(vsm_ip):
# Enclose IPv6 address in [] in the URL
vsm_action = action % ("[%s]" % vsm_ip)
else:
# IPv4 address
vsm_action = action % vsm_ip
for attempt in range(self.max_vsm_retries + 1):
try:
LOG.debug("[VSM %(vsm)s attempt %(id)s]: Connecting.." %
{"vsm": vsm_ip, "id": attempt})
resp = self.pool.spawn(requests.request,
method,
url=vsm_action,
data=body,
headers=headers,
timeout=self.timeout).wait()
break
except Exception as e:
LOG.debug("[VSM %(vsm)s attempt %(id)s]: Conn timeout." %
{"vsm": vsm_ip, "id": attempt})
if attempt == self.max_vsm_retries:
LOG.error(_LE("VSM %s, Conn failed."), vsm_ip)
raise n1kv_exc.VSMConnectionFailed(reason=e)
if resp.status_code != requests.codes.OK:
LOG.error(_LE("VSM %(vsm)s, Got error: %(err)s"),
{"vsm": vsm_ip, "err": resp.text})
raise n1kv_exc.VSMError(reason=resp.text)
if 'application/json' in resp.headers['content-type']:
try:
return resp.json()
except __HOLE__:
return {}
elif 'text/plain' in resp.headers['content-type']:
LOG.info(_LI("VSM: %s"), resp.text) | ValueError | dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/plugins/ml2/drivers/cisco/n1kv/n1kv_client.py/Client._do_request |
194 | def getValue(self):
v = super(AbstractTextField, self).getValue()
if self._format is None or v is None:
return v
try:
warn('deprecated', DeprecationWarning)
return self._format.format(v) # FIXME format
except __HOLE__:
return v | ValueError | dataset/ETHPy150Open rwl/muntjac/muntjac/ui/abstract_text_field.py/AbstractTextField.getValue |
195 | def get_field_names(filename,
dialect,
col_number=None):
""" Determines names of fields
Inputs:
Outputs:
Misc:
- if the file is empty it will return None
"""
reader = csv.reader(open(filename, 'r'), dialect=dialect)
try:
field_names = reader.next()
except __HOLE__:
return None # empty file
if col_number is None: # get names for all fields
final_names = []
for col_sub in range(len(field_names)):
if dialect.has_header:
final_names.append(field_names[col_sub].strip())
else:
final_names.append('field_%d' % col_sub)
return final_names
else: # get name for single field
final_name = ''
if dialect.has_header:
final_name = field_names[col_number].strip()
else:
final_name = 'field_%d' % col_number
return final_name | StopIteration | dataset/ETHPy150Open kenfar/DataGristle/gristle/field_misc.py/get_field_names |
196 | def get_field_freq(filename,
dialect,
field_number,
max_freq_size=MAX_FREQ_SIZE_DEFAULT):
""" Collects a frequency distribution for a single field by reading
the file provided.
Issues:
- has limited checking for wrong number of fields in rec
"""
freq = collections.defaultdict(int)
rec_cnt = 0
truncated = False
invalid_row_cnt = 0
for fields in csv.reader(open(filename,'r'), dialect=dialect):
rec_cnt += 1
if rec_cnt == 1 and dialect.has_header:
continue
try:
freq[fields[field_number].strip()] += 1
except __HOLE__:
invalid_row_cnt += 1
if len(freq) >= max_freq_size:
print ' WARNING: freq dict is too large - will trunc'
truncated = True
break
return freq, truncated, invalid_row_cnt | IndexError | dataset/ETHPy150Open kenfar/DataGristle/gristle/field_misc.py/get_field_freq |
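A usage sketch for get_field_freq; the file name and column number are hypothetical.

freq, truncated, bad_rows = get_field_freq('input.csv', dialect, 2)
top5 = sorted(freq.items(), key=lambda kv: kv[1], reverse=True)[:5]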
197 | def get_min(value_type, values):
""" Returns the minimum value of the input. Ignores unknown values, if
no values found besides unknown it will just return 'None'
Inputs:
- value_type - one of integer, float, string, timestamp
- dictionary or list of string values
Outputs:
- the single minimum value of the appropriate type
Test Coverage:
- complete via test harness
"""
assert value_type in ['integer', 'float', 'string', 'timestamp', 'unknown', None]
known_vals = []
for val in values:
if not typer.is_unknown(val):
try:
if value_type == 'integer':
known_vals.append(int(val))
elif value_type == 'float':
known_vals.append(float(val))
else:
known_vals.append(val)
except __HOLE__:
pass # ignore invalid values
# next return the minimum value
try:
return str(min(known_vals))
except ValueError:
return None | ValueError | dataset/ETHPy150Open kenfar/DataGristle/gristle/field_misc.py/get_min |
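A usage sketch for get_min, assuming typer.is_unknown treats 'unknown' as an unknown token: unknown tokens and unparsable values are skipped rather than raising.

assert get_min('integer', ['7', '3', 'unknown', 'oops']) == '3'
assert get_min('string', ['b', 'a', 'c']) == 'a'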
198 | def get_max(value_type, values):
""" Returns the maximum value of the input. Ignores unknown values, if
no values found besides unknown it will just return 'None'
Inputs:
- value_type - one of integer, float, string, timestamp
- dictionary or list of string values
Outputs:
- the single maximum value of the appropriate type
Test Coverage:
- complete via test harness
"""
assert value_type in ['integer', 'float', 'string', 'timestamp', 'unknown', None]
known_vals = []
for val in values:
if not typer.is_unknown(val):
try:
if value_type == 'integer':
known_vals.append(int(val))
elif value_type == 'float':
known_vals.append(float(val))
else:
known_vals.append(val)
except ValueError:
pass # ignore invalid values
try:
return str(max(known_vals))
except __HOLE__:
return None | ValueError | dataset/ETHPy150Open kenfar/DataGristle/gristle/field_misc.py/get_max |
199 | def test_raise_exception_on_non_existing_link_removal(self):
gr = hypergraph()
gr.add_node(0)
gr.add_hyperedge(1)
try:
gr.unlink(0, 1)
except __HOLE__:
pass
else:
self.fail() | ValueError | dataset/ETHPy150Open pmatiello/python-graph/tests/unittests-hypergraph.py/test_hypergraph.test_raise_exception_on_non_existing_link_removal |