Unnamed: 0 (int64, 0–10k) | function (string, lengths 79–138k) | label (string, 20 classes) | info (string, lengths 42–261) |
---|---|---|---|
8,300 | def isfile_strict(path):
    """Same as os.path.isfile() but does not swallow EACCES / EPERM
    exceptions, see:
    http://mail.python.org/pipermail/python-dev/2012-June/120787.html
    """
    try:
        st = os.stat(path)
    except __HOLE__:
        err = sys.exc_info()[1]
        if err.errno in (errno.EPERM, errno.EACCES):
            raise
        return False
    else:
        return stat.S_ISREG(st.st_mode)

# --- constants | OSError | dataset/ETHPy150Open packages/psutil/psutil/_common.py/isfile_strict |
8,301 | def get_provider(self, cloud_prop):
    """
    Return a suitable cloud Provider object for the given cloud properties
    input and specifically the 'provider' type attribute.
    """
    provider_id = cloud_prop.get("provider")
    if not provider_id:
        raise errors.CloudError("cloud 'provider' property not set")

    try:
        provider_class = PROVIDERS[provider_id]
    except __HOLE__:
        raise errors.CloudError("unknown cloud provider %r" % (provider_id,))

    key = provider_class.get_provider_key(cloud_prop)
    cached = self.providers.get(key)
    if not cached:
        cached = provider_class(cloud_prop)
        self.providers[key] = cached
        return cached

    return cached | KeyError | dataset/ETHPy150Open ohmu/poni/poni/cloud.py/Sky.get_provider |
8,302 | def unquote_string(string):
    """Unquote a string with JavaScript rules.  The string has to start with
    string delimiters (``'`` or ``"``.)

    :return: a string
    """
    assert string and string[0] == string[-1] and string[0] in '"\'', \
        'string provided is not properly delimited'
    string = line_join_re.sub('\\1', string[1:-1])
    result = []
    add = result.append
    pos = 0

    while 1:
        # scan for the next escape
        escape_pos = string.find('\\', pos)
        if escape_pos < 0:
            break
        add(string[pos:escape_pos])

        # check which character is escaped
        next_char = string[escape_pos + 1]
        if next_char in escapes:
            add(escapes[next_char])

        # unicode escapes.  tries to consume up to four characters of
        # hexadecimal characters and try to interpret them as unicode
        # character point.  If there is no such character point, put
        # all the consumed characters into the string.
        elif next_char in 'uU':
            escaped = uni_escape_re.match(string, escape_pos + 2)
            if escaped is not None:
                escaped_value = escaped.group()
                if len(escaped_value) == 4:
                    try:
                        add(unichr(int(escaped_value, 16)))
                    except __HOLE__:
                        pass
                    else:
                        pos = escape_pos + 6
                        continue
                add(next_char + escaped_value)
                pos = escaped.end()
                continue
            else:
                add(next_char)

        # bogus escape.  Just remove the backslash.
        else:
            add(next_char)
        pos = escape_pos + 2

    if pos < len(string):
        add(string[pos:])

    return u''.join(result) | ValueError | dataset/ETHPy150Open mbr/Babel-CLDR/babel/messages/jslexer.py/unquote_string |
8,303 | @classmethod
def build_soft_chain(cls, files):
    """
    Build a list of nodes "soft" linked. This means that each has an id
    (an integer value) and possibly a backref which is also an integer.
    Not a truly "linked" list
    Returns an array of SimpleNodes
    :rtype : list
    """
    nodes = []

    for f in files:
        if not Constants.FILENAME_STANDARD.search(f):
            continue

        try:
            my_file = open(os.path.join(Constants.ALTER_DIR, f))
            head = list(islice(my_file, 4))
        except __HOLE__, ex:
            raise ReadError("Could not open file '%s'.\n\t=>%s" % (os.path.join(Constants.ALTER_DIR, f), ex.message))

        if not MetaDataUtil.parse_direction(head) == 'up':
            continue

        meta_data = MetaDataUtil.parse_meta(head)

        if 'ref' not in meta_data:
            continue

        node = SimpleNode(filename=f, id=meta_data['ref'])
        if 'backref' in meta_data:
            node.backref = meta_data['backref']
        if 'require-env' in meta_data:
            node.require_env = MetaDataUtil.parse_env(meta_data['require-env'])
        if 'skip-env' in meta_data:
            if 'require-env' in meta_data:
                raise Exception('Cannot use skip-env with require-env')
            node.skip_env = MetaDataUtil.parse_env(meta_data['skip-env'])
        node.meta = meta_data

        nodes.append(node)

    return nodes | OSError | dataset/ETHPy150Open appnexus/schema-tool/schematool/util/chain.py/ChainUtil.build_soft_chain |
8,304 | def visit_uiexample_html(self, node):
    global should_export_flexx_deps
    # Fix for rtd
    if not hasattr(node, 'code'):
        return
    # Get code
    code = ori_code = node.code.strip() + '\n'
    ori_code = '\n'.join([' '*8 + x for x in ori_code.splitlines()])  # for reporting
    # Is this a simple example?
    if 'import' not in code:
        code = SIMPLE_CODE_T + '\n '.join([line for line in code.splitlines()])
    # Get id and filename
    this_id = hashlib.md5(code.encode('utf-8')).hexdigest()
    fname = 'example%s.html' % this_id
    filename_html = os.path.join(HTML_DIR, 'ui', 'examples', fname)
    filename_py = os.path.join(HTML_DIR, 'ui', 'examples', 'example%s.py' % this_id)
    # Compose code
    code += '\n\n'
    if 'class MyApp' in code:
        code += 'App = MyApp\n'
    elif 'class Example' in code:
        code += 'App = Example\n'
    if not 'app' in code:
        code += 'from flexx import app\n'
    code += 'app.export(App, %r, False)\n' % filename_html
    # Write filename so Python can find the source
    open(filename_py, 'wt', encoding='utf-8').write(code)
    try:
        __import__('example%s' % this_id)  # import to exec
    except __HOLE__ as err:
        err_text = str(err)
        msg = 'Example not generated. <pre>%s</pre>' % err_text
        if os.environ.get('READTHEDOCS', False):
            node.height = 60
            msg = 'This example is not build on read-the-docs. <pre>%s</pre>' % err_text
        open(filename_html, 'wt', encoding='utf-8').write(msg)
        warnings.warn('Ui example dependency not met: %s' % err_text)
    except Exception as err:
        err_text = str(err)
        msg = 'Example not generated. <pre>%s</pre>' % err_text
        open(filename_html, 'wt', encoding='utf-8').write(msg.replace('\\n', '<br />'))
        raise RuntimeError('Could not create ui example: %s\n%s' % (err_text, ori_code))
        #print('Could not create ui example: %s\n%s' % (err_text, ori_code))
    rel_path = '../ui/examples/' + fname
    # Styles
    astyle = 'font-size:small; float:right;'
    dstyle = 'width: 500px; height: %ipx; align: center; resize:both; overflow: hidden; box-shadow: 5px 5px 5px #777;'
    istyle = 'width: 100%; height: 100%; border: 2px solid #094;'
    # Show app in iframe, wrapped in a resizable div
    self.body.append("<a target='new' href='%s' style='%s'>open in new tab</a>" % (rel_path, astyle))
    self.body.append("<div style='%s'>" % dstyle % node.height)
    self.body.append("<iframe src='%s' style='%s'>iframe not supported</iframe>" % (rel_path, istyle))
    self.body.append("</div>")
    self.body.append("<br />") | ImportError | dataset/ETHPy150Open zoofIO/flexx/docs/scripts/uiexample.py/visit_uiexample_html |
8,305 | def testCos(self):
    self.assertRaises(TypeError, math.cos)
    self.ftest('cos(-pi/2)', math.cos(-math.pi/2), 0)
    self.ftest('cos(0)', math.cos(0), 1)
    self.ftest('cos(pi/2)', math.cos(math.pi/2), 0)
    self.ftest('cos(pi)', math.cos(math.pi), -1)
    try:
        self.assertTrue(math.isnan(math.cos(INF)))
        self.assertTrue(math.isnan(math.cos(NINF)))
    except __HOLE__:
        self.assertRaises(ValueError, math.cos, INF)
        self.assertRaises(ValueError, math.cos, NINF)
    self.assertTrue(math.isnan(math.cos(NAN))) | ValueError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_math.py/MathTests.testCos |
8,306 | @requires_IEEE_754
@unittest.skipIf(HAVE_DOUBLE_ROUNDING,
                 "fsum is not exact on machines with double rounding")
def testFsum(self):
    # math.fsum relies on exact rounding for correct operation.
    # There's a known problem with IA32 floating-point that causes
    # inexact rounding in some situations, and will cause the
    # math.fsum tests below to fail; see issue #2937.  On non IEEE
    # 754 platforms, and on IEEE 754 platforms that exhibit the
    # problem described in issue #2937, we simply skip the whole
    # test.

    # Python version of math.fsum, for comparison.  Uses a
    # different algorithm based on frexp, ldexp and integer
    # arithmetic.
    from sys import float_info
    mant_dig = float_info.mant_dig
    etiny = float_info.min_exp - mant_dig

    def msum(iterable):
        """Full precision summation.  Compute sum(iterable) without any
        intermediate accumulation of error.  Based on the 'lsum' function
        at http://code.activestate.com/recipes/393090/
        """
        tmant, texp = 0, 0
        for x in iterable:
            mant, exp = math.frexp(x)
            mant, exp = int(math.ldexp(mant, mant_dig)), exp - mant_dig
            if texp > exp:
                tmant <<= texp-exp
                texp = exp
            else:
                mant <<= exp-texp
            tmant += mant
        # Round tmant * 2**texp to a float.  The original recipe
        # used float(str(tmant)) * 2.0**texp for this, but that's
        # a little unsafe because str -> float conversion can't be
        # relied upon to do correct rounding on all platforms.
        tail = max(len(bin(abs(tmant)))-2 - mant_dig, etiny - texp)
        if tail > 0:
            h = 1 << (tail-1)
            tmant = tmant // (2*h) + bool(tmant & h and tmant & 3*h-1)
            texp += tail
        return math.ldexp(tmant, texp)

    test_values = [
        ([], 0.0),
        ([0.0], 0.0),
        ([1e100, 1.0, -1e100, 1e-100, 1e50, -1.0, -1e50], 1e-100),
        ([2.0**53, -0.5, -2.0**-54], 2.0**53-1.0),
        ([2.0**53, 1.0, 2.0**-100], 2.0**53+2.0),
        ([2.0**53+10.0, 1.0, 2.0**-100], 2.0**53+12.0),
        ([2.0**53-4.0, 0.5, 2.0**-54], 2.0**53-3.0),
        ([1./n for n in range(1, 1001)],
         float.fromhex('0x1.df11f45f4e61ap+2')),
        ([(-1.)**n/n for n in range(1, 1001)],
         float.fromhex('-0x1.62a2af1bd3624p-1')),
        ([1.7**(i+1)-1.7**i for i in range(1000)] + [-1.7**1000], -1.0),
        ([1e16, 1., 1e-16], 10000000000000002.0),
        ([1e16-2., 1.-2.**-53, -(1e16-2.), -(1.-2.**-53)], 0.0),
        # exercise code for resizing partials array
        ([2.**n - 2.**(n+50) + 2.**(n+52) for n in range(-1074, 972, 2)] +
         [-2.**1022],
         float.fromhex('0x1.5555555555555p+970')),
        ]

    for i, (vals, expected) in enumerate(test_values):
        try:
            actual = math.fsum(vals)
        except OverflowError:
            self.fail("test %d failed: got OverflowError, expected %r "
                      "for math.fsum(%.100r)" % (i, expected, vals))
        except __HOLE__:
            self.fail("test %d failed: got ValueError, expected %r "
                      "for math.fsum(%.100r)" % (i, expected, vals))
        self.assertEqual(actual, expected)

    from random import random, gauss, shuffle
    for j in range(1000):
        vals = [7, 1e100, -7, -1e100, -9e-20, 8e-20] * 10
        s = 0
        for i in range(200):
            v = gauss(0, random()) ** 7 - s
            s += v
            vals.append(v)
        shuffle(vals)

        s = msum(vals)
        self.assertEqual(msum(vals), math.fsum(vals)) | ValueError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_math.py/MathTests.testFsum |
8,307 | def testSin(self):
    self.assertRaises(TypeError, math.sin)
    self.ftest('sin(0)', math.sin(0), 0)
    self.ftest('sin(pi/2)', math.sin(math.pi/2), 1)
    self.ftest('sin(-pi/2)', math.sin(-math.pi/2), -1)
    try:
        self.assertTrue(math.isnan(math.sin(INF)))
        self.assertTrue(math.isnan(math.sin(NINF)))
    except __HOLE__:
        self.assertRaises(ValueError, math.sin, INF)
        self.assertRaises(ValueError, math.sin, NINF)
    self.assertTrue(math.isnan(math.sin(NAN))) | ValueError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_math.py/MathTests.testSin |
8,308 | def test_exceptions(self):
    try:
        x = math.exp(-1000000000)
    except:
        # mathmodule.c is failing to weed out underflows from libm, or
        # we've got an fp format with huge dynamic range
        self.fail("underflowing exp() should not have raised "
                  "an exception")
    if x != 0:
        self.fail("underflowing exp() should have returned 0")

    # If this fails, probably using a strict IEEE-754 conforming libm, and x
    # is +Inf afterwards.  But Python wants overflows detected by default.
    try:
        x = math.exp(1000000000)
    except OverflowError:
        pass
    else:
        self.fail("overflowing exp() didn't trigger OverflowError")

    # If this fails, it could be a puzzle.  One odd possibility is that
    # mathmodule.c's macros are getting confused while comparing
    # Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
    # as a result (and so raising OverflowError instead).
    try:
        x = math.sqrt(-1.0)
    except __HOLE__:
        pass
    else:
        self.fail("sqrt(-1) didn't raise ValueError") | ValueError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_math.py/MathTests.test_exceptions |
8,309 | @unittest.skip("testfile not supported")
@requires_IEEE_754
def test_testfile(self):
    for id, fn, ar, ai, er, ei, flags in parse_testfile(test_file):
        # Skip if either the input or result is complex, or if
        # flags is nonempty
        if ai != 0. or ei != 0. or flags:
            continue
        if fn in ['rect', 'polar']:
            # no real versions of rect, polar
            continue
        func = getattr(math, fn)
        try:
            result = func(ar)
        except __HOLE__ as exc:
            message = (("Unexpected ValueError: %s\n " +
                        "in test %s:%s(%r)\n") % (exc.args[0], id, fn, ar))
            self.fail(message)
        except OverflowError:
            message = ("Unexpected OverflowError in " +
                       "test %s:%s(%r)\n" % (id, fn, ar))
            self.fail(message)
        self.ftest("%s:%s(%r)" % (id, fn, ar), result, er) | ValueError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_math.py/MathTests.test_testfile |
8,310 | @unittest.skip("mtestfile not supported")
@requires_IEEE_754
def test_mtestfile(self):
    fail_fmt = "{}:{}({!r}): expected {!r}, got {!r}"
    failures = []
    for id, fn, arg, expected, flags in parse_mtestfile(math_testcases):
        func = getattr(math, fn)

        if 'invalid' in flags or 'divide-by-zero' in flags:
            expected = 'ValueError'
        elif 'overflow' in flags:
            expected = 'OverflowError'

        try:
            got = func(arg)
        except __HOLE__:
            got = 'ValueError'
        except OverflowError:
            got = 'OverflowError'

        accuracy_failure = None
        if isinstance(got, float) and isinstance(expected, float):
            if math.isnan(expected) and math.isnan(got):
                continue
            if not math.isnan(expected) and not math.isnan(got):
                if fn == 'lgamma':
                    # we use a weaker accuracy test for lgamma;
                    # lgamma only achieves an absolute error of
                    # a few multiples of the machine accuracy, in
                    # general.
                    accuracy_failure = acc_check(expected, got,
                                                 rel_err = 5e-15,
                                                 abs_err = 5e-15)
                elif fn == 'erfc':
                    # erfc has less-than-ideal accuracy for large
                    # arguments (x ~ 25 or so), mainly due to the
                    # error involved in computing exp(-x*x).
                    #
                    # XXX Would be better to weaken this test only
                    # for large x, instead of for all x.
                    accuracy_failure = ulps_check(expected, got, 2000)
                else:
                    accuracy_failure = ulps_check(expected, got, 20)
                if accuracy_failure is None:
                    continue

        if isinstance(got, str) and isinstance(expected, str):
            if got == expected:
                continue

        fail_msg = fail_fmt.format(id, fn, arg, expected, got)
        if accuracy_failure is not None:
            fail_msg += ' ({})'.format(accuracy_failure)
        failures.append(fail_msg)

    if failures:
        self.fail('Failures in test_mtestfile:\n ' +
                  '\n '.join(failures)) | ValueError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_math.py/MathTests.test_mtestfile |
8,311 | def try_ipv6_socket():
    """Determine if system really supports IPv6"""
    if not socket.has_ipv6:
        return False
    try:
        socket.socket(socket.AF_INET6).close()
        return True
    except __HOLE__ as error:
        logger.debug(
            'Platform supports IPv6, but socket creation failed, '
            'disabling: %s',
            encoding.locale_decode(error))
    return False


#: Boolean value that indicates if creating an IPv6 socket will succeed. | IOError | dataset/ETHPy150Open mopidy/mopidy/mopidy/internal/network.py/try_ipv6_socket |
8,312 | def struct(typename, field_names, verbose=False):
    """Returns a new class with named fields.

    >>> Point = struct('Point', 'x y')
    >>> Point.__doc__           # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)     # instantiate with positional args or keywords
    >>> p.x + p.y               # fields accessible by name
    33
    >>> d = p._asdict()         # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)              # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)       # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)
    """

    # Parse and validate the field names.  Validation serves two purposes,
    # generating informative error messages and preventing template injection attacks.
    if isinstance(field_names, basestring):
        field_names = field_names.replace(',', ' ').split()  # names separated by whitespace and/or commas
    field_names = tuple(map(str, field_names))
    for name in (typename,) + field_names:
        if not min(c.isalnum() or c=='_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen_names = set()
    for name in field_names:
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)

    # Create and fill-in the class template
    argtxt = repr(field_names).replace("'", "")[1:-1]  # tuple repr without parens or quotes
    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
    valuestxt = ', '.join('self.%s' % name for name in field_names)
    othervaluestxt = ', '.join('other.%s' % name for name in field_names)
    template = '''class %(typename)s(object):
    '%(typename)s(%(argtxt)s)' \n
    __slots__ = %(field_names)r \n
    _fields = %(field_names)r \n
    def __init__(self, %(argtxt)s):\n'''
    for name in field_names:
        template += '''        self.%s = %s\n''' % (name, name)
    template += '''        return \n
    def __repr__(self):
        return '%(typename)s(%(reprtxt)s)' %% (%(valuestxt)s) \n
    def __cmp__(self, other):
        if not isinstance(other, self.__class__):
            return -1
        return cmp((%(valuestxt)s), (%(othervaluestxt)s)) \n
    def _asdict(self):
        'Return a new dict which maps field names to their values'
        d = {}
        for field in self._fields:
            d[field] = getattr(self, field)
        return d \n
    def _replace(_self, **kwds):
        'Return a new %(typename)s object replacing specified fields with new values'
        from copy import copy
        result = copy(_self)
        for key, value in kwds.items():
            setattr(result, key, value)
        return result \n\n'''
    template = template % locals()
    if verbose:
        print template

    # Execute the template string in a temporary namespace
    namespace = dict(__name__='struct_%s' % typename)
    try:
        exec template in namespace
    except SyntaxError, e:
        raise SyntaxError(e.message + ':\n' + template)
    result = namespace[typename]

    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the struct is created.  Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
    except (AttributeError, __HOLE__):
        pass

    return result | ValueError | dataset/ETHPy150Open probcomp/bayeslite/external/lemonade/dist/lemonade/ccruft.py/struct |
8,313 | def search_fields_to_dict(fields):
    """
    In ``SearchableQuerySet`` and ``SearchableManager``, search fields
    can either be a sequence, or a dict of fields mapped to weights.
    This function converts sequences to a dict mapped to even weights,
    so that we're consistently dealing with a dict of fields mapped to
    weights, eg: ("title", "content") -> {"title": 1, "content": 1}
    """
    if not fields:
        return {}
    try:
        int(list(dict(fields).values())[0])
    except (__HOLE__, ValueError):
        fields = dict(zip(fields, [1] * len(fields)))
    return fields | TypeError | dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/core/managers.py/search_fields_to_dict |
8,314 | def search(self, *args, **kwargs):
    """
    Proxy to queryset's search method for the manager's model and
    any models that subclass from this manager's model if the
    model is abstract.
    """
    if not settings.SEARCH_MODEL_CHOICES:
        # No choices defined - build a list of leaf models (those
        # without subclasses) that inherit from Displayable.
        models = [m for m in apps.get_models()
                  if issubclass(m, self.model)]
        parents = reduce(ior, [set(m._meta.get_parent_list())
                               for m in models])
        models = [m for m in models if m not in parents]
    elif getattr(self.model._meta, "abstract", False):
        # When we're combining model subclasses for an abstract
        # model (eg Displayable), we only want to use models that
        # are represented by the ``SEARCH_MODEL_CHOICES`` setting.
        # Now this setting won't contain an exact list of models
        # we should use, since it can define superclass models such
        # as ``Page``, so we check the parent class list of each
        # model when determining whether a model falls within the
        # ``SEARCH_MODEL_CHOICES`` setting.
        search_choices = set()
        models = set()
        parents = set()
        errors = []
        for name in settings.SEARCH_MODEL_CHOICES:
            try:
                model = apps.get_model(*name.split(".", 1))
            except LookupError:
                errors.append(name)
            else:
                search_choices.add(model)
        if errors:
            raise ImproperlyConfigured("Could not load the model(s) "
                                       "%s defined in the 'SEARCH_MODEL_CHOICES' setting."
                                       % ", ".join(errors))

        for model in apps.get_models():
            # Model is actually a subclass of what we're
            # searching (eg Displayable)
            is_subclass = issubclass(model, self.model)
            # Model satisfies the search choices list - either
            # there are no search choices, model is directly in
            # search choices, or its parent is.
            this_parents = set(model._meta.get_parent_list())
            in_choices = not search_choices or model in search_choices
            in_choices = in_choices or this_parents & search_choices
            if is_subclass and (in_choices or not search_choices):
                # Add to models we'll search. Also maintain a parent
                # set, used below for further refinement of models
                # list to search.
                models.add(model)
                parents.update(this_parents)

        # Strip out any models that are superclasses of models,
        # specifically the Page model which will generally be the
        # superclass for all custom content types, since if we
        # query the Page model as well, we will get duplicate
        # results.
        models -= parents
    else:
        models = [self.model]

    all_results = []
    user = kwargs.pop("for_user", None)
    for model in models:
        try:
            queryset = model.objects.published(for_user=user)
        except __HOLE__:
            queryset = model.objects.get_queryset()
        all_results.extend(queryset.search(*args, **kwargs))
    return sorted(all_results, key=lambda r: r.result_count, reverse=True) | AttributeError | dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/core/managers.py/SearchableManager.search |
8,315 | def get_default(key):
    """Get a default value from the current app's configuration.

    Currently returns None when no app is in context:

    >>> get_default('NO_APP_IS_LOADED') is None
    True
    >>>
    """
    try:
        return current_app.config.get(key, None)
    except (__HOLE__, RuntimeError):
        # AttributeError or RuntimeError depending on upstream versions.
        # No app is in context, return None as documented.
        return None | AttributeError | dataset/ETHPy150Open willowtreeapps/tango-core/tango/filters.py/get_default |
8,316 | def get(self, key, default=Empty, creator=Empty, expire=None):
    """
    :param default: if default is callable then invoke it, save it and return it
    """
    try:
        return self.storage.get(key)
    except __HOLE__ as e:
        if creator is not Empty:
            if callable(creator):
                v = creator()
            else:
                v = creator
            self.set(key, v, expire)
            return v
        else:
            if default is not Empty:
                if callable(default):
                    v = default()
                    return v
                return default
            else:
                raise | KeyError | dataset/ETHPy150Open limodou/uliweb/uliweb/lib/weto/cache.py/Cache.get |
8,317 | def cache(self, k=None, expire=None):
    def _f(func):
        def f(*args, **kwargs):
            if not k:
                r = repr(args) + repr(sorted(kwargs.items()))
                key = func.__module__ + '.' + func.__name__ + r
            else:
                key = k
            try:
                ret = self.get(key)
                return ret
            except __HOLE__:
                ret = func(*args, **kwargs)
                self.set(key, ret, expire=expire)
                return ret
        wrap_func(f, func)
        return f
    return _f | KeyError | dataset/ETHPy150Open limodou/uliweb/uliweb/lib/weto/cache.py/Cache.cache |
8,318 | def get_constr_generator(args):
    feats = get_feat_cls(args)(args.actionfile)
    try:
        feats.set_landmark_file(args.landmarkfile)
    except __HOLE__:
        pass
    marg = BatchCPMargin(feats)
    constr_gen = ConstraintGenerator(feats, marg, args.actionfile)
    return constr_gen | AttributeError | dataset/ETHPy150Open rll/lfd/lfd/mmqe/build.py/get_constr_generator |
8,319 | def optimize_model(args):
    print 'Found model: {}'.format(args.modelfile)
    actions = get_actions(args)
    feat_cls = get_feat_cls(args)
    mm_model = get_model_cls(args).read(args.modelfile, actions,
                                        feat_cls.get_size(len(actions)))
    try:
        mm_model.scale_objective(args.C, args.D)
    except __HOLE__:
        mm_model.scale_objective(args.C)
    # Use dual simplex method
    mm_model.model.setParam('method', 1)
    # mm_model.model.setParam('method', 0)   # Use primal simplex method to solve model
    # mm_model.model.setParam('threads', 1)  # Use single thread instead of maximum
    # barrier method (#2) is default for QP, but uses more memory and could lead to error
    mm_model.optimize_model()
    # mm_model.model.setParam('method', 2)   # try solving model with barrier
    assert mm_model.model.status == 2
    mm_model.save_weights_to_file(args.weightfile) | TypeError | dataset/ETHPy150Open rll/lfd/lfd/mmqe/build.py/optimize_model |
8,320 | def id_map_type(val):
    maps = val.split(',')
    id_maps = []
    for m in maps:
        map_vals = m.split(':')
        if len(map_vals) != 3:
            msg = ('Invalid id map %s, correct syntax is '
                   'guest-id:host-id:count.')
            raise argparse.ArgumentTypeError(msg % val)
        try:
            vals = [int(i) for i in map_vals]
        except __HOLE__:
            msg = 'Invalid id map %s, values must be integers' % val
            raise argparse.ArgumentTypeError(msg)
        id_maps.append(tuple(vals))
    return id_maps | ValueError | dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/cmd/idmapshift.py/id_map_type |
8,321 | def get_hash(path, form='sha1', chunk_size=4096):
    """Generate a hash digest string for a file."""
    try:
        hash_type = getattr(hashlib, form)
    except __HOLE__:
        raise ValueError('Invalid hash type: {0}'.format(form))
    with open(path, 'rb') as ifile:
        hash_obj = hash_type()
        # read the file in chunks, not the entire file
        for chunk in iter(lambda: ifile.read(chunk_size), b''):
            hash_obj.update(chunk)
        return hash_obj.hexdigest() | AttributeError | dataset/ETHPy150Open saltstack/salt/salt/client/ssh/ssh_py_shim.py/get_hash |
8,322 | def handle(self, *args, **options):
    datapath = options['datapath'][0]
    with open(datapath, 'rU') as csvfile:
        filereader = csv.reader(csvfile, delimiter=',')
        for row in filereader:
            acr_id = row[0]
            name = row[1]
            order = row[2]
            try:
                acrelation = ACRelation.objects.get(pk=acr_id)
                if name:
                    acrelation.name_as_entered = name
                if order:
                    acrelation.data_display_order = order
                acrelation.save()
            except __HOLE__:
                print "Could not find object with id " + acr_id | ObjectDoesNotExist | dataset/ETHPy150Open upconsulting/IsisCB/isiscb/isisdata/management/commands/missing_acr_data_cmd.py/Command.handle |
8,323 | @signalcommand
def handle(self, *args, **options):
    if args:
        appname, = args

    style = color_style()

    if getattr(settings, 'ADMIN_FOR', None):
        settings_modules = [__import__(m, {}, {}, ['']) for m in settings.ADMIN_FOR]
    else:
        settings_modules = [settings]

    for settings_mod in settings_modules:
        for app in settings_mod.INSTALLED_APPS:
            try:
                templatetag_mod = __import__(app + '.templatetags', {}, {}, [''])
            except __HOLE__:
                continue
            mod_path = inspect.getabsfile(templatetag_mod)
            mod_files = os.listdir(os.path.dirname(mod_path))
            tag_files = [i.rstrip('.py') for i in mod_files if i.endswith('.py') and i[0] != '_']
            app_labeled = False
            for taglib in tag_files:
                try:
                    lib = get_library(taglib)
                except:
                    continue
                if not app_labeled:
                    self.add_result('App: %s' % style.MODULE_NAME(app))
                    app_labeled = True
                self.add_result('load: %s' % style.TAGLIB(taglib), 1)
                for items, label, style_func in [(lib.tags, 'Tag:', style.TAG), (lib.filters, 'Filter:', style.FILTER)]:
                    for item in items:
                        self.add_result('%s %s' % (label, style_func(item)), 2)
                        doc = inspect.getdoc(items[item])
                        if doc:
                            self.add_result(format_block(doc, 12))
    return self.results
    # return "\n".join(results) | ImportError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/commands/show_templatetags.py/Command.handle |
8,324 | def dump(typ, val, tb, include_local_traceback):
    """Dumps the given exceptions info, as returned by ``sys.exc_info()``

    :param typ: the exception's type (class)
    :param val: the exceptions' value (instance)
    :param tb: the exception's traceback (a ``traceback`` object)
    :param include_local_traceback: whether or not to include the local traceback
                                    in the dumped info. This may expose the other
                                    side to implementation details (code) and
                                    package structure, and may theoretically impose
                                    a security risk.

    :returns: A tuple of ``((module name, exception name), arguments, attributes,
              traceback text)``. This tuple can be safely passed to
              :func:`brine.dump <rpyc.core.brine.dump>`
    """
    if typ is StopIteration:
        return consts.EXC_STOP_ITERATION  # optimization
    if type(typ) is str:
        return typ

    if include_local_traceback:
        tbtext = "".join(traceback.format_exception(typ, val, tb))
    else:
        tbtext = "<traceback denied>"
    attrs = []
    args = []
    ignored_attrs = frozenset(["_remote_tb", "with_traceback"])
    for name in dir(val):
        if name == "args":
            for a in val.args:
                if brine.dumpable(a):
                    args.append(a)
                else:
                    args.append(repr(a))
        elif name.startswith("_") or name in ignored_attrs:
            continue
        else:
            try:
                attrval = getattr(val, name)
            except __HOLE__:
                # skip this attr. see issue #108
                continue
            if not brine.dumpable(attrval):
                attrval = repr(attrval)
            attrs.append((name, attrval))
    return (typ.__module__, typ.__name__), tuple(args), tuple(attrs), tbtext | AttributeError | dataset/ETHPy150Open tomerfiliba/rpyc/rpyc/core/vinegar.py/dump |
8,325 | def collect(self):
    for host in self.config['hosts']:
        matches = re.search(
            '^([^:]*):([^@]*)@([^:]*):?([^/]*)/([^/]*)/?(.*)$', host)

        if not matches:
            continue

        params = {}
        params['host'] = matches.group(3)
        try:
            params['port'] = int(matches.group(4))
        except __HOLE__:
            params['port'] = 3306
        params['db'] = matches.group(5)
        params['user'] = matches.group(1)
        params['passwd'] = matches.group(2)

        nickname = matches.group(6)
        if len(nickname):
            nickname += '.'

        self.connect(params=params)
        if self.config['slave']:
            self.slave_load(nickname, 'thread/sql/slave_io')
            self.slave_load(nickname, 'thread/sql/slave_sql')
        self.db.close() | ValueError | dataset/ETHPy150Open python-diamond/Diamond/src/collectors/mysqlstat/mysql55.py/MySQLPerfCollector.collect |
8,326 | @user_view
def emailchange(request, user, token, hash):
    try:
        _uid, newemail = EmailResetCode.parse(token, hash)
    except __HOLE__:
        return http.HttpResponse(status=400)

    if _uid != user.id:
        # I'm calling this a warning because invalid hashes up to this point
        # could be any number of things, but this is a targeted attack from
        # one user account to another
        log.warning((u"[Tampering] Valid email reset code for UID (%s) "
                     u"attempted to change email address for user (%s)") %
                    (_uid, user))
        return http.HttpResponse(status=400)

    if UserProfile.objects.filter(email=newemail).exists():
        log.warning((u"[Tampering] User (%s) tries to change his email to "
                     u"an existing account with the same email address (%s)") %
                    (user, newemail))
        return http.HttpResponse(status=400)

    user.email = newemail
    user.save()

    l = {'user': user, 'newemail': newemail}
    log.info(u"User (%(user)s) confirmed new email address (%(newemail)s)" % l)
    messages.success(
        request, _('Your email address was changed successfully'),
        _(u'From now on, please use {0} to log in.').format(newemail))

    return http.HttpResponseRedirect(reverse('users.edit')) | ValueError | dataset/ETHPy150Open mozilla/addons-server/src/olympia/users/views.py/emailchange |
8,327 | @never_cache
@anonymous_csrf
def password_reset_confirm(request, uidb64=None, token=None):
    """
    Pulled from django contrib so that we can add user into the form
    so then we can show relevant messages about the user.
    """
    assert uidb64 is not None and token is not None
    user = None
    try:
        uid_int = urlsafe_base64_decode(uidb64)
        user = UserProfile.objects.get(id=uid_int)
    except (__HOLE__, UserProfile.DoesNotExist, TypeError):
        pass

    if (user is not None and user.fxa_migrated()
            and waffle.switch_is_active('fxa-auth')):
        migrated = True
        validlink = False
        form = None
    elif user is not None and default_token_generator.check_token(user, token):
        migrated = False
        validlink = True
        if request.method == 'POST':
            form = forms.SetPasswordForm(user, request.POST)
            if form.is_valid():
                form.save()
                log_cef('Password Changed', 5, request,
                        username=user.username,
                        signature='PASSWORDCHANGED',
                        msg='User changed password')
                return redirect(reverse('django.contrib.auth.'
                                        'views.password_reset_complete'))
        else:
            form = forms.SetPasswordForm(user)
    else:
        migrated = False
        validlink = False
        form = None

    return render(request, 'users/pwreset_confirm.html',
                  {'form': form, 'validlink': validlink, 'migrated': migrated}) | ValueError | dataset/ETHPy150Open mozilla/addons-server/src/olympia/users/views.py/password_reset_confirm |
8,328 | @never_cache
def unsubscribe(request, hash=None, token=None, perm_setting=None):
    """
    Pulled from django contrib so that we can add user into the form
    so then we can show relevant messages about the user.
    """
    assert hash is not None and token is not None
    user = None
    try:
        email = UnsubscribeCode.parse(token, hash)
        user = UserProfile.objects.get(email=email)
    except (__HOLE__, UserProfile.DoesNotExist):
        pass

    perm_settings = []
    if user is not None:
        unsubscribed = True
        if not perm_setting:
            # TODO: make this work. nothing currently links to it, though.
            perm_settings = [l for l in notifications.NOTIFICATIONS
                             if not l.mandatory]
        else:
            perm_setting = notifications.NOTIFICATIONS_BY_SHORT[perm_setting]
            UserNotification.update_or_create(
                update={'enabled': False},
                user=user, notification_id=perm_setting.id)
            perm_settings = [perm_setting]
    else:
        unsubscribed = False
        email = ''

    return render(request, 'users/unsubscribe.html',
                  {'unsubscribed': unsubscribed, 'email': email,
                   'perm_settings': perm_settings}) | ValueError | dataset/ETHPy150Open mozilla/addons-server/src/olympia/users/views.py/unsubscribe |
8,329 | def monitor_query_queue(self,
                        job_id,
                        job_metadata,
                        query_object=None,
                        callback_function=None):
    query_object = query_object or self
    started_checking = datetime.datetime.utcnow()
    notification_identifier = ', '.join(filter(None, job_metadata.values()))
    self.logger.info('Queued request for %s, received job id: %s',
                     notification_identifier, job_id)
    while True:
        try:
            job_collection = query_object._authenticated_service.jobs()
            job_collection_state = job_collection.get(
                projectId=self._project_id,
                jobId=job_id).execute()
        except (SSLError, Exception, __HOLE__, HttpError,
                httplib2.ServerNotFoundError) as caught_error:
            self.logger.warn(
                'Encountered error (%s) monitoring for %s, could '
                'be temporary, not bailing out.', caught_error,
                notification_identifier)
            job_collection_state = None
        if job_collection_state is not None:
            time_waiting = int((datetime.datetime.utcnow() -
                                started_checking).total_seconds())
            if job_collection_state['status']['state'] == 'RUNNING':
                self.logger.info(
                    'Waiting for %s to complete, spent %d seconds so '
                    'far.', notification_identifier, time_waiting)
                time.sleep(10)
            elif job_collection_state['status']['state'] == 'PENDING':
                self.logger.info(
                    'Waiting for %s to submit, spent %d seconds so '
                    'far.', notification_identifier, time_waiting)
                time.sleep(60)
            elif ((job_collection_state['status']['state'] == 'DONE') and
                    callback_function is not None):
                self.logger.info('Found completion status for %s.',
                                 notification_identifier)
                callback_function(job_id, query_object=self)
                break
            else:
                raise Exception('UnknownBigQueryResponse')
    return None | AttributeError | dataset/ETHPy150Open m-lab/telescope/telescope/external.py/BigQueryCall.monitor_query_queue |
8,330 | def set_charset(self, charset):
    """Set the charset of the payload to a given character set.

    charset can be a Charset instance, a string naming a character set, or
    None.  If it is a string it will be converted to a Charset instance.
    If charset is None, the charset parameter will be removed from the
    Content-Type field.  Anything else will generate a TypeError.

    The message will be assumed to be of type text/* encoded with
    charset.input_charset.  It will be converted to charset.output_charset
    and encoded properly, if needed, when generating the plain text
    representation of the message.  MIME headers (MIME-Version,
    Content-Type, Content-Transfer-Encoding) will be added as needed.
    """
    if charset is None:
        self.del_param('charset')
        self._charset = None
        return
    if isinstance(charset, basestring):
        charset = email.charset.Charset(charset)
    if not isinstance(charset, email.charset.Charset):
        raise TypeError(charset)
    # BAW: should we accept strings that can serve as arguments to the
    # Charset constructor?
    self._charset = charset
    if 'MIME-Version' not in self:
        self.add_header('MIME-Version', '1.0')
    if 'Content-Type' not in self:
        self.add_header('Content-Type', 'text/plain',
                        charset=charset.get_output_charset())
    else:
        self.set_param('charset', charset.get_output_charset())
    if isinstance(self._payload, unicode):
        self._payload = self._payload.encode(charset.output_charset)
    if str(charset) != charset.get_output_charset():
        self._payload = charset.body_encode(self._payload)
    if 'Content-Transfer-Encoding' not in self:
        cte = charset.get_body_encoding()
        try:
            cte(self)
        except __HOLE__:
            self._payload = charset.body_encode(self._payload)
            self.add_header('Content-Transfer-Encoding', cte) | TypeError | dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/email/message.py/Message.set_charset |
8,331 | def _get_params_preserve(self, failobj, header):
    # Like get_params() but preserves the quoting of values.  BAW:
    # should this be part of the public interface?
    missing = object()
    value = self.get(header, missing)
    if value is missing:
        return failobj
    params = []
    for p in _parseparam(';' + value):
        try:
            name, val = p.split('=', 1)
            name = name.strip()
            val = val.strip()
        except __HOLE__:
            # Must have been a bare attribute
            name = p.strip()
            val = ''
        params.append((name, val))
    params = utils.decode_params(params)
    return params | ValueError | dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/email/message.py/Message._get_params_preserve |
8,332 | def _compile(self, name, tmplPath):
    ## @@ consider adding an ImportError raiser here
    code = str(Compiler(file=tmplPath, moduleName=name,
                        mainClassName=name))
    if _cacheDir:
        __file__ = os.path.join(_cacheDir[0],
                                convertTmplPathToModuleName(tmplPath)) + '.py'
        try:
            open(__file__, 'w').write(code)
        except __HOLE__:
            ## @@ TR: need to add some error code here
            traceback.print_exc(file=sys.stderr)
            __file__ = tmplPath
    else:
        __file__ = tmplPath
    co = compile(code+'\n', __file__, 'exec')

    mod = imp.new_module(name)
    mod.__file__ = co.co_filename
    if _cacheDir:
        mod.__orig_file__ = tmplPath  # @@TR: this is used in the WebKit
                                      # filemonitoring code
    mod.__co__ = co
    return mod

##################################################
## FUNCTIONS | OSError | dataset/ETHPy150Open skyostil/tracy/src/generator/Cheetah/ImportHooks.py/CheetahDirOwner._compile |
8,333 | @property
def position(self):
    """
    Gets the marker position (line number)

    :type: int
    """
    try:
        return self.block.blockNumber()
    except __HOLE__:
        return self._position  # not added yet | AttributeError | dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/pyqode/core/panels/marker.py/Marker.position |
8,334 | def get_owned(self, user_id):
    try:
        user = CouchUser.get_by_user_id(user_id, self.domain)
    except KeyError:
        user = None
    try:
        owner_ids = user.get_owner_ids()
    except __HOLE__:
        owner_ids = [user_id]
    closed = {
        CASE_STATUS_OPEN: False,
        CASE_STATUS_CLOSED: True,
        CASE_STATUS_ALL: None,
    }[self.status]
    ids = get_case_ids_in_domain_by_owner(
        self.domain, owner_id__in=owner_ids, closed=closed)
    return self._case_results(ids)

# todo: Make these api functions use generators for streaming
# so that a limit call won't fetch more docs than it needs to
# This could be achieved with something like CommCareCase.paging_view that
# returns a generator but internally batches couch requests
# potentially doubling the batch-size each time in case it really is a lot of data | AttributeError | dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/cloudcare/api.py/CaseAPIHelper.get_owned |
8,335 | def read_csv(user_id, records_path, antennas_path=None, attributes_path=None,
             network=False, describe=True, warnings=True, errors=False):
    """
    Load user records from a CSV file.

    Parameters
    ----------
    user_id : str
        ID of the user (filename)
    records_path : str
        Path of the directory containing all the user files.
    antennas_path : str, optional
        Path of the CSV file containing (place_id, latitude, longitude) values.
        This allows antennas to be mapped to their locations.
    attributes_path : str, optional
        Path of the directory containing attributes files (``key, value`` CSV file).
        Attributes can for instance be variables such as like, age, or gender.
        Attributes can be helpful to compute specific metrics.
    network : bool, optional
        If network is True, bandicoot loads the network of the user's
        correspondents from the same path. Defaults to False.
    describe : boolean
        If describe is True, it will print a description of the loaded user
        to the standard output.
    errors : boolean
        If errors is True, returns a tuple (user, errors), where user is the
        user object and errors are the records which could not be loaded.

    Examples
    --------
    >>> user = bandicoot.read_csv('sample_records', '.')
    >>> print len(user.records)
    10

    >>> user = bandicoot.read_csv('sample_records', 'samples', sample_places.csv')
    >>> print len(user.antennas)
    5

    >>> user = bandicoot.read_csv('sample_records', '.', None, 'sample_attributes.csv')
    >>> print user.attributes['age']
    25

    Notes
    -----
    - The csv files can be single, or double quoted if needed.
    - Empty cells are filled with ``None``. For example, if the column
      ``call_duration`` is empty for one record, its value will be ``None``.
      Other values such as ``"N/A"``, ``"None"``, ``"null"`` will be
      considered as a text.
    """
    antennas = None
    if antennas_path is not None:
        with open(antennas_path, 'rb') as csv_file:
            reader = csv.DictReader(csv_file)
            antennas = dict((d['place_id'], (float(d['latitude']),
                                             float(d['longitude'])))
                            for d in reader)

    user_records = os.path.join(records_path, user_id + '.csv')
    with open(user_records, 'rb') as csv_file:
        reader = csv.DictReader(csv_file)
        records = map(_parse_record, reader)

    attributes = None
    if attributes_path is not None:
        user_attributes = os.path.join(attributes_path, user_id + '.csv')
        try:
            with open(user_attributes, 'rb') as csv_file:
                reader = csv.DictReader(csv_file)
                attributes = dict((d['key'], d['value']) for d in reader)
        except __HOLE__:
            attributes = None

    user, bad_records = load(user_id, records, antennas, attributes, antennas_path,
                             attributes_path=attributes_path, describe=False,
                             warnings=warnings)

    # Loads the network
    if network is True:
        user.network = _read_network(user, records_path, attributes_path,
                                     read_csv, antennas_path)
        user.recompute_missing_neighbors()

    if describe:
        user.describe()

    if errors:
        return user, bad_records
    return user | IOError | dataset/ETHPy150Open yvesalexandre/bandicoot/bandicoot/io.py/read_csv |
8,336 | def read_orange(user_id, records_path, antennas_path=None, attributes_path=None,
                network=False, describe=True, warnings=True, errors=False):
    """
    Load user records from a CSV file in *orange* format:

    ``call_record_type;basic_service;user_msisdn;call_partner_identity;datetime;call_duration;longitude;latitude``

    ``basic_service`` takes one of the following values:

    - 11: telephony;
    - 12: emergency calls;
    - 21: short message (in)
    - 22: short message (out)

    Parameters
    ----------
    user_id : str
        ID of the user (filename)
    records_path : str
        Path of the directory containing all the user files.
    antennas_path : str, optional
        Path of the CSV file containing (place_id, latitude, longitude) values.
        This allows antennas to be mapped to their locations.
    attributes_path : str, optional
        Path of the directory containing attributes files (``key, value`` CSV file).
        Attributes can for instance be variables such as like, age, or gender.
        Attributes can be helpful to compute specific metrics.
    network : bool, optional
        If network is True, bandicoot loads the network of the user's
        correspondents from the same path. Defaults to False.
    describe : boolean
        If describe is True, it will print a description of the loaded user
        to the standard output.
    errors : boolean
        If errors is True, returns a tuple (user, errors), where user is the
        user object and errors are the records which could not be loaded.
    """
    def _parse(reader):
        records = []
        antennas = dict()

        for row in reader:
            direction = 'out' if row['call_record_type'] == '1' else 'in'
            interaction = 'call' if row['basic_service'] in ['11', '12'] else 'text'
            contact = row['call_partner_identity']
            date = datetime.strptime(row['datetime'], "%Y-%m-%d %H:%M:%S")
            call_duration = float(row['call_duration']) if row['call_duration'] != "" else None
            lon, lat = float(row['longitude']), float(row['latitude'])

            latlon = (lat, lon)
            antenna = None
            for key, value in antennas.items():
                if latlon == value:
                    antenna = key
                    break
            if antenna is None:
                antenna = len(antennas) + 1
                antennas[antenna] = latlon

            position = Position(antenna=antenna, location=latlon)

            record = Record(direction=direction,
                            interaction=interaction,
                            correspondent_id=contact,
                            call_duration=call_duration,
                            datetime=date,
                            position=position)
            records.append(record)

        return records, antennas

    user_records = os.path.join(records_path, user_id + ".csv")
    fields = ['call_record_type', 'basic_service', 'user_msisdn',
              'call_partner_identity', 'datetime', 'call_duration',
              'longitude', 'latitude']
    with open(user_records, 'rb') as f:
        reader = csv.DictReader(f, delimiter=";", fieldnames=fields)
        records, antennas = _parse(reader)

    attributes = None
    try:
        if attributes_path is not None:
            attributes_file = os.path.join(attributes_path, user_id + ".csv")
            with open(attributes_file, 'rb') as f:
                reader = csv.DictReader(f, delimiter=";", fieldnames=["key", "value"])
                attributes = {a["key"]: a["value"] for a in reader}
    except __HOLE__:
        pass

    user, bad_records = load(user_id, records, antennas,
                             attributes_path=attributes_path,
                             attributes=attributes, warnings=None,
                             describe=False)

    if network is True:
        user.network = _read_network(user, records_path, attributes_path,
                                     read_orange, antennas_path)
        user.recompute_missing_neighbors()

    if describe:
        user.describe()

    if errors:
        return user, bad_records
    return user | IOError | dataset/ETHPy150Open yvesalexandre/bandicoot/bandicoot/io.py/read_orange |
8,337 | def print_iterate(dataset, gql, namespace=None, msg=''):
    it = iterate(dataset, gql, namespace)
    loaded = 0
    try:
        while True:
            loaded += 1
            if loaded % 1000 == 0:
                print >> sys.stderr, 'loaded', msg, loaded
            string = json.dumps(it.next())
            print string
    except __HOLE__:
        pass
    print >> sys.stderr, 'Done', msg, loaded | StopIteration | dataset/ETHPy150Open murer/dsopz/dsopz/reader.py/print_iterate |
8,338 | @staticmethod
def _convert_legacy_ipv6_netmask(netmask):
    """Handle netmask_v6 possibilities from the database.

    Historically, this was stored as just an integral CIDR prefix,
    but in the future it should be stored as an actual netmask.
    Be tolerant of either here.
    """
    try:
        prefix = int(netmask)
        return netaddr.IPNetwork('1::/%i' % prefix).netmask
    except __HOLE__:
        pass

    try:
        return netaddr.IPNetwork(netmask).netmask
    except netaddr.AddrFormatError:
        raise ValueError(_('IPv6 netmask "%s" must be a netmask '
                           'or integral prefix') % netmask) | ValueError | dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/objects/network.py/Network._convert_legacy_ipv6_netmask |
8,339 | def is_authenticated(self, request, **kwargs):
    """
    Copypasted from tastypie, modified to avoid issues with app-loading and
    custom user model.
    """
    User = get_user_model()
    username_field = User.USERNAME_FIELD
    try:
        username, api_key = self.extract_credentials(request)
    except __HOLE__:
        return self._unauthorized()

    if not username or not api_key:
        return self._unauthorized()

    try:
        lookup_kwargs = {username_field: username}
        user = User.objects.get(**lookup_kwargs)
    except (User.DoesNotExist, User.MultipleObjectsReturned):
        return self._unauthorized()

    if not self.check_active(user):
        return False

    key_auth_check = self.get_key(user, api_key)
    if key_auth_check and not isinstance(key_auth_check, HttpUnauthorized):
        request.user = user

    return key_auth_check | ValueError | dataset/ETHPy150Open python/pythondotorg/pydotorg/resources.py/ApiKeyOrGuestAuthentication.is_authenticated |
8,340 | @slow_test
@testing.requires_testing_data
def test_volume_stc():
    """Test volume STCs
    """
    tempdir = _TempDir()
    N = 100
    data = np.arange(N)[:, np.newaxis]
    datas = [data, data, np.arange(2)[:, np.newaxis]]
    vertno = np.arange(N)
    vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis]]
    vertno_reads = [vertno, vertno, np.arange(2)]
    for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
        stc = VolSourceEstimate(data, vertno, 0, 1)
        fname_temp = op.join(tempdir, 'temp-vl.stc')
        stc_new = stc
        for _ in range(2):
            stc_new.save(fname_temp)
            stc_new = read_source_estimate(fname_temp)
            assert_true(isinstance(stc_new, VolSourceEstimate))
            assert_array_equal(vertno_read, stc_new.vertices)
            assert_array_almost_equal(stc.data, stc_new.data)

    # now let's actually read a MNE-C processed file
    stc = read_source_estimate(fname_vol, 'sample')
    assert_true(isinstance(stc, VolSourceEstimate))
    assert_true('sample' in repr(stc))
    stc_new = stc
    assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
    for _ in range(2):
        fname_temp = op.join(tempdir, 'temp-vol.w')
        stc_new.save(fname_temp, ftype='w')
        stc_new = read_source_estimate(fname_temp)
        assert_true(isinstance(stc_new, VolSourceEstimate))
        assert_array_equal(stc.vertices, stc_new.vertices)
        assert_array_almost_equal(stc.data, stc_new.data)

    # save the stc as a nifti file and export
    try:
        import nibabel as nib
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            src = read_source_spaces(fname_vsrc)
        vol_fname = op.join(tempdir, 'stc.nii.gz')
        stc.save_as_volume(vol_fname, src,
                           dest='surf', mri_resolution=False)
        with warnings.catch_warnings(record=True):  # nib<->numpy
            img = nib.load(vol_fname)
        assert_true(img.shape == src[0]['shape'] + (len(stc.times),))

        with warnings.catch_warnings(record=True):  # nib<->numpy
            t1_img = nib.load(fname_t1)
        stc.save_as_volume(op.join(tempdir, 'stc.nii.gz'), src,
                           dest='mri', mri_resolution=True)
        with warnings.catch_warnings(record=True):  # nib<->numpy
            img = nib.load(vol_fname)
        assert_true(img.shape == t1_img.shape + (len(stc.times),))
        assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
                                  decimal=5)

        # export without saving
        img = stc.as_volume(src, dest='mri', mri_resolution=True)
        assert_true(img.shape == t1_img.shape + (len(stc.times),))
        assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
                                  decimal=5)
    except __HOLE__:
        print('Save as nifti test skipped, needs NiBabel') | ImportError | dataset/ETHPy150Open mne-tools/mne-python/mne/tests/test_source_estimate.py/test_volume_stc |
8,341 | def load_plugins(self, directory):
    plugins = {}
    sys.path.insert(0, directory)

    try:
        to_load = [p.strip() for p in self.config['porkchop']['plugins'].split(',')]
    except:
        to_load = []

    for infile in glob.glob(os.path.join(directory, '*.py')):
        module_name = os.path.splitext(os.path.split(infile)[1])[0]
        if os.path.basename(infile) == '__init__.py':
            continue

        if to_load and module_name not in to_load:
            continue

        try:
            module = imp.load_source(module_name, infile)

            for namek, klass in inspect.getmembers(module):
                if inspect.isclass(klass) \
                        and issubclass(klass, PorkchopPlugin) \
                        and klass is not PorkchopPlugin:
                    if hasattr(klass, '__metric_name__'):
                        plugin_name = klass.__metric_name__
                    else:
                        plugin_name = module_name

                    plugins[plugin_name] = klass
                    plugins[plugin_name].config_file = os.path.join(
                        self.config_dir,
                        '%s.ini' % plugin_name
                    )

                    # Only one plugin per module.
                    break
        except __HOLE__:
            print 'Unable to load plugin %r' % infile
            import traceback
            traceback.print_exc()

    return plugins | ImportError | dataset/ETHPy150Open disqus/porkchop/porkchop/plugin.py/PorkchopPluginHandler.load_plugins |
8,342 | def set_exec_by_id(self, exec_id):
    if not self.log:
        return False
    try:
        workflow_execs = [e for e in self.log
                          if e.id == int(str(exec_id))]
    except __HOLE__:
        return False
    if len(workflow_execs):
        self.notify_app(workflow_execs[0].item, workflow_execs[0])
        return True
    return False | ValueError | dataset/ETHPy150Open VisTrails/VisTrails/vistrails/gui/collection/vis_log.py/QLogView.set_exec_by_id |
8,343 | def _from_json(self, datastring):
    try:
        return jsonutils.loads(datastring)
    except __HOLE__:
        msg = _("cannot understand JSON")
        raise exception.MalformedRequestBody(reason=msg) | ValueError | dataset/ETHPy150Open openstack/rack/rack/api/wsgi.py/JSONDeserializer._from_json |
8,344 | def get_serializer(self, content_type, default_serializers=None):
    """Returns the serializer for the wrapped object.

    Returns the serializer for the wrapped object subject to the
    indicated content type.  If no serializer matching the content
    type is attached, an appropriate serializer drawn from the
    default serializers will be used.  If no appropriate
    serializer is available, raises InvalidContentType.
    """
    default_serializers = default_serializers or {}

    try:
        mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
        if mtype in self.serializers:
            return mtype, self.serializers[mtype]
        else:
            return mtype, default_serializers[mtype]
    except (__HOLE__, TypeError):
        raise exception.InvalidContentType(content_type=content_type) | KeyError | dataset/ETHPy150Open openstack/rack/rack/api/wsgi.py/ResponseObject.get_serializer |
8,345 | def action_peek_json(body):
    """Determine action to invoke."""
    try:
        decoded = jsonutils.loads(body)
    except __HOLE__:
        msg = _("cannot understand JSON")
        raise exception.MalformedRequestBody(reason=msg)

    # Make sure there's exactly one key...
    if len(decoded) != 1:
        msg = _("too many body keys")
        raise exception.MalformedRequestBody(reason=msg)

    # Return the action and the decoded body...
    return decoded.keys()[0] | ValueError | dataset/ETHPy150Open openstack/rack/rack/api/wsgi.py/action_peek_json |
8,346 | def get_action_args(self, request_environment):
    """Parse dictionary created by routes library."""

    # NOTE(Vek): Check for get_action_args() override in the
    # controller
    if hasattr(self.controller, 'get_action_args'):
        return self.controller.get_action_args(request_environment)

    try:
        args = request_environment['wsgiorg.routing_args'][1].copy()
    except (KeyError, IndexError, AttributeError):
        return {}

    try:
        del args['controller']
    except KeyError:
        pass

    try:
        del args['format']
    except __HOLE__:
        pass

    return args | KeyError | dataset/ETHPy150Open openstack/rack/rack/api/wsgi.py/Resource.get_action_args |
8,347 | def deserialize(self, meth, content_type, body):
    meth_deserializers = getattr(meth, 'wsgi_deserializers', {})
    try:
        mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
        if mtype in meth_deserializers:
            deserializer = meth_deserializers[mtype]
        else:
            deserializer = self.default_deserializers[mtype]
    except (__HOLE__, TypeError):
        raise exception.InvalidContentType(content_type=content_type)

    if (hasattr(deserializer, 'want_controller')
            and deserializer.want_controller):
        return deserializer(self.controller).deserialize(body)
    else:
        return deserializer().deserialize(body) | KeyError | dataset/ETHPy150Open openstack/rack/rack/api/wsgi.py/Resource.deserialize |
8,348 | def post_process_extensions(self, extensions, resp_obj, request,
                            action_args):
    for ext in extensions:
        response = None
        if inspect.isgenerator(ext):
            # If it's a generator, run the second half of
            # processing
            try:
                with ResourceExceptionHandler():
                    response = ext.send(resp_obj)
            except __HOLE__:
                # Normal exit of generator
                continue
            except Fault as ex:
                response = ex
        else:
            # Regular functions get post-processing...
            try:
                with ResourceExceptionHandler():
                    response = ext(req=request, resp_obj=resp_obj,
                                   **action_args)
            except Fault as ex:
                response = ex

        # We had a response...
        if response:
            return response

    return None | StopIteration | dataset/ETHPy150Open openstack/rack/rack/api/wsgi.py/Resource.post_process_extensions |
8,349 | def _process_stack(self, request, action, action_args,
                   content_type, body, accept):
    """Implement the processing stack."""

    # Get the implementing method
    try:
        meth, extensions = self.get_method(request, action,
                                           content_type, body)
    except (AttributeError, __HOLE__):
        return Fault(webob.exc.HTTPNotFound())
    except KeyError as ex:
        msg = _("There is no such action: %s") % ex.args[0]
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))
    except exception.MalformedRequestBody:
        msg = _("Malformed request body")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    if body:
        msg = _("Action: '%(action)s', body: "
                "%(body)s") % {'action': action,
                               'body': unicode(body, 'utf-8')}
        LOG.debug(logging.mask_password(msg))
    LOG.debug(_("Calling method '%(meth)s' (Content-type='%(ctype)s', "
                "Accept='%(accept)s')"),
              {'meth': str(meth),
               'ctype': content_type,
               'accept': accept})

    # Now, deserialize the request body...
    try:
        contents = {}
        if self._should_have_body(request):
            # allow empty body with PUT and POST
            if request.content_length == 0:
                contents = {'body': None}
            else:
                contents = self.deserialize(meth, content_type, body)
    except exception.InvalidContentType:
        msg = _("Unsupported Content-Type")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))
    except exception.MalformedRequestBody:
        msg = _("Malformed request body")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    # Update the action args
    action_args.update(contents)

    project_id = action_args.pop("project_id", None)
    context = request.environ.get('rack.context')
    if (context and project_id and (project_id != context.project_id)):
        msg = _("Malformed request URL: URL's project_id '%(project_id)s'"
                " doesn't match Context's project_id"
                " '%(context_project_id)s'") % \
            {'project_id': project_id,
             'context_project_id': context.project_id}
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    # Run pre-processing extensions
    response, post = self.pre_process_extensions(extensions,
                                                 request, action_args)

    if not response:
        try:
            with ResourceExceptionHandler():
                action_result = self.dispatch(meth, request, action_args)
        except Fault as ex:
            response = ex

    if not response:
        # No exceptions; convert action_result into a
        # ResponseObject
        resp_obj = None
        if type(action_result) is dict or action_result is None:
            resp_obj = ResponseObject(action_result)
        elif isinstance(action_result, ResponseObject):
            resp_obj = action_result
        else:
            response = action_result

        # Run post-processing extensions
        if resp_obj:
            # Do a preserialize to set up the response object
            serializers = getattr(meth, 'wsgi_serializers', {})
            resp_obj._bind_method_serializers(serializers)
            if hasattr(meth, 'wsgi_code'):
                resp_obj._default_code = meth.wsgi_code
            resp_obj.preserialize(accept, self.default_serializers)

            # Process post-processing extensions
            response = self.post_process_extensions(post, resp_obj,
                                                    request, action_args)

        if resp_obj and not response:
            response = resp_obj.serialize(request, accept,
                                          self.default_serializers)

    if hasattr(response, 'headers'):
        if context:
            response.headers.add('x-compute-request-id',
                                 context.request_id)

        for hdr, val in response.headers.items():
            # Headers must be utf-8 strings
            response.headers[hdr] = utils.utf8(str(val))

    return response | TypeError | dataset/ETHPy150Open openstack/rack/rack/api/wsgi.py/Resource._process_stack |
8,350 | def _get_method(self, request, action, content_type, body):
    """Look up the action-specific method and its extensions."""
    # Look up the method
    try:
        if not self.controller:
            meth = getattr(self, action)
        else:
            meth = getattr(self.controller, action)
    except __HOLE__:
        if (not self.wsgi_actions or
                action not in _ROUTES_METHODS + ['action']):
            # Propagate the error
            raise
    else:
        return meth, self.wsgi_extensions.get(action, [])

    if action == 'action':
        # OK, it's an action; figure out which action...
        mtype = _MEDIA_TYPE_MAP.get(content_type)
        action_name = self.action_peek[mtype](body)
    else:
        action_name = action

    # Look up the action method
    return (self.wsgi_actions[action_name],
            self.wsgi_action_extensions.get(action_name, [])) | AttributeError | dataset/ETHPy150Open openstack/rack/rack/api/wsgi.py/Resource._get_method |
8,351 | @staticmethod
def is_valid_body(body, entity_name):
    if not (body and entity_name in body):
        return False

    def is_dict(d):
        try:
            d.get(None)
            return True
        except __HOLE__:
            return False

    if not is_dict(body[entity_name]):
        return False

    return True | AttributeError | dataset/ETHPy150Open openstack/rack/rack/api/wsgi.py/Controller.is_valid_body |
8,352 | def decode_json_body():
    """
    Decode ``bottle.request.body`` to JSON.

    Returns:
        obj: Structure decoded by ``json.loads()``.

    Raises:
        HTTPError: 400 in case the data was malformed.
    """
    raw_data = request.body.read()

    try:
        return json.loads(raw_data)
    except __HOLE__ as e:
        raise HTTPError(400, e.message) | ValueError | dataset/ETHPy150Open Bystroushaak/bottle-rest/src/bottle_rest/bottle_rest.py/decode_json_body |
8,353 | def handle_type_error(fn):
    """
    Convert ``TypeError`` to ``bottle.HTTPError`` with ``400`` code and message
    about wrong parameters.

    Raises:
        HTTPError: 400 in case too many/too few function parameters were \
                   given.
    """
    @wraps(fn)
    def handle_type_error_wrapper(*args, **kwargs):
        def any_match(string_list, obj):
            return filter(lambda x: x in obj, string_list)

        try:
            return fn(*args, **kwargs)
        except __HOLE__ as e:
            str_list = [
                "takes exactly",
                "got an unexpected",
                "takes no argument",
            ]
            if fn.__name__ in e.message and any_match(str_list, e.message):
                raise HTTPError(400, e.message)
            raise  # This will cause 500: Internal server error

    return handle_type_error_wrapper | TypeError | dataset/ETHPy150Open Bystroushaak/bottle-rest/src/bottle_rest/bottle_rest.py/handle_type_error |
8,354 | @staticmethod
def new_by_type(build_name, *args, **kwargs):
    """Find BuildRequest with the given name."""
    # Compatibility
    if build_name in (PROD_WITHOUT_KOJI_BUILD_TYPE,
                      PROD_WITH_SECRET_BUILD_TYPE):
        build_name = PROD_BUILD_TYPE

    try:
        build_class = build_classes[build_name]
        logger.debug("Instantiating: %s(%s, %s)", build_class.__name__, args, kwargs)
        return build_class(*args, **kwargs)
    except __HOLE__:
        raise RuntimeError("Unknown build type '{0}'".format(build_name)) | KeyError | dataset/ETHPy150Open projectatomic/osbs-client/osbs/build/build_request.py/BuildRequest.new_by_type |
8,355 | @property
def template(self):
    if self._template is None:
        path = os.path.join(self.build_json_store, "%s.json" % self.key)
        logger.debug("loading template from path %s", path)
        try:
            with open(path, "r") as fp:
                self._template = json.load(fp)
        except (IOError, __HOLE__) as ex:
            raise OsbsException("Can't open template '%s': %s" %
                                (path, repr(ex)))
    return self._template | OSError | dataset/ETHPy150Open projectatomic/osbs-client/osbs/build/build_request.py/BuildRequest.template |
8,356 | def render_store_metadata_in_osv3(self, use_auth=None):
    try:
        self.dj.dock_json_set_arg('exit_plugins', "store_metadata_in_osv3",
                                  "url",
                                  self.spec.builder_openshift_url.value)

        if use_auth is not None:
            self.dj.dock_json_set_arg('exit_plugins',
                                      "store_metadata_in_osv3",
                                      "use_auth", use_auth)
    except __HOLE__:
        # For compatibility with older osbs.conf files
        self.dj.dock_json_set_arg('postbuild_plugins',
                                  "store_metadata_in_osv3",
                                  "url",
                                  self.spec.builder_openshift_url.value)

        if use_auth is not None:
            # For compatibility with older osbs.conf files
            self.dj.dock_json_set_arg('postbuild_plugins',
                                      "store_metadata_in_osv3",
                                      "use_auth", use_auth) | RuntimeError | dataset/ETHPy150Open projectatomic/osbs-client/osbs/build/build_request.py/CommonBuild.render_store_metadata_in_osv3 |
8,357 | def set_params(self, **kwargs):
    """
    set parameters according to specification

    these parameters are accepted:

    :param pulp_secret: str, resource name of pulp secret
    :param pdc_secret: str, resource name of pdc secret
    :param koji_target: str, koji tag with packages used to build the image
    :param kojiroot: str, URL from which koji packages are fetched
    :param kojihub: str, URL of the koji hub
    :param koji_certs_secret: str, resource name of secret that holds the koji certificates
    :param pulp_registry: str, name of pulp registry in dockpulp.conf
    :param nfs_server_path: str, NFS server and path
    :param nfs_dest_dir: str, directory to create on NFS server
    :param sources_command: str, command used to fetch dist-git sources
    :param architecture: str, architecture we are building for
    :param vendor: str, vendor name
    :param build_host: str, host the build will run on or None for auto
    :param authoritative_registry: str, the docker registry authoritative for this image
    :param distribution_scope: str, distribution scope for this image
                               (private, authoritative-source-only, restricted, public)
    :param use_auth: bool, use auth from atomic-reactor?
    :param git_push_url: str, URL for git push
    """
    # Here we cater to the koji "scratch" build type, this will disable
    # all plugins that might cause importing of data to koji
    try:
        self.scratch = kwargs.pop("scratch")
    except __HOLE__:
        pass

    logger.debug("setting params '%s' for %s", kwargs, self.spec)
    self.spec.set_params(**kwargs) | KeyError | dataset/ETHPy150Open projectatomic/osbs-client/osbs/build/build_request.py/ProductionBuild.set_params |
8,358 | def adjust_for_registry_api_versions(self):
    """
    Enable/disable plugins depending on supported registry API versions
    """
    versions = self.spec.registry_api_versions.value

    try:
        push_conf = self.dj.dock_json_get_plugin_conf('postbuild_plugins',
                                                      'tag_and_push')
        tag_and_push_registries = push_conf['args']['registries']
    except (KeyError, __HOLE__):
        tag_and_push_registries = {}

    if 'v1' not in versions:
        # Remove v1-only plugins
        for phase, name in [('postbuild_plugins', 'pulp_push')]:
            logger.info("removing v1-only plugin: %s", name)
            self.dj.remove_plugin(phase, name)

        # remove extra tag_and_push config
        self.remove_tag_and_push_registries(tag_and_push_registries, 'v1')

    if 'v2' not in versions:
        # Remove v2-only plugins
        logger.info("removing v2-only plugin: pulp_sync")
        self.dj.remove_plugin('postbuild_plugins', 'pulp_sync')
        # remove extra tag_and_push config
        self.remove_tag_and_push_registries(tag_and_push_registries, 'v2')

    # Remove 'version' from tag_and_push plugin config as it's no
    # longer needed
    for regdict in tag_and_push_registries.values():
        if 'version' in regdict:
            del regdict['version'] | IndexError | dataset/ETHPy150Open projectatomic/osbs-client/osbs/build/build_request.py/ProductionBuild.adjust_for_registry_api_versions |
8,359 | def render(self, validate=True):
    if validate:
        self.spec.validate()
    super(SimpleBuild, self).render()

    try:
        self.dj.dock_json_set_arg('exit_plugins', "store_metadata_in_osv3", "url",
                                  self.spec.builder_openshift_url.value)
    except __HOLE__:
        # For compatibility with older osbs.conf files
        self.dj.dock_json_set_arg('postbuild_plugins', "store_metadata_in_osv3", "url",
                                  self.spec.builder_openshift_url.value)

    # Remove 'version' from tag_and_push plugin config as it's no
    # longer needed
    if self.dj.dock_json_has_plugin_conf('postbuild_plugins',
                                         'tag_and_push'):
        push_conf = self.dj.dock_json_get_plugin_conf('postbuild_plugins',
                                                      'tag_and_push')
        try:
            registries = push_conf['args']['registries']
        except KeyError:
            pass
        else:
            for regdict in registries.values():
                if 'version' in regdict:
                    del regdict['version']

    self.dj.write_dock_json()
    self.build_json = self.template
    logger.debug(self.build_json)
    return self.build_json | RuntimeError | dataset/ETHPy150Open projectatomic/osbs-client/osbs/build/build_request.py/SimpleBuild.render |
8,360 | def __getattr__(self, attr):
    try:
        return self[self.attrs.index(attr)]
    except __HOLE__:
        raise AttributeError | ValueError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/pwd.py/struct_passwd.__getattr__ |
8,361 | def iter_errback(iterable, errback, *a, **kw):
    """Wraps an iterable calling an errback if an error is caught while
    iterating it.
    """
    it = iter(iterable)
    while 1:
        try:
            yield next(it)
        except __HOLE__:
            break
        except:
            errback(failure.Failure(), *a, **kw) | StopIteration | dataset/ETHPy150Open wcong/ants/ants/utils/defer.py/iter_errback |
8,362 | def dict_to_table(ava, lev=0, width=1):
    txt = ['<table border=%s bordercolor="black">\n' % width]
    for prop, valarr in ava.items():
        txt.append("<tr>\n")
        if isinstance(valarr, basestring):
            txt.append("<th>%s</th>\n" % str(prop))
            try:
                txt.append("<td>%s</td>\n" % valarr.encode("utf8"))
            except __HOLE__:
                txt.append("<td>%s</td>\n" % valarr)
        elif isinstance(valarr, list):
            i = 0
            n = len(valarr)
            for val in valarr:
                if not i:
                    # NOTE: closes the header cell with </th> (the original
                    # mistakenly emitted </td> here)
                    txt.append("<th rowspan=%d>%s</th>\n" % (len(valarr), prop))
                else:
                    txt.append("<tr>\n")
                if isinstance(val, dict):
                    txt.append("<td>\n")
                    txt.extend(dict_to_table(val, lev + 1, width - 1))
                    txt.append("</td>\n")
                else:
                    try:
                        txt.append("<td>%s</td>\n" % val.encode("utf8"))
                    except AttributeError:
                        txt.append("<td>%s</td>\n" % val)
                if n > 1:
                    txt.append("</tr>\n")
                n -= 1
                i += 1
        elif isinstance(valarr, dict):
            txt.append("<th>%s</th>\n" % prop)
            txt.append("<td>\n")
            txt.extend(dict_to_table(valarr, lev + 1, width - 1))
            txt.append("</td>\n")
        txt.append("</tr>\n")
    txt.append('</table>\n')
    return txt | AttributeError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/example/sp-repoze/sp.py/dict_to_table |
8,363 | def slo(environ, start_response, user):
    # so here I might get either a LogoutResponse or a LogoutRequest
    client = environ['repoze.who.plugins']["saml2auth"]
    sc = client.saml_client

    if "QUERY_STRING" in environ:
        query = parse_qs(environ["QUERY_STRING"])
        logger.info("query: %s" % query)
        try:
            response = sc.parse_logout_request_response(
                query["SAMLResponse"][0], binding=BINDING_HTTP_REDIRECT)
            if response:
                logger.info("LOGOUT response parsed OK")
        except __HOLE__:
            # return error reply
            response = None

        if response is None:
            request = sc.lo

    headers = []
    delco = delete_cookie(environ, "pysaml2")
    if delco:
        headers.append(delco)
    resp = Redirect("/done", headers=headers)
    return resp(environ, start_response)

#noinspection PyUnusedLocal | KeyError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/example/sp-repoze/sp.py/slo |
8,364 | def application(environ, start_response):
    """
    The main WSGI application. Dispatch the current request to
    the functions from above and store the regular expression
    captures in the WSGI environment as `myapp.url_args` so that
    the functions from above can access the url placeholders.

    If nothing matches, call the `not_found` function.

    :param environ: The HTTP application environment
    :param start_response: The application to run when the handling of the
        request is done
    :return: The response as a list of lines
    """
    path = environ.get('PATH_INFO', '').lstrip('/')
    logger.info("<application> PATH: %s" % path)

    if path == "metadata":
        return metadata(environ, start_response)

    user = environ.get("REMOTE_USER", "")
    if not user:
        user = environ.get("repoze.who.identity", "")
        logger.info("repoze.who.identity: '%s'" % user)
    else:
        logger.info("REMOTE_USER: '%s'" % user)
    #logger.info(logging.Logger.manager.loggerDict)

    for regex, callback in urls:
        if user:
            match = re.search(regex, path)
            if match is not None:
                try:
                    environ['myapp.url_args'] = match.groups()[0]
                except __HOLE__:
                    environ['myapp.url_args'] = path
                return callback(environ, start_response, user)
        else:
            return not_authn(environ, start_response)
    return not_found(environ, start_response)

# ---------------------------------------------------------------------------- | IndexError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/example/sp-repoze/sp.py/application |
8,365 | def main(self, options, args):
    """
    Main routine for running the reference viewer.

    `options` is a OptionParser object that has been populated with
    values from parsing the command line. It should at least include
    the options from add_default_options()

    `args` is a list of arguments to the viewer after parsing out
    options. It should contain a list of files or URLs to load.
    """
    # Create a logger
    logger = log.get_logger(name='ginga', options=options)

    # Get settings (preferences)
    basedir = paths.ginga_home
    if not os.path.exists(basedir):
        try:
            os.mkdir(basedir)
        except OSError as e:
            logger.warning("Couldn't create ginga settings area (%s): %s" % (
                basedir, str(e)))
            logger.warning("Preferences will not be able to be saved")

    # Set up preferences
    prefs = Settings.Preferences(basefolder=basedir, logger=logger)
    settings = prefs.createCategory('general')
    settings.load(onError='silent')
    settings.setDefaults(useMatplotlibColormaps=False,
                         widgetSet='choose',
                         WCSpkg='choose', FITSpkg='choose',
                         recursion_limit=2000)

    # default of 1000 is a little too small
    sys.setrecursionlimit(settings.get('recursion_limit'))

    # So we can find our plugins
    sys.path.insert(0, basedir)
    moduleHome = os.path.split(sys.modules['ginga.version'].__file__)[0]
    childDir = os.path.join(moduleHome, 'misc', 'plugins')
    sys.path.insert(0, childDir)
    pluginDir = os.path.join(basedir, 'plugins')
    sys.path.insert(0, pluginDir)

    gc = os.path.join(basedir, "ginga_config.py")
    have_ginga_config = os.path.exists(gc)

    # User configuration, earliest possible intervention
    if have_ginga_config:
        try:
            import ginga_config

            if hasattr(ginga_config, 'init_config'):
                ginga_config.init_config(self)
        except Exception as e:
            try:
                (type, value, tb) = sys.exc_info()
                tb_str = "\n".join(traceback.format_tb(tb))
            except Exception:
                tb_str = "Traceback information unavailable."
            logger.error("Error processing Ginga config file: %s" % (
                str(e)))
            logger.error("Traceback:\n%s" % (tb_str))

    # Choose a toolkit
    if options.toolkit:
        toolkit = options.toolkit
    else:
        toolkit = settings.get('widgetSet', 'choose')

    if toolkit == 'choose':
        try:
            from ginga.qtw import QtHelp
        except ImportError:
            try:
                from ginga.gtkw import GtkHelp
            except ImportError:
                print("You need python-gtk or python-qt to run Ginga!")
                sys.exit(1)
    else:
        ginga_toolkit.use(toolkit)

    tkname = ginga_toolkit.get_family()
    logger.info("Chosen toolkit (%s) family is '%s'" % (
        ginga_toolkit.toolkit, tkname))

    # these imports have to be here, otherwise they force the choice
    # of toolkit too early
    from ginga.gw.GingaGw import GingaView
    from ginga.Control import GingaControl, GuiLogHandler

    # Define class dynamically based on toolkit choice
    class GingaShell(GingaControl, GingaView):

        def __init__(self, logger, thread_pool, module_manager, prefs,
                     ev_quit=None):
            GingaView.__init__(self, logger, ev_quit, thread_pool)
            GingaControl.__init__(self, logger, thread_pool, module_manager,
                                  prefs, ev_quit=ev_quit)

    if settings.get('useMatplotlibColormaps', False):
        # Add matplotlib color maps if matplotlib is installed
        try:
            from ginga import cmap
            cmap.add_matplotlib_cmaps()
        except Exception as e:
            logger.warning("failed to load matplotlib colormaps: %s" % (str(e)))

    # User wants to customize the WCS package?
    if options.wcspkg:
        wcspkg = options.wcspkg
    else:
        wcspkg = settings.get('WCSpkg', 'choose')

    try:
        from ginga.util import wcsmod
        assert wcsmod.use(wcspkg) == True
    except Exception as e:
        logger.warning("failed to set WCS package preference: %s" % (str(e)))

    # User wants to customize the FITS package?
    if options.fitspkg:
        fitspkg = options.fitspkg
    else:
        fitspkg = settings.get('FITSpkg', 'choose')

    try:
        from ginga.util import io_fits
        assert io_fits.use(fitspkg) == True
    except Exception as e:
        logger.warning("failed to set FITS package preference: %s" % (str(e)))

    # Check whether user wants to use OpenCv
    use_opencv = settings.get('use_opencv', False)
    if use_opencv or options.opencv:
        from ginga import trcalc
        try:
            trcalc.use('opencv')
        except Exception as e:
            logger.warning("failed to set OpenCv preference: %s" % (str(e)))

    # Create the dynamic module manager
    mm = ModuleManager.ModuleManager(logger)

    # Create and start thread pool
    ev_quit = threading.Event()
    thread_pool = Task.ThreadPool(options.numthreads, logger,
                                  ev_quit=ev_quit)
    thread_pool.startall()

    # Create the Ginga main object
    ginga_shell = GingaShell(logger, thread_pool, mm, prefs,
                             ev_quit=ev_quit)
    ginga_shell.set_layout(self.layout)

    # User configuration (custom star catalogs, etc.)
    if have_ginga_config:
        try:
            if hasattr(ginga_config, 'pre_gui_config'):
                ginga_config.pre_gui_config(ginga_shell)
        except Exception as e:
            try:
                (type, value, tb) = sys.exc_info()
                tb_str = "\n".join(traceback.format_tb(tb))
            except Exception:
                tb_str = "Traceback information unavailable."
            logger.error("Error importing Ginga config file: %s" % (
                str(e)))
            logger.error("Traceback:\n%s" % (tb_str))

    # Build desired layout
    ginga_shell.build_toplevel()

    # Did user specify a particular geometry?
    if options.geometry:
        ginga_shell.set_geometry(options.geometry)

    # make the list of disabled plugins
    disabled_plugins = []
    if not (options.disable_plugins is None):
        disabled_plugins = options.disable_plugins.lower().split(',')

    # Add desired global plugins
    for spec in self.global_plugins:
        if not spec.module.lower() in disabled_plugins:
            ginga_shell.add_global_plugin(spec)

    # Add GUI log handler (for "Log" global plugin)
    guiHdlr = GuiLogHandler(ginga_shell)
    guiHdlr.setLevel(options.loglevel)
    fmt = logging.Formatter(log.LOG_FORMAT)
    guiHdlr.setFormatter(fmt)
    logger.addHandler(guiHdlr)

    # Load any custom modules
    if options.modules:
        modules = options.modules.split(',')
        for longPluginName in modules:
            if '.' in longPluginName:
                tmpstr = longPluginName.split('.')
                pluginName = tmpstr[-1]
                pfx = '.'.join(tmpstr[:-1])
            else:
                pluginName = longPluginName
                pfx = None
            spec = Bunch(name=pluginName, module=pluginName,
                         tab=pluginName, ws='right', pfx=pfx)
            ginga_shell.add_global_plugin(spec)

    # Load modules for "local" (per-channel) plug ins
    for spec in self.local_plugins:
        if not spec.module.lower() in disabled_plugins:
            ginga_shell.add_local_plugin(spec)

    # Load any custom plugins
    if options.plugins:
        plugins = options.plugins.split(',')
        for longPluginName in plugins:
            if '.' in longPluginName:
                tmpstr = longPluginName.split('.')
                pluginName = tmpstr[-1]
                pfx = '.'.join(tmpstr[:-1])
            else:
                pluginName = longPluginName
                pfx = None
            spec = Bunch(module=pluginName, ws='dialogs',
                         hidden=False, pfx=pfx)
            ginga_shell.add_local_plugin(spec)

    ginga_shell.update_pending()

    # TEMP?
    tab_names = list(map(lambda name: name.lower(),
                         ginga_shell.ds.get_tabnames(group=None)))
    if 'info' in tab_names:
        ginga_shell.ds.raise_tab('Info')
    if 'thumbs' in tab_names:
        ginga_shell.ds.raise_tab('Thumbs')

    # Add custom channels
    channels = options.channels.split(',')
    for chname in channels:
        ginga_shell.add_channel(chname)
    ginga_shell.change_channel(channels[0])

    # User configuration (custom star catalogs, etc.)
    if have_ginga_config:
        try:
            if hasattr(ginga_config, 'post_gui_config'):
                ginga_config.post_gui_config(ginga_shell)
        except Exception as e:
            try:
                (type, value, tb) = sys.exc_info()
                tb_str = "\n".join(traceback.format_tb(tb))
            except Exception:
                tb_str = "Traceback information unavailable."
            logger.error("Error processing Ginga config file: %s" % (
                str(e)))
            logger.error("Traceback:\n%s" % (tb_str))

    # Redirect warnings to logger
    for hdlr in logger.handlers:
        logging.getLogger('py.warnings').addHandler(hdlr)

    # Display banner the first time run, unless suppressed
    showBanner = True
    try:
        showBanner = settings.get('showBanner')
    except KeyError:
        # disable for subsequent runs
        settings.set(showBanner=False)
        settings.save()

    if (not options.nosplash) and (len(args) == 0) and showBanner:
        ginga_shell.banner(raiseTab=True)

    # Assume remaining arguments are fits files and load them.
    for imgfile in args:
        ginga_shell.nongui_do(ginga_shell.load_file, imgfile)

    try:
        try:
            # if there is a network component, start it
            if hasattr(ginga_shell, 'start'):
                task = Task.FuncTask2(ginga_shell.start)
                thread_pool.addTask(task)

            # Main loop to handle GUI events
            logger.info("Entering mainloop...")
            ginga_shell.mainloop(timeout=0.001)
        except __HOLE__:
            logger.error("Received keyboard interrupt!")
    finally:
        logger.info("Shutting down...")
        ev_quit.set()

    sys.exit(0) | KeyboardInterrupt | dataset/ETHPy150Open ejeschke/ginga/ginga/main.py/ReferenceViewer.main |
8,366 | def main_search():
    from optparse import OptionParser
    parser = OptionParser(
        usage="%prog [options] [<bandit> <bandit_algo>]")
    parser.add_option('--load',
                      default='',
                      dest="load",
                      metavar='FILE',
                      help="unpickle experiment from here on startup")
    parser.add_option('--save',
                      default='experiment.pkl',
                      dest="save",
                      metavar='FILE',
                      help="pickle experiment to here on exit")
    parser.add_option("--steps",
                      dest='steps',
                      default='100',
                      metavar='N',
                      help="exit after queuing this many jobs (default: 100)")
    parser.add_option("--workdir",
                      dest="workdir",
                      default=os.path.expanduser('~/.hyperopt.workdir'),
                      help="create workdirs here",
                      metavar="DIR")
    parser.add_option("--bandit-argfile",
                      dest="bandit_argfile",
                      default=None,
                      help="path to file containing arguments bandit constructor \
                            file format: pickle of dictionary containing two keys,\
                            {'args' : tuple of positional arguments, \
                            'kwargs' : dictionary of keyword arguments}")
    parser.add_option("--bandit-algo-argfile",
                      dest="bandit_algo_argfile",
                      default=None,
                      help="path to file containing arguments for bandit_algo "
                           "constructor. File format is pickled dictionary containing "
                           "two keys: 'args', a tuple of positional arguments, and "
                           "'kwargs', a dictionary of keyword arguments. "
                           "NOTE: bandit is pre-pended as first element of arg tuple.")

    (options, args) = parser.parse_args()
    try:
        bandit_json, bandit_algo_json = args
    except:
        parser.print_help()
        return -1

    try:
        if not options.load:
            raise IOError()
        handle = open(options.load, 'rb')
        self = cPickle.load(handle)
        handle.close()
    except __HOLE__:
        bandit = utils.get_obj(bandit_json, argfile=options.bandit_argfile)
        bandit_algo = utils.get_obj(bandit_algo_json,
                                    argfile=options.bandit_algo_argfile,
                                    args=(bandit,))
        self = SerialExperiment(bandit_algo)

    try:
        self.run(int(options.steps))
    finally:
        if options.save:
            cPickle.dump(self, open(options.save, 'wb')) | IOError | dataset/ETHPy150Open hyperopt/hyperopt/hyperopt/main.py/main_search |
8,367 | def main(cmd, fn_pos = 1):
    """
    Entry point for bin/* scripts
    XXX
    """
    logging.basicConfig(
        stream=sys.stderr,
        level=logging.INFO)
    try:
        runner = dict(
            search='main_search',
            dryrun='main_dryrun',
            plot_history='main_plot_history',
        )[cmd]
    except __HOLE__:
        logger.error("Command not recognized: %s" % cmd)
        # XXX: Usage message
        sys.exit(1)
    try:
        argv1 = sys.argv[fn_pos]
    except IndexError:
        logger.error('Module name required (XXX: print Usage)')
        return 1

    fn = datasets.main.load_tokens(sys.argv[fn_pos].split('.') + [runner])
    sys.exit(fn(sys.argv[fn_pos+1:])) | KeyError | dataset/ETHPy150Open hyperopt/hyperopt/hyperopt/main.py/main |
8,368 | def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd: Passed to subprocess.Popen.
    :type cmd: string
    :param process_input: Send to opened process.
    :type process_input: string
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes. Defaults to [0]. Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code: boolean, int, or [int]
    :param delay_on_retry: True | False. Defaults to True. If set to True,
                           wait a short amount of time before retrying.
    :type delay_on_retry: boolean
    :param attempts: How many times to retry cmd.
    :type attempts: int
    :param run_as_root: True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper kwarg.
    :type run_as_root: boolean
    :param root_helper: command to prefix to commands called with
                        run_as_root=True
    :type root_helper: string
    :param shell: whether or not there should be a shell used to
                  execute this command. Defaults to false.
    :type shell: boolean
    :param loglevel: log level for execute commands.
    :type loglevel: int. (Should be stdlib_logging.DEBUG or
                    stdlib_logging.INFO)
    :returns: (stdout, stderr) from process execution
    :raises: :class:`UnknownArgumentError` on
             receiving unknown arguments
    :raises: :class:`ProcessExecutionError`
    """
    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)

    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args '
                                     'to utils.execute: %r') % kwargs)

    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        cmd = shlex.split(root_helper) + list(cmd)

    cmd = map(str, cmd)

    while attempts > 0:
        attempts -= 1
        try:
            LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell)
            result = None
            for _i in six.moves.range(20):
                # NOTE(russellb) 20 is an arbitrary number of retries to
                # prevent any chance of looping forever here.
                try:
                    if process_input is not None:
                        result = obj.communicate(process_input)
                    else:
                        result = obj.communicate()
                except __HOLE__ as e:
                    if e.errno in (errno.EAGAIN, errno.EINTR):
                        continue
                    raise
                break
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            LOG.log(loglevel, _('Result was %s') % _returncode)
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=stdout,
                                            stderr=stderr,
                                            cmd=' '.join(cmd))
            return result
        except ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'), cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0) | OSError | dataset/ETHPy150Open openstack/rack/rack/openstack/common/processutils.py/execute |
8,369 | def getBodyJson():
    """
    For requests that are expected to contain a JSON body, this returns the
    parsed value, or raises a :class:`girder.api.rest.RestException` for
    invalid JSON.
    """
    try:
        return json.loads(cherrypy.request.body.read().decode('utf8'))
    except __HOLE__:
        raise RestException('Invalid JSON passed in request body.') | ValueError | dataset/ETHPy150Open girder/girder/girder/api/rest.py/getBodyJson |
8,370 | def find_commands(management_dir):
    """
    Given a path to a management directory, returns a list of all the command
    names that are available.

    Returns an empty list if no commands are defined.
    """
    command_dir = os.path.join(management_dir, 'commands')
    try:
        return [f[:-3] for f in os.listdir(command_dir)
                if not f.startswith('_') and f.endswith('.py')]
    except __HOLE__:
        return [] | OSError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/__init__.py/find_commands |
8,371 | def find_management_module(app_name):
    """
    Determines the path to the management module for the given app_name,
    without actually importing the application or the management module.

    Raises ImportError if the management module cannot be found for any reason.
    """
    parts = app_name.split('.')
    parts.append('management')
    parts.reverse()
    part = parts.pop()
    path = None

    # When using manage.py, the project module is added to the path,
    # loaded, then removed from the path. This means that
    # testproject.testapp.models can be loaded in future, even if
    # testproject isn't in the path. When looking for the management
    # module, we need look for the case where the project name is part
    # of the app_name but the project directory itself isn't on the path.
    try:
        f, path, descr = imp.find_module(part, path)
    except __HOLE__, e:
        if os.path.basename(os.getcwd()) != part:
            raise e

    while parts:
        part = parts.pop()
        f, path, descr = imp.find_module(part, path and [path] or None)
    return path | ImportError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/__init__.py/find_management_module |
8,372 | def get_commands():
    """
    Returns a dictionary mapping command names to their callback applications.

    This works by looking for a management.commands package in django.core, and
    in each installed application -- if a commands package exists, all commands
    in that package are registered.

    Core commands are always included. If a settings module has been
    specified, user-defined commands will also be included, the
    startproject command will be disabled, and the startapp command
    will be modified to use the directory in which the settings module appears.

    The dictionary is in the format {command_name: app_name}. Key-value
    pairs from this dictionary can then be used in calls to
    load_command_class(app_name, command_name)

    If a specific version of a command must be loaded (e.g., with the
    startapp command), the instantiated module can be placed in the
    dictionary in place of the application name.

    The dictionary is cached on the first call and reused on subsequent
    calls.
    """
    global _commands
    if _commands is None:
        _commands = dict([(name, 'django.core') for name in find_commands(__path__[0])])

        # Find the installed apps
        try:
            from django.conf import settings
            apps = settings.INSTALLED_APPS
        except (AttributeError, EnvironmentError, ImportError):
            apps = []

        # Find the project directory
        try:
            from django.conf import settings
            module = import_module(settings.SETTINGS_MODULE)
            project_directory = setup_environ(module, settings.SETTINGS_MODULE)
        except (__HOLE__, EnvironmentError, ImportError, KeyError):
            project_directory = None

        # Find and load the management module for each installed app.
        for app_name in apps:
            try:
                path = find_management_module(app_name)
                _commands.update(dict([(name, app_name)
                                       for name in find_commands(path)]))
            except ImportError:
                pass  # No management module - ignore this app

        if project_directory:
            # Remove the "startproject" command from self.commands, because
            # that's a django-admin.py command, not a manage.py command.
            del _commands['startproject']

            # Override the startapp command so that it always uses the
            # project_directory, not the current working directory
            # (which is default).
            from django.core.management.commands.startapp import ProjectCommand
            _commands['startapp'] = ProjectCommand(project_directory)

    return _commands | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/__init__.py/get_commands |
8,373 | def call_command(name, *args, **options):
    """
    Calls the given command, with the given options and args/kwargs.

    This is the primary API you should use for calling specific commands.

    Some examples:
        call_command('syncdb')
        call_command('shell', plain=True)
        call_command('sqlall', 'myapp')
    """
    # Load the command object.
    try:
        app_name = get_commands()[name]
        if isinstance(app_name, BaseCommand):
            # If the command is already loaded, use it directly.
            klass = app_name
        else:
            klass = load_command_class(app_name, name)
    except __HOLE__:
        raise CommandError("Unknown command: %r" % name)

    # Grab out a list of defaults from the options. optparse does this for us
    # when the script runs from the command line, but since call_command can
    # be called programatically, we need to simulate the loading and handling
    # of defaults (see #10080 for details).
    defaults = dict([(o.dest, o.default)
                     for o in klass.option_list
                     if o.default is not NO_DEFAULT])
    defaults.update(options)

    return klass.execute(*args, **defaults) | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/__init__.py/call_command |
8,374 | def fetch_command(self, subcommand):
    """
    Tries to fetch the given subcommand, printing a message with the
    appropriate command called from the command line (usually
    "django-admin.py" or "manage.py") if it can't be found.
    """
    try:
        app_name = get_commands()[subcommand]
    except __HOLE__:
        sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" % \
            (subcommand, self.prog_name))
        sys.exit(1)
    if isinstance(app_name, BaseCommand):
        # If the command is already loaded, use it directly.
        klass = app_name
    else:
        klass = load_command_class(app_name, subcommand)
    return klass | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/__init__.py/ManagementUtility.fetch_command |
8,375 | def autocomplete(self):
    """
    Output completion suggestions for BASH.

    The output of this function is passed to BASH's `COMREPLY` variable and
    treated as completion suggestions. `COMREPLY` expects a space
    separated string as the result.

    The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
    to get information about the cli input. Please refer to the BASH
    man-page for more information about this variables.

    Subcommand options are saved as pairs. A pair consists of
    the long option string (e.g. '--exclude') and a boolean
    value indicating if the option requires arguments. When printing to
    stdout, a equal sign is appended to options which require arguments.

    Note: If debugging this function, it is recommended to write the debug
    output in a separate file. Otherwise the debug output will be treated
    and formatted as potential completion suggestions.
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if not os.environ.has_key('DJANGO_AUTO_COMPLETE'):
        return

    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])

    try:
        curr = cwords[cword-1]
    except __HOLE__:
        curr = ''

    subcommands = get_commands().keys() + ['help']
    options = [('--help', None)]

    # subcommand
    if cword == 1:
        print ' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands)))
    # subcommand options
    # special case: the 'help' subcommand has no options
    elif cwords[0] in subcommands and cwords[0] != 'help':
        subcommand_cls = self.fetch_command(cwords[0])
        # special case: 'runfcgi' stores additional options as
        # 'key=value' pairs
        if cwords[0] == 'runfcgi':
            from django.core.servers.fastcgi import FASTCGI_OPTIONS
            options += [(k, 1) for k in FASTCGI_OPTIONS]
        # special case: add the names of installed apps to options
        elif cwords[0] in ('dumpdata', 'reset', 'sql', 'sqlall',
                           'sqlclear', 'sqlcustom', 'sqlindexes',
                           'sqlreset', 'sqlsequencereset', 'test'):
            try:
                from django.conf import settings
                # Get the last part of the dotted path as the app name.
                options += [(a.split('.')[-1], 0) for a in settings.INSTALLED_APPS]
            except ImportError:
                # Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
                # user will find out once they execute the command.
                pass
        options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in
                    subcommand_cls.option_list]
        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword-1]]
        options = filter(lambda (x, v): x not in prev_opts, options)

        # filter options by current input
        options = sorted([(k, v) for k, v in options if k.startswith(curr)])
        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1]:
                opt_label += '='
            print opt_label
    sys.exit(1) | IndexError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/__init__.py/ManagementUtility.autocomplete |
8,376 | def execute(self):
    """
    Given the command-line arguments, this figures out which subcommand is
    being run, creates a parser appropriate to that command, and runs it.
    """
    # Preprocess options to extract --settings and --pythonpath.
    # These options could affect the commands that are available, so they
    # must be processed early.
    parser = LaxOptionParser(usage="%prog subcommand [options] [args]",
                             version=get_version(),
                             option_list=BaseCommand.option_list)
    self.autocomplete()
    try:
        options, args = parser.parse_args(self.argv)
        handle_default_options(options)
    except:
        pass  # Ignore any option errors at this point.

    try:
        subcommand = self.argv[1]
    except __HOLE__:
        subcommand = 'help'  # Display help if no arguments were given.

    if subcommand == 'help':
        if len(args) > 2:
            self.fetch_command(args[2]).print_help(self.prog_name, args[2])
        else:
            parser.print_lax_help()
            sys.stderr.write(self.main_help_text() + '\n')
            sys.exit(1)
    # Special-cases: We want 'django-admin.py --version' and
    # 'django-admin.py --help' to work, for backwards compatibility.
    elif self.argv[1:] == ['--version']:
        # LaxOptionParser already takes care of printing the version.
        pass
    elif self.argv[1:] == ['--help']:
        parser.print_lax_help()
        sys.stderr.write(self.main_help_text() + '\n')
    else:
        self.fetch_command(subcommand).run_from_argv(self.argv) | IndexError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/__init__.py/ManagementUtility.execute |
8,377 | def remove_rule(self, chain, rule, wrap=True, top=False):
    """Remove a rule from a chain.

    Note: The rule must be exactly identical to the one that was added.
    You cannot switch arguments around like you can with the iptables
    CLI tool.
    """
    try:
        self.rules.remove(IptablesRule(chain, rule, wrap, top))
        if not wrap:
            self.remove_rules.append(IptablesRule(chain, rule, wrap, top))
        self.dirty = True
    except __HOLE__:
        LOG.warning(_LW('Tried to remove rule that was not there:'
                        ' %(chain)r %(rule)r %(wrap)r %(top)r'),
                    {'chain': chain, 'rule': rule,
                     'top': top, 'wrap': wrap}) | ValueError | dataset/ETHPy150Open openstack/nova/nova/network/linux_net.py/IptablesTable.remove_rule |
8,378 | def _find_table(self, lines, table_name):
    if len(lines) < 3:
        # length only <2 when fake iptables
        return (0, 0)
    try:
        start = lines.index('*%s' % table_name) - 1
    except __HOLE__:
        # Couldn't find table_name
        return (0, 0)
    end = lines[start:].index('COMMIT') + start + 2
    return (start, end) | ValueError | dataset/ETHPy150Open openstack/nova/nova/network/linux_net.py/IptablesManager._find_table |
8,379 | @utils.synchronized('lock_gateway', external=True)
def initialize_gateway_device(dev, network_ref):
    if not network_ref:
        return

    _enable_ipv4_forwarding()

    # NOTE(vish): The ip for dnsmasq has to be the first address on the
    #             bridge for it to respond to requests properly
    try:
        prefix = network_ref.cidr.prefixlen
    except __HOLE__:
        prefix = network_ref['cidr'].rpartition('/')[2]

    full_ip = '%s/%s' % (network_ref['dhcp_server'], prefix)
    new_ip_params = [[full_ip, 'brd', network_ref['broadcast']]]
    old_ip_params = []
    out, err = _execute('ip', 'addr', 'show', 'dev', dev,
                        'scope', 'global')
    for line in out.split('\n'):
        fields = line.split()
        if fields and fields[0] == 'inet':
            if fields[-2] in ('secondary', 'dynamic'):
                ip_params = fields[1:-2]
            else:
                ip_params = fields[1:-1]
            old_ip_params.append(ip_params)
            if ip_params[0] != full_ip:
                new_ip_params.append(ip_params)
    if not old_ip_params or old_ip_params[0][0] != full_ip:
        old_routes = []
        result = _execute('ip', 'route', 'show', 'dev', dev)
        if result:
            out, err = result
            for line in out.split('\n'):
                fields = line.split()
                if fields and 'via' in fields:
                    old_routes.append(fields)
                    _execute('ip', 'route', 'del', fields[0],
                             'dev', dev, run_as_root=True)
        for ip_params in old_ip_params:
            _execute(*_ip_bridge_cmd('del', ip_params, dev),
                     run_as_root=True, check_exit_code=[0, 2, 254])
        for ip_params in new_ip_params:
            _execute(*_ip_bridge_cmd('add', ip_params, dev),
                     run_as_root=True, check_exit_code=[0, 2, 254])

        for fields in old_routes:
            _execute('ip', 'route', 'add', *fields,
                     run_as_root=True)
        if CONF.send_arp_for_ha and CONF.send_arp_for_ha_count > 0:
            send_arp_for_ip(network_ref['dhcp_server'], dev,
                            CONF.send_arp_for_ha_count)
    if CONF.use_ipv6:
        _execute('ip', '-f', 'inet6', 'addr',
                 'change', network_ref['cidr_v6'],
                 'dev', dev, run_as_root=True) | AttributeError | dataset/ETHPy150Open openstack/nova/nova/network/linux_net.py/initialize_gateway_device |
8,380 | def _dnsmasq_pid_for(dev):
    """Returns the pid for prior dnsmasq instance for a bridge/device.

    Returns None if no pid file exists.

    If machine has rebooted pid might be incorrect (caller should check).
    """
    pid_file = _dhcp_file(dev, 'pid')

    if os.path.exists(pid_file):
        try:
            with open(pid_file, 'r') as f:
                return int(f.read())
        except (ValueError, __HOLE__):
            return None | IOError | dataset/ETHPy150Open openstack/nova/nova/network/linux_net.py/_dnsmasq_pid_for |
8,381 | def update_portSpec(old_obj, translate_dict):
    global id_scope
    sigstring = old_obj.db_sigstring
    sigs = []
    if sigstring and sigstring != '()':
        for sig in sigstring[1:-1].split(','):
            sigs.append(sig.split(':', 2))
    # not great to use eval...
    defaults = literal_eval(old_obj.db_defaults) if old_obj.db_defaults else []
    if isinstance(defaults, basestring):
        defaults = (defaults,)
    else:
        try:
            it = iter(defaults)
        except TypeError:
            defaults = (defaults,)
    # not great to use eval...
    labels = literal_eval(old_obj.db_labels) if old_obj.db_labels else []
    if isinstance(labels, basestring):
        labels = (labels,)
    else:
        try:
            it = iter(labels)
        except __HOLE__:
            labels = (labels,)
    new_obj = DBPortSpec.update_version(old_obj, translate_dict)
    total_len = len(sigs)
    if len(defaults) < total_len:
        defaults.extend("" for i in xrange(total_len-len(defaults)))
    if len(labels) < total_len:
        labels.extend("" for i in xrange(total_len-len(labels)))
    for i, (sig, default, label) in enumerate(izip(sigs, defaults, labels)):
        module = None
        package = None
        namespace = ''
        if len(sig) == 1:
            module = sig[0]
        else:
            package = sig[0]
            module = sig[1]
        if len(sig) > 2:
            namespace = sig[2]
        item = DBPortSpecItem(id=id_scope.getNewId(DBPortSpecItem.vtType),
                              pos=i,
                              module=module,
                              package=package,
                              namespace=namespace,
                              label=label,
                              default=default)
        item.db_values = ''
        item.db_entry_type = ''
        new_obj.db_add_portSpecItem(item)
    return new_obj | TypeError | dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/versions/v1_0_3/translate/v1_0_2.py/update_portSpec |
8,382 | def _getdb(scheme, host, params):
    try:
        module = import_module('stdnet.backends.%sb' % scheme)
    except __HOLE__:
        raise NotImplementedError
    return getattr(module, 'BackendDataServer')(scheme, host, **params) | ImportError | dataset/ETHPy150Open lsbardel/python-stdnet/stdnet/backends/__init__.py/_getdb |
8,383 | def execute_generator(gen):
    exc_info = None
    result = None
    while True:
        try:
            if exc_info:
                result = failure.throw(*exc_info)
                exc_info = None
            else:
                result = gen.send(result)
        except __HOLE__:
            break
        except Exception:
            if not exc_info:
                exc_info = sys.exc_info()
            else:
                break
        else:
            if isgenerator(result):
                result = execute_generator(result)
    #
    if exc_info:
        raise_error_trace(exc_info[1], exc_info[2])
    else:
        return result | StopIteration | dataset/ETHPy150Open lsbardel/python-stdnet/stdnet/backends/__init__.py/execute_generator |
8,384 | def dpll_satisfiable(expr, all_models=False):
    """
    Check satisfiability of a propositional sentence.
    It returns a model rather than True when it succeeds.
    Returns a generator of all models if all_models is True.

    Examples
    ========

    >>> from sympy.abc import A, B
    >>> from sympy.logic.algorithms.dpll2 import dpll_satisfiable
    >>> dpll_satisfiable(A & ~B)
    {A: True, B: False}
    >>> dpll_satisfiable(A & ~A)
    False

    """
    clauses = conjuncts(to_cnf(expr))
    if False in clauses:
        if all_models:
            return (f for f in [False])
        return False
    symbols = sorted(_find_predicates(expr), key=default_sort_key)
    symbols_int_repr = range(1, len(symbols) + 1)
    clauses_int_repr = to_int_repr(clauses, symbols)
    solver = SATSolver(clauses_int_repr, symbols_int_repr, set(), symbols)
    models = solver._find_model()

    if all_models:
        return _all_models(models)

    try:
        return next(models)
    except __HOLE__:
        return False

    # Uncomment to confirm the solution is valid (hitting set for the clauses)
    #else:
        #for cls in clauses_int_repr:
            #assert solver.var_settings.intersection(cls) | StopIteration | dataset/ETHPy150Open sympy/sympy/sympy/logic/algorithms/dpll2.py/dpll_satisfiable |
8,385 | def _all_models(models):
    satisfiable = False
    try:
        while True:
            yield next(models)
            satisfiable = True
    except __HOLE__:
        if not satisfiable:
            yield False | StopIteration | dataset/ETHPy150Open sympy/sympy/sympy/logic/algorithms/dpll2.py/_all_models |
8,386 | def _process_events(self, events):
    """Process events from proactor."""
    for f, callback, transferred, key, ov in events:
        try:
            self._logger.debug('Invoking event callback {}'.format(callback))
            value = callback(transferred, key, ov)
        except __HOLE__ as e:
            self._logger.warn('Event callback failed: {}'.format(e))
            f.set_exception(e)
        else:
            f.set_result(value) | OSError | dataset/ETHPy150Open harvimt/quamash/quamash/_windows.py/_ProactorEventLoop._process_events |
8,387 | def _poll(self, timeout=None):
    """Override in order to handle events in a threadsafe manner."""
    if timeout is None:
        ms = UINT32_MAX  # wait for eternity
    elif timeout < 0:
        raise ValueError("negative timeout")
    else:
        # GetQueuedCompletionStatus() has a resolution of 1 millisecond,
        # round away from zero to wait *at least* timeout seconds.
        ms = math.ceil(timeout * 1e3)
        if ms >= UINT32_MAX:
            raise ValueError("timeout too big")

    with QtCore.QMutexLocker(self._lock):
        while True:
            # self._logger.debug('Polling IOCP with timeout {} ms in thread {}...'.format(
            #     ms, threading.get_ident()))
            status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)
            if status is None:
                break

            err, transferred, key, address = status
            try:
                f, ov, obj, callback = self._cache.pop(address)
            except __HOLE__:
                # key is either zero, or it is used to return a pipe
                # handle which should be closed to avoid a leak.
                if key not in (0, _overlapped.INVALID_HANDLE_VALUE):
                    _winapi.CloseHandle(key)
                ms = 0
                continue

            if obj in self._stopped_serving:
                f.cancel()
            # Futures might already be resolved or cancelled
            elif not f.done():
                self.__events.append((f, callback, transferred, key, ov))

            ms = 0 | KeyError | dataset/ETHPy150Open harvimt/quamash/quamash/_windows.py/_IocpProactor._poll |
8,388 | def mangle_docstrings(app, what, name, obj, options, lines,
                      reference_offset=[0]):
    if what == 'module':
        # Strip top title
        title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
                              re.I|re.S)
        lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
    else:
        doc = get_doc_object(obj, what)
        lines[:] = str(doc).split("\n")

    if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
            obj.__name__:
        v = dict(full_name=obj.__name__)
        lines += [''] + (app.config.numpydoc_edit_link % v).split("\n")

    # replace reference numbers so that there are no duplicates
    references = []
    for l in lines:
        l = l.strip()
        if l.startswith('.. ['):
            try:
                references.append(int(l[len('.. ['):l.index(']')]))
            except __HOLE__:
                print "WARNING: invalid reference in %s docstring" % name

    # Start renaming from the biggest number, otherwise we may
    # overwrite references.
    references.sort()
    if references:
        for i, line in enumerate(lines):
            for r in references:
                new_r = reference_offset[0] + r
                lines[i] = lines[i].replace('[%d]_' % r,
                                            '[%d]_' % new_r)
                lines[i] = lines[i].replace('.. [%d]' % r,
                                            '.. [%d]' % new_r)

    reference_offset[0] += len(references) | ValueError | dataset/ETHPy150Open cigroup-ol/windml/doc/sphinxext/numpy_ext_old/numpydoc.py/mangle_docstrings |
8,389 | def get_local_ip(self):
    """ Gets the local IP of the current node.

    Returns:
        The local IP
    """
    try:
        local_ip = self.__local_ip
    except __HOLE__:
        local_ip = None

    if local_ip is None:
        local_ip = os.environ.get("LOCAL_DB_IP")

        if local_ip is None:
            raise Exception("Env var LOCAL_DB_IP was not set.")
        else:
            self.__local_ip = local_ip

    return self.__local_ip | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppDB/dbinterface_batch.py/AppDBInterface.get_local_ip |
8,390 | def get_master_ip(self):
    """ Gets the master database IP of the current AppScale deployment.

    Returns:
        The master DB IP
    """
    try:
        master_ip = self.__master_ip
    except __HOLE__:
        master_ip = None

    if master_ip is None:
        master_ip = os.environ.get("MASTER_IP")

        if master_ip is None:
            raise Exception("Env var MASTER_IP was not set.")
        else:
            self.__master_ip = master_ip

    return self.__master_ip | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppDB/dbinterface_batch.py/AppDBInterface.get_master_ip |
8,391 | def main(argv):
    if len(argv) < 2:
        print "Usage: %s com.apple.LaunchServices.QuarantineEvents" % __program__
        sys.exit(1)

    encoding = locale.getpreferredencoding()
    if encoding.upper() != "UTF-8":
        print "%s requires an UTF-8 capable console/terminal" % __program__
        sys.exit(1)

    files_to_process = []
    for input_glob in argv[1:]:
        files_to_process += glob.glob(input_glob)

    for input_file in files_to_process:
        events = OSXQuarantineEvents(open(input_file))

        for data in events.Parse():
            timestamp, entry_type, url, data1, data2, data3, _, _, _, _, _ = data
            try:
                date_string = datetime.datetime(1970, 1, 1)
                date_string += datetime.timedelta(microseconds=timestamp)
                date_string = u"%s+00:00" % (date_string)
            except TypeError:
                date_string = timestamp
            except __HOLE__:
                date_string = timestamp

            output_string = u"%s\t%s\t%s\t%s\t%s\t%s" % (
                date_string, entry_type, url, data1, data2, data3)
            print output_string.encode("UTF-8") | ValueError | dataset/ETHPy150Open google/grr/grr/parsers/osx_quarantine.py/main |
8,392 | def test_missing_tnull(self):
    """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197"""
    c = fits.Column('F1', 'A3', null='---',
                    array=np.array(['1.0', '2.0', '---', '3.0']),
                    ascii=True)
    table = fits.TableHDU.from_columns([c])
    table.writeto(self.temp('test.fits'))

    # Now let's delete the TNULL1 keyword, making this essentially
    # unreadable
    with fits.open(self.temp('test.fits'), mode='update') as h:
        h[1].header['TFORM1'] = 'E3'
        del h[1].header['TNULL1']

    with fits.open(self.temp('test.fits')) as h:
        assert_raises(ValueError, lambda: h[1].data['F1'])

    try:
        with fits.open(self.temp('test.fits')) as h:
            h[1].data['F1']
    except __HOLE__ as exc:
        assert str(exc).endswith(
            "the header may be missing the necessary TNULL1 "
            "keyword or the table contains invalid data") | ValueError | dataset/ETHPy150Open spacetelescope/PyFITS/pyfits/tests/test_table.py/TestTableFunctions.test_missing_tnull |
8,393 | def _apply_inplace(self, x, **kwargs):
    r"""
    Applies this transform to a :map:`Transformable` ``x`` destructively.
    Any ``kwargs`` will be passed to the specific transform :meth:`_apply`
    method.

    Note that this is an inplace operation that should be used sparingly,
    by internal API's where creating a copy of the transformed object is
    expensive. It does not return anything, as the operation is inplace.

    Parameters
    ----------
    x : :map:`Transformable`
        The :map:`Transformable` object to be transformed.
    kwargs : `dict`
        Passed through to :meth:`_apply`.
    """
    def transform(x_):
        """
        Local closure which calls the :meth:`_apply` method with the
        `kwargs` attached.
        """
        return self._apply(x_, **kwargs)

    try:
        x._transform_inplace(transform)
    except __HOLE__:
        raise ValueError('apply_inplace can only be used on Transformable'
                         ' objects.') | AttributeError | dataset/ETHPy150Open menpo/menpo/menpo/transform/base/__init__.py/Transform._apply_inplace |
8,394 | def apply(self, x, batch_size=None, **kwargs):
    r"""
    Applies this transform to ``x``.

    If ``x`` is :map:`Transformable`, ``x`` will be handed this transform
    object to transform itself non-destructively (a transformed copy of the
    object will be returned).

    If not, ``x`` is assumed to be an `ndarray`. The transformation will be
    non-destructive, returning the transformed version.

    Any ``kwargs`` will be passed to the specific transform :meth:`_apply`
    method.

    Parameters
    ----------
    x : :map:`Transformable` or ``(n_points, n_dims)`` `ndarray`
        The array or object to be transformed.
    batch_size : `int`, optional
        If not ``None``, this determines how many items from the numpy
        array will be passed through the transform at a time. This is
        useful for operations that require large intermediate matrices
        to be computed.
    kwargs : `dict`
        Passed through to :meth:`_apply`.

    Returns
    -------
    transformed : ``type(x)``
        The transformed object or array
    """
    def transform(x_):
        """
        Local closure which calls the :meth:`_apply` method with the
        `kwargs` attached.
        """
        return self._apply_batched(x_, batch_size, **kwargs)

    try:
        return x._transform(transform)
    except __HOLE__:
        return self._apply_batched(x, batch_size, **kwargs) | AttributeError | dataset/ETHPy150Open menpo/menpo/menpo/transform/base/__init__.py/Transform.apply |
8,395 | def do_GET(self):
    print "Processing %s" % self.path
    if "/success" in self.path:
        print "Stopping embedded HTTP server"
        self.send_response(200)
        self.end_headers()
        try:
            self.wfile.write(open("login_success.html").read())
        except __HOLE__:
            self.wfile.write("<html><head><title>Authentication success!</title></head>")
            self.wfile.write("<body>Authentication was successful. Remember to run <b>st_update</b> in Alfred.</body></html>")
        self.server.stop = True
    elif "/oauth/callback" in self.path:
        print "Received OAuth callback"
        parsed_path = urlparse.urlparse(self.path)
        try:
            params = dict([p.split('=') for p in parsed_path[4].split('&')])
        except:
            params = {}

        oauth_params = {
            'grant_type': 'authorization_code',
            'client_id': CLIENT_ID,
            'client_secret': CLIENT_SECRET,
            'scope': 'app',
            'redirect_uri': "http://localhost:{port}/oauth/callback".format(port=SERVER_PORT),
            'code': params['code']
        }
        oauth_headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Accept': 'application/json'
        }
        connection = httplib.HTTPSConnection if PROTOCOL == "https" else httplib.HTTPConnection
        conn = connection(HOSTNAME, PORT)
        conn.request("POST", "/oauth/token",
                     urllib.urlencode(oauth_params), oauth_headers)
        response = conn.getresponse()
        data = response.read()
        conn.close()

        jsonData = json.loads(data)
        token = jsonData["access_token"]
        authHeader = open("token.txt", "w")
        authHeader.write(token)
        authHeader.close()

        # Redirect so we can shut down the HTTP server
        self.send_response(301)
        self.send_header("Location", "http://localhost:{port}/success".format(port=SERVER_PORT))
        self.end_headers()
    else:
        print "%s not found" % self.path
        self.send_response(404) | IOError | dataset/ETHPy150Open PhysicalGraph/SmartThings-Alfred/alfredworkflow/http_server.py/StoppableHttpRequestHandler.do_GET |
8,396 | def listdir(path):
    """List directory contents, using cache."""
    try:
        cached_mtime, list = cache[path]
        del cache[path]
    except __HOLE__:
        cached_mtime, list = -1, []
    mtime = os.stat(path).st_mtime
    if mtime != cached_mtime:
        list = os.listdir(path)
        list.sort()
        cache[path] = mtime, list
    return list | KeyError | dataset/ETHPy150Open Southpaw-TACTIC/TACTIC/src/context/client/tactic-api-python-4.0.api04/Lib/dircache.py/listdir |
8,397 | def _attr_get_(obj, attr):
    '''Returns an attribute's value, or None (no error) if undefined.
    Analogous to .get() for dictionaries. Useful when checking for
    value of options that may not have been defined on a given
    method.'''
    try:
        return getattr(obj, attr)
    except __HOLE__:
        return None | AttributeError | dataset/ETHPy150Open jookies/jasmin/jasmin/protocols/cli/options.py/_attr_get_ |
8,398 | def test_access_zipped_assets_integration():
    test_executable = dedent('''
        import os
        from _pex.util import DistributionHelper
        temp_dir = DistributionHelper.access_zipped_assets('my_package', 'submodule')
        with open(os.path.join(temp_dir, 'mod.py'), 'r') as fp:
            for line in fp:
                print(line)
    ''')
    with nested(temporary_dir(), temporary_dir()) as (td1, td2):
        pb = PEXBuilder(path=td1)
        with open(os.path.join(td1, 'exe.py'), 'w') as fp:
            fp.write(test_executable)
            pb.set_executable(fp.name)

        submodule = os.path.join(td1, 'my_package', 'submodule')
        safe_mkdir(submodule)
        mod_path = os.path.join(submodule, 'mod.py')
        with open(mod_path, 'w') as fp:
            fp.write('accessed')
            pb.add_source(fp.name, 'my_package/submodule/mod.py')

        pex = os.path.join(td2, 'app.pex')
        pb.build(pex)

        output, returncode = run_simple_pex(pex)
        try:
            output = output.decode('UTF-8')
        except __HOLE__:
            pass
        assert output == 'accessed\n'
        assert returncode == 0 | ValueError | dataset/ETHPy150Open pantsbuild/pex/tests/test_util.py/test_access_zipped_assets_integration |
8,399 | def _reload_config_data(self):
    """Reload the data from config file into ``self._list`` and ``self._set``.

    Note: When changing the managed list using add() and remove() from command line, the
    DataWatcher's greenlet does not work, you need to call this explicitly to update the list
    so as to make following changes.
    """
    try:
        self.zk_config_manager.reload_config_data()
    except __HOLE__:
        log.info('Error reading config file in managed list %s:%s' % (
            self.list_domain, self.list_key))
        # Assume there is empty data in the config file.
        self._read_config_callback('') | IOError | dataset/ETHPy150Open pinterest/kingpin/kingpin/manageddata/managed_datastructures.py/ManagedList._reload_config_data |