Dataset columns (values shown per row below):

    Unnamed: 0    int64     values 0 - 10k       integer row id
    function      string    lengths 79 - 138k    a Python function with one exception name masked as __HOLE__
    label         string    20 classes           the exception type that fills the __HOLE__
    info          string    lengths 42 - 261     source path within the ETHPy150Open corpus
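Before the rows themselves, a minimal sketch of consuming a dump like this programmatically. The CSV file name holes.csv and the pandas-based loading are illustrative assumptions; only the column names and the __HOLE__ masking convention come from the schema and rows in this section.

import pandas as pd

# Hypothetical export of this section's table; only the column names are given above.
df = pd.read_csv("holes.csv")

for _, row in df.iterrows():
    masked = row["function"]   # Python source with one exception name replaced by __HOLE__
    answer = row["label"]      # one of 20 exception classes, e.g. TypeError
    # Reconstruct the original source by filling the hole back in.
    original = masked.replace("__HOLE__", answer)
    assert "__HOLE__" not in original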
Row 8,200
function:

def get_action_class_instance(action_cls, config=None, action_service=None):
    """
    Instantiate and return Action class instance.

    :param action_cls: Action class to instantiate.
    :type action_cls: ``class``

    :param config: Config to pass to the action class.
    :type config: ``dict``

    :param action_service: ActionService instance to pass to the class.
    :type action_service: :class:`ActionService`
    """
    kwargs = {}
    kwargs['config'] = config
    kwargs['action_service'] = action_service

    # Note: This is done for backward compatibility reasons. We first try to pass
    # "action_service" argument to the action class constructor, but if that doesn't work (e.g. old
    # action which hasn't been updated yet), we resort to late assignment post class instantiation.
    # TODO: Remove in next major version once all the affected actions have been updated.
    try:
        action_instance = action_cls(**kwargs)
    except __HOLE__ as e:
        if 'unexpected keyword argument \'action_service\'' not in str(e):
            raise e

        LOG.debug('Action class (%s) constructor doesn\'t take "action_service" argument, '
                  'falling back to late assignment...' % (action_cls.__class__.__name__))

        action_service = kwargs.pop('action_service', None)
        action_instance = action_cls(**kwargs)
        action_instance.action_service = action_service

    return action_instance

label: TypeError
info: dataset/ETHPy150Open StackStorm/st2/st2actions/st2actions/runners/utils.py/get_action_class_instance
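To make the function/label pairing concrete, substituting the row's label back into its hole restores the handler as it appears upstream; a one-line check:

snippet = "except __HOLE__ as e:"                 # taken from row 8,200's function field
print(snippet.replace("__HOLE__", "TypeError"))   # row 8,200's label
# prints: except TypeError as e: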
Row 8,201
function:

def get(self, **kwargs):
    if 'pk' in kwargs:
        pk = kwargs.pop('pk')
    elif 'id' in kwargs:
        pk = kwargs.pop('id')
    else:
        try:
            pk = self.instance.id
        except __HOLE__:
            raise AttributeError("The 'es.get' method needs to be called from an instance or be given a 'pk' parameter.")
    return self.queryset.get(id=pk)

label: AttributeError
info: dataset/ETHPy150Open liberation/django-elasticsearch/django_elasticsearch/managers.py/ElasticsearchManager.get
Row 8,202
function:

def make_mapping(self):
    """
    Create the model's es mapping on the fly
    """
    mappings = {}

    for field_name in self.get_fields():
        try:
            field = self.model._meta.get_field(field_name)
        except FieldDoesNotExist:
            # abstract field
            mapping = {}
        else:
            mapping = {'type': ELASTICSEARCH_FIELD_MAP.get(
                field.get_internal_type(), 'string')}
            try:
                # if an analyzer is set as default, use it.
                # TODO: could be also tokenizer, filter, char_filter
                if mapping['type'] == 'string':
                    analyzer = settings.ELASTICSEARCH_SETTINGS['analysis']['default']
                    mapping['analyzer'] = analyzer
            except (ValueError, AttributeError, KeyError, TypeError):
                pass
        try:
            mapping.update(self.model.Elasticsearch.mappings[field_name])
        except (AttributeError, __HOLE__, TypeError):
            pass
        mappings[field_name] = mapping

    # add a completion mapping for every auto completable field
    fields = self.model.Elasticsearch.completion_fields or []
    for field_name in fields:
        complete_name = "{0}_complete".format(field_name)
        mappings[complete_name] = {"type": "completion"}

    return {
        self.doc_type: {
            "properties": mappings
        }
    }

label: KeyError
info: dataset/ETHPy150Open liberation/django-elasticsearch/django_elasticsearch/managers.py/ElasticsearchManager.make_mapping
Row 8,203
function:

@pytest.fixture
def dbm_db(request):
    db = dbm.open(db_file, 'n')
    db.close()

    def fin():
        try:
            os.remove(db_file)
        except __HOLE__:
            pass
    request.addfinalizer(fin)

label: OSError
info: dataset/ETHPy150Open eleme/thriftpy/tests/test_tracking.py/dbm_db
Row 8,204
function:

@property
def function(self):
    """
    The function pointed to by the path_str attribute.

    Returns:
        A handle to a Python function or None if function is not valid.
    """
    if not self._function and self._valid is None:
        try:
            # Split into parts and extract function name
            module_path, function_name = self.path.rsplit('.', 1)

            # Pre-process handler path
            full_module_path = '.'.join((self.PATH_PREFIX, module_path))

            # Import module
            module = __import__(full_module_path, fromlist=[function_name])
        except (__HOLE__, ImportError):
            self._valid = False
        else:
            # Get the function
            self._function = getattr(module, function_name)
            self._valid = True
    return self._function

label: ValueError
info: dataset/ETHPy150Open tethysplatform/tethys/tethys_apps/base/persistent_store.py/TethysFunctionExtractor.function
Row 8,205
function:

def __init__(self, pickle_file_path):
    cmd.Cmd.__init__(self)

    if not os.path.isfile(pickle_file_path):
        print "File %s does not exist." % pickle_file_path
        sys.exit(1)

    try:
        pickle_file = open(pickle_file_path, 'rb')
    except __HOLE__ as e:
        print "Unable to open file %s" % pickle_file_path
        sys.exit(1)

    try:
        self.fs = pickle.load(pickle_file)
    except:
        print ("Unable to load file '%s'. " + \
               "Are you sure it is a valid pickle file?") % \
              (pickle_file_path,)
        sys.exit(1)

    self.pickle_file_path = pickle_file_path

    # get the name of the file so we can display it as the prompt
    path_parts = pickle_file_path.split('/')
    self.fs_name = path_parts[-1]

    self.update_pwd("/")

    self.intro = "\nKippo/Cowrie file system interactive editor\n" + \
                 "Donovan Hubbard, Douglas Hubbard, March 2013\n" + \
                 "Type 'help' for help\n"

label: IOError
info: dataset/ETHPy150Open cowrie/cowrie/utils/fsctl.py/fseditCmd.__init__
Row 8,206
function:

@contextmanager
def patched_settings(**kwargs):
    old = {}
    for k, v in kwargs.items():
        try:
            old[k] = getattr(settings, k)
        except __HOLE__:
            pass
        setattr(settings, k, v)
    yield
    for k, v in old.items():
        setattr(settings, k, v)

label: AttributeError
info: dataset/ETHPy150Open hzdg/django-staticbuilder/staticbuilder/utils.py/patched_settings
Row 8,207
function:

@manage(['op'])
def init(self, op):
    try:
        self.op = {
            '<': operator.lt,
            '<=': operator.le,
            '=': operator.eq,
            '!=': operator.ne,
            '>=': operator.ge,
            '>': operator.gt,
        }[op]
    except __HOLE__:
        _log.warning('Invalid operator %s, will always produce FALSE as result' % str(op))
        self.op = None

label: KeyError
info: dataset/ETHPy150Open EricssonResearch/calvin-base/calvin/actorstore/systemactors/std/Compare.py/Compare.init
Row 8,208
function:

@property
def _core_plugin(self):
    try:
        return self._plugin
    except __HOLE__:
        self._plugin = manager.NeutronManager.get_plugin()
        return self._plugin

label: AttributeError
info: dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/plugins/cisco/device_manager/service_vm_lib.py/ServiceVMManager._core_plugin
Row 8,209
function:

def dispatch_service_vm_real(
        self, context, instance_name, vm_image, vm_flavor,
        hosting_device_drv, credentials_info, connectivity_info,
        ports=None):
    mgmt_port = connectivity_info['mgmt_port']
    nics = [{'port-id': mgmt_port['id']}]
    for port in ports or {}:
        nics.append({'port-id': port['id']})

    try:
        image = n_utils.find_resource(self._nclient.images, vm_image)
        flavor = n_utils.find_resource(self._nclient.flavors, vm_flavor)
    except (nova_exc.CommandError, Exception) as e:
        LOG.error(_LE('Failure finding needed Nova resource: %s'), e)
        return

    try:
        # Assumption for now is that this does not need to be
        # plugin dependent, only hosting device type dependent.
        files = hosting_device_drv.create_config(context, credentials_info,
                                                 connectivity_info)
    except __HOLE__:
        return

    try:
        server = self._nclient.servers.create(
            instance_name, image.id, flavor.id, nics=nics, files=files,
            config_drive=(files != {}))
    # There are several individual Nova client exceptions but they have
    # no other common base than Exception, therefore the long list.
    except (nova_exc.UnsupportedVersion, nova_exc.CommandError,
            nova_exc.AuthorizationFailure, nova_exc.NoUniqueMatch,
            nova_exc.AuthSystemNotFound, nova_exc.NoTokenLookupException,
            nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
            nova_exc.ConnectionRefused, nova_exc.ClientException,
            Exception) as e:
        LOG.error(_LE('Failed to create service VM instance: %s'), e)
        return
    return {'id': server.id}

# TODO(remove fake function later)

label: IOError
info: dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/plugins/cisco/device_manager/service_vm_lib.py/ServiceVMManager.dispatch_service_vm_real
Row 8,210
function:

def dispatch_service_vm_fake(self, context, instance_name, vm_image,
                             vm_flavor, hosting_device_drv,
                             credentials_info, connectivity_info,
                             ports=None):
    mgmt_port = connectivity_info['mgmt_port']

    try:
        # Assumption for now is that this does not need to be
        # plugin dependent, only hosting device type dependent.
        hosting_device_drv.create_config(context, credentials_info,
                                         connectivity_info)
    except __HOLE__:
        return

    vm_id = uuidutils.generate_uuid()
    if mgmt_port is not None:
        p_dict = {'port': {'device_id': vm_id, 'device_owner': 'nova'}}
        self._core_plugin.update_port(context, mgmt_port['id'], p_dict)
    for port in ports or {}:
        p_dict = {'port': {'device_id': vm_id, 'device_owner': 'nova'}}
        self._core_plugin.update_port(context, port['id'], p_dict)

    myserver = {'server': {'adminPass': "MVk5HPrazHcG",
                           'id': vm_id,
                           'links': [{'href': "http://openstack.example.com/v2/"
                                              "openstack/servers/" + vm_id,
                                      'rel': "self"},
                                     {'href': "http://openstack.example.com/"
                                              "openstack/servers/" + vm_id,
                                      'rel': "bookmark"}]}}
    return myserver['server']

label: IOError
info: dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/plugins/cisco/device_manager/service_vm_lib.py/ServiceVMManager.dispatch_service_vm_fake
Row 8,211
function:

def main():
    global SETTINGS, args
    try:
        # warn the user that they are starting PyPXE as non-root user
        if os.getuid() != 0:
            print '\nWARNING: Not root. Servers will probably fail to bind.\n'

        # configure
        args = parse_cli_arguments()
        if args.JSON_CONFIG: # load from configuration file if specified
            try:
                config_file = open(args.JSON_CONFIG, 'rb')
            except IOError:
                sys.exit('Failed to open {0}'.format(args.JSON_CONFIG))
            try:
                loaded_config = json.load(config_file)
                config_file.close()
            except ValueError:
                sys.exit('{0} does not contain valid JSON'.format(args.JSON_CONFIG))
            for setting in loaded_config:
                if type(loaded_config[setting]) is unicode:
                    loaded_config[setting] = loaded_config[setting].encode('ascii')
            SETTINGS.update(loaded_config) # update settings with JSON config
            args = parse_cli_arguments() # re-parse, CLI options take precedence

        # ideally this would be in dhcp itself, but the chroot below *probably*
        # breaks the ability to open the config file.
        if args.STATIC_CONFIG:
            try:
                static_config = open(args.STATIC_CONFIG, 'rb')
            except IOError:
                sys.exit("Failed to open {0}".format(args.STATIC_CONFIG))
            try:
                loaded_statics = json.load(static_config)
                static_config.close()
            except __HOLE__:
                sys.exit("{0} does not contain valid json".format(args.STATIC_CONFIG))
        else:
            loaded_statics = dict()

        # setup main logger
        sys_logger = logging.getLogger('PyPXE')
        if args.SYSLOG_SERVER:
            handler = logging.handlers.SysLogHandler(address = (args.SYSLOG_SERVER, int(args.SYSLOG_PORT)))
        else:
            handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s %(message)s')
        handler.setFormatter(formatter)
        sys_logger.addHandler(handler)
        sys_logger.setLevel(logging.INFO)

        # pass warning to user regarding starting HTTP server without iPXE
        if args.USE_HTTP and not args.USE_IPXE and not args.USE_DHCP:
            sys_logger.warning('HTTP selected but iPXE disabled. PXE ROM must support HTTP requests.')

        # if the argument was pased to enabled ProxyDHCP then enable the DHCP server
        if args.DHCP_MODE_PROXY:
            args.USE_DHCP = True

        # if the network boot file name was not specified in the argument,
        # set it based on what services were enabled/disabled
        if args.NETBOOT_FILE == '':
            if not args.USE_IPXE:
                args.NETBOOT_FILE = 'pxelinux.0'
            elif not args.USE_HTTP:
                args.NETBOOT_FILE = 'boot.ipxe'
            else:
                args.NETBOOT_FILE = 'boot.http.ipxe'

        if args.NBD_WRITE and not args.NBD_COW:
            sys_logger.warning('NBD Write enabled but copy-on-write is not. Multiple clients may cause corruption')

        if args.NBD_COW_IN_MEM or args.NBD_COPY_TO_RAM:
            sys_logger.warning('NBD cowinmem and copytoram can cause high RAM usage')

        if args.NBD_COW and not args.NBD_WRITE: # cow implies write
            args.NBD_WRITE = True

        # make a list of running threads for each service
        running_services = []

        # configure/start TFTP server
        if args.USE_TFTP:
            # setup TFTP logger
            tftp_logger = sys_logger.getChild('TFTP')
            sys_logger.info('Starting TFTP server...')

            # setup the thread
            tftp_server = tftp.TFTPD(mode_debug = do_debug('tftp'), mode_verbose = do_verbose('tftp'), logger = tftp_logger, netboot_directory = args.NETBOOT_DIR)
            tftpd = threading.Thread(target = tftp_server.listen)
            tftpd.daemon = True
            tftpd.start()
            running_services.append(tftpd)

        # configure/start DHCP server
        if args.USE_DHCP:
            # setup DHCP logger
            dhcp_logger = sys_logger.getChild('DHCP')
            if args.DHCP_MODE_PROXY:
                sys_logger.info('Starting DHCP server in ProxyDHCP mode...')
            else:
                sys_logger.info('Starting DHCP server...')

            # setup the thread
            dhcp_server = dhcp.DHCPD(
                ip = args.DHCP_SERVER_IP,
                port = args.DHCP_SERVER_PORT,
                offer_from = args.DHCP_OFFER_BEGIN,
                offer_to = args.DHCP_OFFER_END,
                subnet_mask = args.DHCP_SUBNET,
                router = args.DHCP_ROUTER,
                dns_server = args.DHCP_DNS,
                broadcast = args.DHCP_BROADCAST,
                file_server = args.DHCP_FILESERVER,
                file_name = args.NETBOOT_FILE,
                use_ipxe = args.USE_IPXE,
                use_http = args.USE_HTTP,
                mode_proxy = args.DHCP_MODE_PROXY,
                mode_debug = do_debug('dhcp'),
                mode_verbose = do_verbose('dhcp'),
                whitelist = args.DHCP_WHITELIST,
                static_config = loaded_statics,
                logger = dhcp_logger,
                saveleases = args.LEASES_FILE)
            dhcpd = threading.Thread(target = dhcp_server.listen)
            dhcpd.daemon = True
            dhcpd.start()
            running_services.append(dhcpd)

        # configure/start HTTP server
        if args.USE_HTTP:
            # setup HTTP logger
            http_logger = sys_logger.getChild('HTTP')
            sys_logger.info('Starting HTTP server...')

            # setup the thread
            http_server = http.HTTPD(mode_debug = do_debug('http'), mode_verbose = do_debug('http'), logger = http_logger, netboot_directory = args.NETBOOT_DIR)
            httpd = threading.Thread(target = http_server.listen)
            httpd.daemon = True
            httpd.start()
            running_services.append(httpd)

        # configure/start NBD server
        if args.NBD_BLOCK_DEVICE:
            # setup NBD logger
            nbd_logger = sys_logger.getChild('NBD')
            sys_logger.info('Starting NBD server...')
            nbd_server = nbd.NBD(
                block_device = args.NBD_BLOCK_DEVICE,
                write = args.NBD_WRITE,
                cow = args.NBD_COW,
                in_mem = args.NBD_COW_IN_MEM,
                copy_to_ram = args.NBD_COPY_TO_RAM,
                ip = args.NBD_SERVER_IP,
                port = args.NBD_PORT,
                mode_debug = do_debug('nbd'),
                mode_verbose = do_verbose('nbd'),
                logger = nbd_logger,
                netboot_directory = args.NETBOOT_DIR)
            nbdd = threading.Thread(target = nbd_server.listen)
            nbdd.daemon = True
            nbdd.start()
            running_services.append(nbdd)

        sys_logger.info('PyPXE successfully initialized and running!')

        while map(lambda x: x.isAlive(), running_services):
            sleep(1)

    except KeyboardInterrupt:
        sys.exit('\nShutting down PyPXE...\n')

label: ValueError
info: dataset/ETHPy150Open psychomario/PyPXE/pypxe/server.py/main
Row 8,212
function:

def is_fp_closed(obj):
    """
    Checks whether a given file-like object is closed.

    :param obj:
        The file-like object to check.
    """
    try:
        # Check via the official file-like-object way.
        return obj.closed
    except AttributeError:
        pass

    try:
        # Check if the object is a container for another file-like object that
        # gets released on exhaustion (e.g. HTTPResponse).
        return obj.fp is None
    except __HOLE__:
        pass

    raise ValueError("Unable to determine whether fp is closed.")

label: AttributeError
info: dataset/ETHPy150Open CouchPotato/CouchPotatoServer/libs/requests/packages/urllib3/util/response.py/is_fp_closed
Row 8,213
function:

def options_tab(request, template_name="manage/voucher/options.html"):
    """Displays the vouchers options
    """
    try:
        voucher_options = VoucherOptions.objects.all()[0]
    except __HOLE__:
        voucher_options = VoucherOptions.objects.create()

    form = VoucherOptionsForm(instance=voucher_options)

    return render_to_string(template_name, RequestContext(request, {
        "form": form,
    }))

label: IndexError
info: dataset/ETHPy150Open diefenbach/django-lfs/lfs/manage/voucher/views.py/options_tab
Row 8,214
function:

@permission_required("core.manage_shop")
def manage_vouchers(request):
    """Redirects to the first voucher group or to no voucher groups view.
    """
    try:
        voucher_group = VoucherGroup.objects.all()[0]
    except __HOLE__:
        url = reverse("lfs_no_vouchers")
    else:
        url = reverse("lfs_manage_voucher_group", kwargs={"id": voucher_group.id})

    return HttpResponseRedirect(url)

label: IndexError
info: dataset/ETHPy150Open diefenbach/django-lfs/lfs/manage/voucher/views.py/manage_vouchers
Row 8,215
function:

@permission_required("core.manage_shop")
def add_vouchers(request, group_id):
    """
    """
    voucher_group = VoucherGroup.objects.get(pk=group_id)
    form = VoucherForm(data=request.POST)
    msg = ""

    if form.is_valid():
        try:
            amount = int(request.POST.get("amount", 0))
        except __HOLE__:
            amount = 0

        for i in range(0, amount):
            number = lfs.voucher.utils.create_voucher_number()
            counter = 0
            while Voucher.objects.filter(number=number).exists() and counter < 100:
                number = lfs.voucher.utils.create_voucher_number()
                counter += 1

            if counter == 100:
                msg = _(u"Unable to create unique Vouchers for the options specified.")
                break

            Voucher.objects.create(
                number=number,
                group=voucher_group,
                creator=request.user,
                kind_of=request.POST.get("kind_of", 0),
                value=request.POST.get("value", 0.0),
                start_date=request.POST.get("start_date"),
                end_date=request.POST.get("end_date"),
                effective_from=request.POST.get("effective_from"),
                tax_id=request.POST.get("tax"),
                limit=request.POST.get("limit")
            )
            msg = _(u"Vouchers have been created.")

    return render_to_ajax_response(
        (("#vouchers", vouchers_tab(request, voucher_group)), ), msg)

label: TypeError
info: dataset/ETHPy150Open diefenbach/django-lfs/lfs/manage/voucher/views.py/add_vouchers
Row 8,216
function:

@permission_required("core.manage_shop")
def save_voucher_options(request):
    """Saves voucher options.
    """
    try:
        voucher_options = VoucherOptions.objects.all()[0]
    except __HOLE__:
        voucher_options = VoucherOptions.objects.create()

    form = VoucherOptionsForm(instance=voucher_options, data=request.POST)
    if form.is_valid():
        form.save()

    return render_to_ajax_response(
        (("#options_tab", options_tab(request)),),
        _(u"Voucher options has been saved.")
    )

label: IndexError
info: dataset/ETHPy150Open diefenbach/django-lfs/lfs/manage/voucher/views.py/save_voucher_options
Row 8,217
function:

@wsgi.response(202)
def create(self, req, body, gid, is_proxy=False):

    def _validate(context, body, gid, is_proxy=False):
        proxy = db.process_get_all(
            context, gid, filters={"is_proxy": True})
        if is_proxy:
            if len(proxy) > 0:
                msg = _(
                    "Proxy process already exists in the group %s" % gid)
                raise exception.InvalidInput(reason=msg)
        else:
            if len(proxy) != 1:
                msg = _(
                    "Proxy process does not exist in the group %s" % gid)
                raise webob.exc.HTTPBadRequest(explanation=msg)

        keyname = "proxy" if is_proxy else "process"
        if not self.is_valid_body(body, keyname):
            msg = _("Invalid request body")
            raise exception.InvalidInput(reason=msg)

        values = body[keyname]
        ppid = values.get("ppid")
        name = values.get("name")
        keypair_id = values.get("keypair_id")
        securitygroup_ids = values.get("securitygroup_ids")
        glance_image_id = values.get("glance_image_id")
        nova_flavor_id = values.get("nova_flavor_id")
        userdata = values.get("userdata")
        args = values.get("args")

        self._uuid_check(gid, ppid, keypair_id)

        pid = unicode(uuid.uuid4())
        if not name:
            prefix = "proxy-" if is_proxy else "process-"
            name = prefix + pid

        if ppid:
            parent_process = db.process_get_by_pid(context, gid, ppid)

        nova_keypair_id = None
        if keypair_id:
            keypair = db.keypair_get_by_keypair_id(
                context, gid, keypair_id)
            nova_keypair_id = keypair["nova_keypair_id"]
        elif ppid:
            keypair_id = parent_process.get("keypair_id")
            if keypair_id:
                keypair = db.keypair_get_by_keypair_id(
                    context, gid, keypair_id)
                nova_keypair_id = keypair["nova_keypair_id"]
        else:
            default_keypair = db.keypair_get_all(
                context, gid, filters={"is_default": True})
            if default_keypair:
                keypair_id = default_keypair[0]["keypair_id"]
                nova_keypair_id = default_keypair[0]["nova_keypair_id"]

        if securitygroup_ids is not None and\
                not isinstance(securitygroup_ids, list):
            msg = _("securitygroupids must be a list")
            raise exception.InvalidInput(reason=msg)
        elif securitygroup_ids:
            neutron_securitygroup_ids = []
            for id in securitygroup_ids:
                self._uuid_check(securitygroup_id=id)
                securitygroup = db.securitygroup_get_by_securitygroup_id(
                    context, gid, id)
                neutron_securitygroup_ids.append(
                    securitygroup["neutron_securitygroup_id"])
        elif ppid:
            securitygroups = parent_process.get("securitygroups")
            securitygroup_ids =\
                [securitygroup["securitygroup_id"]
                 for securitygroup in securitygroups]
            neutron_securitygroup_ids =\
                [securitygroup["neutron_securitygroup_id"]
                 for securitygroup in securitygroups]
        else:
            default_securitygroups = db.securitygroup_get_all(
                context, gid, filters={"is_default": True})
            if default_securitygroups:
                securitygroup_ids =\
                    [securitygroup["securitygroup_id"]
                     for securitygroup in default_securitygroups]
                neutron_securitygroup_ids =\
                    [securitygroup["neutron_securitygroup_id"]
                     for securitygroup in default_securitygroups]
            else:
                msg = _("securitygroup_ids is required. Default securitygroup_ids are not registered.")
                raise exception.InvalidInput(reason=msg)

        if not glance_image_id and ppid:
            glance_image_id = parent_process.get("glance_image_id")

        if not nova_flavor_id and ppid:
            nova_flavor_id = parent_process.get("nova_flavor_id")

        if userdata:
            try:
                base64.b64decode(userdata)
            except __HOLE__:
                msg = _("userdadta must be a base64 encoded value.")
                raise exception.InvalidInput(reason=msg)

        networks = db.network_get_all(context, gid)
        if not networks:
            msg = _("Netwoks does not exist in the group %s" % gid)
            raise webob.exc.HTTPBadRequest(explanation=msg)
        network_ids =\
            [network["network_id"] for network in networks]
        neutron_network_ids =\
            [network["neutron_network_id"] for network in networks]
        nics = []
        for id in neutron_network_ids:
            nics.append({"net-id": id})

        if args is None:
            args = {}
        elif args is not None and\
                not isinstance(args, dict):
            msg = _("args must be a dict.")
            raise exception.InvalidInput(reason=msg)
        else:
            for key in args.keys():
                args[key] = str(args[key])

        default_args = {
            "gid": gid,
            "pid": pid,
        }
        if ppid:
            default_args["ppid"] = ppid

        if is_proxy:
            default_args["rackapi_ip"] = cfg.CONF.my_ip
            default_args["os_username"] = cfg.CONF.os_username
            default_args["os_password"] = cfg.CONF.os_password
            default_args["os_tenant_name"] = cfg.CONF.os_tenant_name
            default_args["os_auth_url"] = cfg.CONF.os_auth_url
            default_args["os_region_name"] = cfg.CONF.os_region_name
        else:
            proxy_instance_id = proxy[0]["nova_instance_id"]
            default_args["proxy_ip"] = self.manager.get_process_address(
                context, proxy_instance_id)
        args.update(default_args)

        valid_values = {}
        valid_values["gid"] = gid
        valid_values["ppid"] = ppid
        valid_values["pid"] = pid
        valid_values["display_name"] = name
        valid_values["keypair_id"] = keypair_id
        valid_values["securitygroup_ids"] = securitygroup_ids
        valid_values["glance_image_id"] = glance_image_id
        valid_values["nova_flavor_id"] = nova_flavor_id
        valid_values["userdata"] = userdata
        valid_values["args"] = json.dumps(args)
        valid_values["is_proxy"] = True if is_proxy else False
        valid_values["network_ids"] = network_ids

        if is_proxy:
            ipc_endpoint = values.get("ipc_endpoint")
            shm_endpoint = values.get("shm_endpoint")
            fs_endpoint = values.get("fs_endpoint")
            if ipc_endpoint:
                utils.check_string_length(
                    ipc_endpoint, 'ipc_endpoint', min_length=1,
                    max_length=255)
            if shm_endpoint:
                utils.check_string_length(
                    shm_endpoint, 'shm_endpoint', min_length=1,
                    max_length=255)
            if fs_endpoint:
                utils.check_string_length(
                    fs_endpoint, 'fs_endpoint', min_length=1,
                    max_length=255)
            valid_values["ipc_endpoint"] = ipc_endpoint
            valid_values["shm_endpoint"] = shm_endpoint
            valid_values["fs_endpoint"] = fs_endpoint

        boot_values = {}
        boot_values["name"] = name
        boot_values["key_name"] = nova_keypair_id
        boot_values["security_groups"] = neutron_securitygroup_ids
        boot_values["image"] = glance_image_id
        boot_values["flavor"] = nova_flavor_id
        boot_values["userdata"] = userdata
        boot_values["meta"] = args
        boot_values["nics"] = nics

        return valid_values, boot_values

    try:
        context = req.environ['rack.context']
        values, boot_values = _validate(context, body, gid, is_proxy)
        nova_instance_id, status = self.manager.process_create(
            context, **boot_values)
        values["nova_instance_id"] = nova_instance_id
        values["user_id"] = context.user_id
        values["project_id"] = context.project_id
        process = db.process_create(context, values,
                                    values.pop("network_ids"),
                                    values.pop("securitygroup_ids"))
        process["status"] = status
    except exception.InvalidInput as e:
        raise webob.exc.HTTPBadRequest(explanation=e.format_message())
    except exception.NotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.format_message())

    return self._view_builder.create(process)

label: TypeError
info: dataset/ETHPy150Open openstack/rack/rack/api/v1/processes.py/Controller.create
Row 8,218
function:

def decode_to_string(toDecode):
    """
    This function is needed for Python 3,
    because a subprocess can return bytes instead of a string.
    """
    try:
        return toDecode.decode('utf-8')
    except __HOLE__: # bytesToDecode was of type string before
        return toDecode

label: AttributeError
info: dataset/ETHPy150Open sosy-lab/benchexec/benchexec/util.py/decode_to_string
Row 8,219
function:

def measure_energy(oldEnergy=None):
    '''
    returns a dictionary with the currently available values of energy consumptions (like a time-stamp).
    If oldEnergy is not None, the difference (currentValue - oldEnergy) is returned.
    '''
    newEnergy = {}
    executable = find_executable('read-energy.sh', exitOnError=False)
    if executable is None: # not available on current system
        logging.debug(
            'Energy measurement not available because read-energy.sh could not be found.')
        return newEnergy

    for energyType in ENERGY_TYPES:
        logging.debug('Reading %s energy measurement for value.', energyType)
        energysh = subprocess.Popen([executable, energyType], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = energysh.communicate()
        if energysh.returncode or stderr:
            logging.debug('Error while reading %s energy measurement: retval=%s, out=%s, err=%s',
                          energyType, energysh.returncode, stdout, stderr)
        try:
            newEnergy[energyType] = int(stdout)
        except __HOLE__:
            logging.debug('Invalid value while reading %s energy measurement: %s',
                          energyType, stdout)

    logging.debug('Finished reading energy measurements.')

    if oldEnergy is None:
        return newEnergy
    else:
        return _energy_difference(newEnergy, oldEnergy)

label: ValueError
info: dataset/ETHPy150Open sosy-lab/benchexec/benchexec/util.py/measure_energy
Row 8,220
function:

def main():
    global FIELDS_TO_CONVERT

    # construct option parser
    parser = OptionParser()
    parser.add_option("-f", "--from", dest="fromencoding",
                      help="Override original encoding (ascii)", default='ascii')
    parser.add_option("-t", "--to", dest="toencoding",
                      help="Override target encoding (utf-16)", default='utf-16')
    parser.add_option("-4", "--strip-integer", action="store_true", dest="stripint", default=False,
                      help="Strip first 4 bytes of tag contents. To clean some malformed tags")
    parser.add_option('-s', '--singletag', dest="singletag",
                      help="Only convert this single tag", default='')
    parser.add_option('-p', '--pretend', action="store_true", dest="pretend", default=False,
                      help="Pretend (don't write to file)")

    (options, files) = parser.parse_args()

    if options.singletag:
        FIELDS_TO_CONVERT = [options.singletag]

    errors = []
    for filename in files:
        print "Converting %s:" % filename
        try:
            id3 = ID3v2(filename, ID3V2_FILE_MODIFY)
        except ID3Exception:
            errors.append((filename, "Unable to find ID3v2 tag"))
            id3 = ID3v2(filename, ID3V2_FILE_NEW)

        id3v1 = ID3v1(filename)
        id3v1.parse()
        if not id3v1.tag.has_key("songname"):
            errors.append((filename, "Unable to find ID3v1 tag"))

        framekeys = []
        for f in id3.frames:
            framekeys.append(f.fid)
            if f.fid in FIELDS_TO_CONVERT:
                if options.stripint:
                    stripped = f.fields[1][4:]
                else:
                    stripped = f.fields[1]

                # if the field is empty, maybe check ID3v1 ?
                if len(f.fields[1]) == 0:
                    if f.fid == 'TALB' and id3v1.tag.has_key("album"):
                        f.fields = quicktext(ID3V2_FIELD_ENC_ISO8859_1, id3v1.tag["album"])
                    elif f.fid == 'TPE1' and id3v1.tag.has_key("artist"):
                        f.fields = quicktext(ID3V2_FIELD_ENC_ISO8859_1, id3v1.tag["artist"])
                    elif f.fid == 'TIT2' and id3v1.tag.has_key("songname"):
                        f.fields = quicktext(ID3V2_FIELD_ENC_ISO8859_1, id3v1.tag["songname"])

                # convert from ascii
                if ID3v2Frame.encodings[f.fields[0]] == 'iso8859-1':
                    try:
                        utf = stripped.decode(options.fromencoding)
                    except (__HOLE__, UnicodeEncodeError):
                        errors.append((filename,
                                       "Unable to convert: %s from %s encoding (%s)" %
                                       (f.fid, options.fromencoding, str([stripped]))))
                        continue
                else:
                    utf = stripped

                if options.toencoding in ['utf-8', 'utf-16', 'utf-16be']:
                    f.fields = (ID3v2Frame.encodings[options.toencoding], utf, [utf])
                else:
                    f.fields = (ID3V2_FIELD_ENC_ISO8859_1,
                                utf.encode(parser.toencoding),
                                [utf.encode(parser.toencoding)])
            elif f.fid[0] == 'T':
                if options.stripint:
                    f.fields = (f.fields[0], f.fields[1][4:], [f.fields[1][4:]])

        # check if TALB, TPE1 and TIT2 are present, if not, add them from ID3v1
        if 'TALB' not in framekeys and \
           'TAL' not in framekeys and \
           id3v1.tag.has_key("album"):
            if id3.tag["version"] > 0x200:
                newframe = ID3v2Frame(fid='TALB')
            elif id3.tag["version"] == 0x200:
                newframe = ID3v2_2Frame(fid='TAL')
            try:
                newframe.fields = quicktext(ID3V2_FIELD_ENC_UTF16,
                                            id3v1.tag["album"].strip().decode(options.fromencoding))
                newframe.meta = {"status": 0, "format": 0}
                id3.frames.append(newframe)
            except UnicodeDecodeError:
                errors.append((filename, "Unable to convert ID3v1 to TALB"))

        if 'TIT2' not in framekeys \
           and 'TT2' not in framekeys \
           and id3v1.tag.has_key("songname"):
            if id3.tag["version"] > 0x200:
                newframe = ID3v2Frame(fid='TIT2')
            elif id3.tag["version"] == 0x200:
                newframe = ID3v2_2Frame(fid='TT2')
            try:
                newframe.fields = quicktext(ID3V2_FIELD_ENC_UTF16,
                                            id3v1.tag["songname"].strip().decode(options.fromencoding))
                newframe.meta = {"status": 0, "format": 0}
                id3.frames.append(newframe)
            except UnicodeDecodeError:
                errors.append((filename, "Unable to convert ID3v1 to TIT2"))
                pass

        if 'TPE1' not in framekeys \
           and 'TP1' not in framekeys \
           and id3v1.tag.has_key("artist"):
            if id3.tag["version"] > 0x200:
                newframe = ID3v2Frame(fid='TPE1')
            elif id3.tag["version"] == 0x200:
                newframe = ID3v2_2Frame(fid='TP1')
            try:
                newframe.fields = quicktext(ID3V2_FIELD_ENC_UTF16,
                                            id3v1.tag["artist"].strip().decode(options.fromencoding))
                newframe.meta = {"status": 0, "format": 0}
                id3.frames.append(newframe)
            except UnicodeDecodeError:
                errors.append((filename, "Unable to convert ID3v1 to TPE1"))
                pass

        if 'TRCK' not in framekeys \
           and 'TRK' not in framekeys \
           and id3v1.tag.has_key("track") \
           and id3v1.tag["track"] != -1:
            if id3.tag["version"] > 0x200:
                newframe = ID3v2Frame(fid='TRCK')
            elif id3.tag["version"] == 0x200:
                newframe = ID3v2_2Frame(fid='TRK')
            try:
                newframe.fields = (ID3V2_FIELD_ENC_ISO8859_1,
                                   str(id3v1.tag["track"]),
                                   [str(id3v1.tag["track"])])
                newframe.meta = {"status": 0, "format": 0}
                id3.frames.append(newframe)
            except UnicodeDecodeError:
                errors.append((filename, "Unable to convert ID3v1 to TRK"))
                pass

        if 'TYER' not in framekeys \
           and 'TYE' not in framekeys \
           and id3v1.tag.has_key("year"):
            if id3.tag["version"] > 0x200:
                newframe = ID3v2Frame(fid='TYER')
            elif id3.tag["version"] == 0x200:
                newframe = ID3v2_2Frame(fid='TYE')
            try:
                newframe.fields = (ID3V2_FIELD_ENC_ISO8859_1,
                                   id3v1.tag["year"],
                                   [id3v1.tag["year"]])
                newframe.meta = {"status": 0, "format": 0}
                id3.frames.append(newframe)
            except UnicodeDecodeError:
                errors.append((filename, "Unable to convert ID3v1 to TYE"))
                pass

        id3.commit(pretend=options.pretend)

    if errors:
        for x in errors:
            print x[0], ":", x[1]

label: UnicodeDecodeError
info: dataset/ETHPy150Open Ciantic/pytagger/mp3conv.py/main
Row 8,221
function:

def load_library(self, *names, **kwargs):
    '''Find and load a library.

    More than one name can be specified, they will be tried in order.
    Platform-specific library names (given as kwargs) are tried first.

    Raises ImportError if library is not found.
    '''
    if 'framework' in kwargs and self.platform == 'darwin':
        return self.load_framework(kwargs['framework'])

    platform_names = kwargs.get(self.platform, [])
    if type(platform_names) in (str, unicode):
        platform_names = [platform_names]
    elif type(platform_names) is tuple:
        platform_names = list(platform_names)

    if self.platform == 'linux2':
        for name in names:
            libname = ctypes.util.find_library(name)
            platform_names.append(libname or 'lib%s.so' % name)

    platform_names.extend(names)
    for name in platform_names:
        try:
            lib = ctypes.cdll.LoadLibrary(name)
            if _debug_lib:
                print name
            if _debug_trace:
                lib = _TraceLibrary(lib)
            return lib
        except __HOLE__:
            path = self.find_library(name)
            if path:
                try:
                    lib = ctypes.cdll.LoadLibrary(path)
                    if _debug_lib:
                        print path
                    if _debug_trace:
                        lib = _TraceLibrary(lib)
                    return lib
                except OSError:
                    pass
    raise ImportError('Library "%s" not found.' % names[0])

label: OSError
info: dataset/ETHPy150Open ardekantur/pyglet/pyglet/lib.py/LibraryLoader.load_library
Row 8,222
function:

def _create_ld_so_cache(self):
    # Recreate search path followed by ld.so.  This is going to be
    # slow to build, and incorrect (ld.so uses ld.so.cache, which may
    # not be up-to-date).  Used only as fallback for distros without
    # /sbin/ldconfig.
    #
    # We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.
    directories = []
    try:
        directories.extend(os.environ['LD_LIBRARY_PATH'].split(':'))
    except KeyError:
        pass

    try:
        directories.extend([dir.strip() for dir in open('/etc/ld.so.conf')])
    except __HOLE__:
        pass

    directories.extend(['/lib', '/usr/lib'])

    cache = {}
    lib_re = re.compile('lib(.*)\.so')
    for dir in directories:
        try:
            for file in os.listdir(dir):
                if '.so' not in file:
                    continue

                # Index by filename
                path = os.path.join(dir, file)
                if file not in cache:
                    cache[file] = path

                # Index by library name
                match = lib_re.match(file)
                if match:
                    library = match.group(1)
                    if library not in cache:
                        cache[library] = path
        except OSError:
            pass

    self._ld_so_cache = cache

label: IOError
info: dataset/ETHPy150Open ardekantur/pyglet/pyglet/lib.py/LinuxLibraryLoader._create_ld_so_cache
Row 8,223
function:

def _get_plugins_from_settings():
    plugins = (list(getattr(settings, 'NOSE_PLUGINS', [])) +
               ['django_nose.plugin.TestReorderer'])
    for plug_path in plugins:
        try:
            dot = plug_path.rindex('.')
        except __HOLE__:
            raise exceptions.ImproperlyConfigured(
                "%s isn't a Nose plugin module" % plug_path)
        p_mod, p_classname = plug_path[:dot], plug_path[dot + 1:]

        try:
            mod = import_module(p_mod)
        except ImportError as e:
            raise exceptions.ImproperlyConfigured(
                'Error importing Nose plugin module %s: "%s"' % (p_mod, e))

        try:
            p_class = getattr(mod, p_classname)
        except AttributeError:
            raise exceptions.ImproperlyConfigured(
                'Nose plugin module "%s" does not define a "%s"' %
                (p_mod, p_classname))

        yield p_class()

label: ValueError
info: dataset/ETHPy150Open django-nose/django-nose/django_nose/runner.py/_get_plugins_from_settings
Row 8,224
function:

def import_from_string(val, setting_name):
    """
    Attempt to import a class from a string representation.
    """
    try:
        # Nod to tastypie's use of importlib.
        parts = val.split('.')
        module_path, class_name = '.'.join(parts[:-1]), parts[-1]
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except (ImportError, __HOLE__) as e:
        msg = "Could not import '%s' for API setting '%s'. %s: %s." % (val, setting_name, e.__class__.__name__, e)
        raise ImportError(msg)

label: AttributeError
info: dataset/ETHPy150Open tomchristie/django-rest-framework/rest_framework/settings.py/import_from_string
Row 8,225
function:

def __getattr__(self, attr):
    if attr not in self.defaults:
        raise AttributeError("Invalid API setting: '%s'" % attr)

    try:
        # Check if present in user settings
        val = self.user_settings[attr]
    except __HOLE__:
        # Fall back to defaults
        val = self.defaults[attr]

    # Coerce import strings into classes
    if attr in self.import_strings:
        val = perform_import(val, attr)

    # Cache the result
    setattr(self, attr, val)
    return val

label: KeyError
info: dataset/ETHPy150Open tomchristie/django-rest-framework/rest_framework/settings.py/APISettings.__getattr__
Row 8,226
function:

def supershapes_dict():
    """Return a dict of all supershapes in this module keyed by name."""
    current_module = sys.modules[__name__]
    supershapes = dict()
    for name, obj in inspect.getmembers(current_module):
        try:
            if issubclass(obj, _SuperShape) and (name[0] != '_'):
                supershapes[name] = obj()
        except __HOLE__:
            pass  # issubclass complains for non class obj
    return supershapes

label: TypeError
info: dataset/ETHPy150Open kobejohn/polymaze/polymaze/shapes.py/supershapes_dict
Row 8,227
function:

def edge(self, neighbor_index):
    """Return one edge of this shape by the index of the sharing neighbor.

    Note: When an edge is shared, both shapes will return the same edge.
    """
    # try to get from self
    try:
        return self._owned_edges[neighbor_index]
    except __HOLE__:
        pass
    # get from neighbor
    neighbor = self._grid.get(neighbor_index)
    if neighbor:
        # neighbor stores the shared edge under this shape's index
        return neighbor._owned_edges[self.index()]
    # if the code gets here, it's basically a runtime error

label: KeyError
info: dataset/ETHPy150Open kobejohn/polymaze/polymaze/shapes.py/_ComponentShape.edge
Row 8,228
function:

def endpoints(self, requesting_shape_index=None):
    """Return the xy, xy end points of this edge.

    kwargs: requesting_shape_index - if the clockwise order of vertices
        is desired, provide this so the edge knows which way to sort
    """
    # default if the sorting doesn't matter
    other_side_lookup = {self._neighbor_1_index: self._neighbor_2_index,
                         self._neighbor_2_index: self._neighbor_1_index}
    if requesting_shape_index:
        # use only the provided index
        try:
            n_index = other_side_lookup[requesting_shape_index]
        except __HOLE__:
            raise ValueError('The requesting shape is not one of the'
                             ' sharing neighbors of this edge.')
        requesting_shape = self._grid.get(requesting_shape_index)
    else:
        # use either index if not provided
        requesting_shape = self._grid.get(self._neighbor_1_index)
        if not requesting_shape:
            requesting_shape = self._grid.get(self._neighbor_2_index)
        n_index = other_side_lookup[requesting_shape.index()]
    v1, v2 = (requesting_shape._edge_data[n_index]['counter_vertex'],
              requesting_shape._edge_data[n_index]['clock_vertex'])
    return v1, v2

label: KeyError
info: dataset/ETHPy150Open kobejohn/polymaze/polymaze/shapes.py/Edge.endpoints
Row 8,229
function:

def locateOnScreen(image, minSearchTime=0, **kwargs):
    """minSearchTime - amount of time in seconds to repeat taking
    screenshots and trying to locate a match.  The default of 0 performs
    a single search.
    """
    start = time.time()
    while True:
        try:
            screenshotIm = screenshot(region=None) # the locateAll() function must handle cropping to return accurate coordinates, so don't pass a region here.
            retVal = locate(image, screenshotIm, **kwargs)
            try:
                screenshotIm.fp.close()
            except __HOLE__:
                # Screenshots on Windows won't have an fp since they came from
                # ImageGrab, not a file. Screenshots on Linux will have fp set
                # to None since the file has been unlinked
                pass
            if retVal or time.time() - start > minSearchTime:
                return retVal
        except ImageNotFoundException:
            if time.time() - start > minSearchTime:
                raise

label: AttributeError
info: dataset/ETHPy150Open asweigart/pyscreeze/pyscreeze/__init__.py/locateOnScreen
Row 8,230
function:

def locateAllOnScreen(image, **kwargs):
    screenshotIm = screenshot(region=None) # the locateAll() function must handle cropping to return accurate coordinates, so don't pass a region here.
    retVal = locateAll(image, screenshotIm, **kwargs)
    try:
        screenshotIm.fp.close()
    except __HOLE__:
        # Screenshots on Windows won't have an fp since they came from
        # ImageGrab, not a file. Screenshots on Linux will have fp set
        # to None since the file has been unlinked
        pass
    return retVal

label: AttributeError
info: dataset/ETHPy150Open asweigart/pyscreeze/pyscreeze/__init__.py/locateAllOnScreen
Row 8,231
function:

def get_library(name):
    """
    Returns a ctypes.CDLL or None
    """
    try:
        if platform.system() == 'Windows':
            return ctypes.windll.LoadLibrary(name)
        else:
            return ctypes.cdll.LoadLibrary(name)
    except __HOLE__:
        pass
    return None

label: OSError
info: dataset/ETHPy150Open NVIDIA/DIGITS/digits/device_query.py/get_library
Row 8,232
function:

def _trim_disassembly(stdout):
    if not stdout:
        return stdout
    start_loc = stdout.find("Dump of assembler code")
    end_loc = stdout.find("End of assembler dump.", start_loc)
    if start_loc == -1 or end_loc == -1:
        return "%s\nError trimming assembler dump. start_loc = %d, end_loc = %d" % (stdout, start_loc, end_loc)
    try:
        a, b = stdout[start_loc:end_loc].split("\n=>")
    except __HOLE__:
        return "%s\nError trimming assembler dump. Could not find '=>'" % (stdout)
    a = a.splitlines()
    start_loc += len(a.pop(0))
    return "%s\n%s\n=>%s\n%s" % (stdout[:start_loc],
                                 "\n".join(a[-15:]),
                                 "\n".join(b.splitlines()[:15]),
                                 stdout[end_loc:])

label: ValueError
info: dataset/ETHPy150Open blackberry/ALF/alf/debug/_gdb.py/_trim_disassembly
Row 8,233
function:

def run_with_gdb(target_cmd, symbols=None, solib_search=None, env=None,
                 callback=None, callback_args=None,
                 timeout=_common.DEFAULT_TIMEOUT,
                 memory_limit=None, idle_limit=None):
    """
    This function is similar to the :func:`run` function above, except the
    target is executed under control of the GNU Debugger.  Symbols may be
    specified manually, otherwise they are expected to be findable by GDB
    (usually included in the target itself).

    :func:`run_with_gdb` returns a :class:`~alf.FuzzResult` instance.
    If no crash was detected, the :attr:`~alf.FuzzResult.classification`
    member of the :class:`~alf.FuzzResult` will be
    :data:`~alf.debug.NOT_AN_EXCEPTION`.

    Classifications: :data:`~alf.debug.NOT_AN_EXCEPTION`,
    :data:`~alf.debug.TIMEOUT`, :data:`~alf.debug.UNKNOWN`.

    Availability: Unix, Windows.
    """
    classification = None
    cpid = None
    if platform.system() == "Windows":
        _common._set_gflags(target_cmd[0])
    if platform.system() == "QNX":
        if not os.path.isfile("libc.so.3"):
            if not os.path.isfile("/root/symbols/x86/lib/libc.so.3.sym"):
                raise RuntimeError("Cannot find /root/symbols/x86/lib/libc.so.3.sym")
            os.symlink("/root/symbols/x86/lib/libc.so.3.sym", "libc.so.3")
    fd, temp_fn = tempfile.mkstemp(prefix="gdb", suffix=".log", dir=".")
    os.close(fd)
    nul = open(os.devnull, "w+")
    try:
        with open(temp_fn, "w+") as f:
            if env is None:
                env = dict(os.environ)
            env["LIBC_FATAL_STDERR_"] = "1"
            p = _common.subprocess.Popen(
                _gdb_cmd(target_cmd[0], solib_search) + target_cmd,
                close_fds=CLOSE_FDS, stdout=f, stderr=f, stdin=nul,
                creationflags=_common.POPEN_FLAGS, env=env)
            try:
                with open(temp_fn) as fr:
                    while p.poll() is None:
                        line = fr.readline()
                        m = re.match(r"^\*\s+1\s+Thread\s+\w+\s+\(LWP\s+(?P<pid>[0-9]+)\)", line)
                        if m is None:
                            m = re.match(r"^\*\s+1\s+(pid|process|Thread)\s+(?P<pid>[0-9]+)", line)
                        if m:
                            cpid = int(m.group("pid"))
                            break
                cb_res = _common._call_callback(callback, callback_args, p.pid)
                if cb_res == _common.CB_ERROR:
                    raise RuntimeError("callback() returned error")
                target_mon = _common.TargetMonitor(cpid, idle_limit=idle_limit,
                                                   memory_limit=memory_limit,
                                                   time_limit=timeout)
                while p.poll() is None:
                    if target_mon.check_memory():
                        classification = _common.EXCESS_MEMORY_USAGE
                        break
                    if target_mon.check_idle():
                        break
                    if target_mon.check_timeout():
                        classification = _common.TIMEOUT
                        break
                    time.sleep(0.01)
            finally:
                while p.poll() is None:
                    try:
                        if platform.system() == "QNX":
                            attempt = -1
                            sigs = [signal.SIGTERM, signal.SIGKILL]
                            while p.poll() is None:
                                attempt += 1
                                assert attempt < len(sigs), "Failed to kill child process"
                                _send_signal(sigs[attempt], cpid, p.pid)
                                kill_time = _common.prof_timer()
                                while _common.prof_timer() - kill_time < SLAY_TIMEOUT:
                                    if p.poll() is not None:
                                        break
                                    time.sleep(0.25)
                        elif platform.system() == "Windows":
                            _send_signal(signal.CTRL_BREAK_EVENT, cpid, p.pid)
                        else:
                            _send_signal(signal.SIGTERM, cpid, p.pid)
                    except __HOLE__:
                        pass
            exit_code = p.wait()
            f.seek(0, os.SEEK_SET)
            stdout = f.read()
    finally:
        _common.delete(temp_fn)
        nul.close()
    m = re.search(r"Traceback \(\D+\):.+Python command:", stdout, re.DOTALL)
    if m:
        tb = m.group(0)
        tb = tb[:tb.rfind("\n")]
        if not tb.endswith("No threads running"):
            raise RuntimeError("GDB Python Failure\n\n%s" % tb)
        else:
            return _common.FuzzResult(_common.NOT_AN_EXCEPTION, stdout)
    backtrace, debug_classification = _process_gdb_output(stdout)
    if cb_res == _common.CB_HANG:
        classification = _common.TIMEOUT
    elif classification is None:
        if cb_res == _common.CB_FAIL:
            classification = _common.UNKNOWN
        else:
            classification = debug_classification
    stdout = _trim_disassembly(stdout)
    stdout = _common._limit_output_length(stdout)
    return _common.FuzzResult(classification, stdout, backtrace, exit_code)

label: OSError
info: dataset/ETHPy150Open blackberry/ALF/alf/debug/_gdb.py/run_with_gdb
Row 8,234
function:

def Load(self):
    fname = User.User.OwnFile(self._name, "usermemo")
    try:
        os.stat(fname)
    except OSError as exc:
        data = self.read()
        if (data is None):
            raise ServerError("can't load usermemo for %s" % self._name)
        try:
            with open(fname, "wb") as f:
                f.write(data)
        except IOError:
            raise ServerError("can't write usermemo for %s" % self._name)
    try:
        fmemo = open(fname, "r+b")
    except __HOLE__:
        raise ServerError("can't open usermemo for %s" % self._name)
    (self._memo, _) = Util.Mmap(fmemo, mmap.PROT_READ | mmap.PROT_WRITE, mmap.MAP_SHARED)
    fmemo.close()
    if (self._memo == None):
        raise ServerError("can't mmap usermemo for %s" % self._name)
    return True

label: IOError
info: dataset/ETHPy150Open HenryHu/pybbs/UserMemo.py/UserMemo.Load
Row 8,235
function:

def read(self):
    datafile = User.User.OwnFile(self._name, ".userdata")
    try:
        with open(datafile, "rb") as f:
            return f.read(self.size)
    except IOError:
        try:
            if not hasattr(self, '__reserved'):
                Util.InitStruct(self)
                self.userid = self._name
            data = self.pack(False)
            with open(datafile, "wb") as f:
                f.write(data)
            return data
        except __HOLE__:
            return None

label: IOError
info: dataset/ETHPy150Open HenryHu/pybbs/UserMemo.py/UserMemo.read
Row 8,236
function:

def auth_api_request(self, api_endpoint, method='GET', input_dict=None):
    '''Send API request to DEP or depsim providing OAuth where necessary.

    We implement the logic of attempting OAuth1 authentication here. We
    could have tried to implement recursive calling of `api_request()`
    but to avoid potential infinite loops and to more granularly control
    the flow we opted to manually code the error cases and retries here.
    '''
    # if we have no auth/session token, then try to get one
    if not self.session_token:
        self.oauth1()
    try:
        return self.api_request(api_endpoint, method, input_dict)
    except __HOLE__, e:
        response = e.read().strip("\"\n\r")
        response_l = response.lower()

        if e.code == 403 and response_l == 'forbidden':
            # authentication token is invalid
            # try to get a new one
            self.oauth1()

            # try the request a second time
            return self.api_request(api_endpoint, method, input_dict)

        if e.code == 401 and response_l == 'unauthorized':
            # authentication token has expired
            # try to get a new one
            self.oauth1()

            # try the request a second time
            return self.api_request(api_endpoint, method, input_dict)

        if e.code == 400:
            raise DEP400Error(response, e)

        raise

label: HTTPError
info: dataset/ETHPy150Open jessepeterson/commandment/commandment/utils/dep.py/DEP.auth_api_request
Row 8,237
function:

def get_admin(self):
    """ Returns the current admin system to render templates within
    the ``Flask-Admin`` system.

    Returns
    -------
    obj
        Current admin view
    """
    try:
        return self._admin
    except __HOLE__:
        raise NotImplementedError('``_admin`` has not been declared.')

label: AttributeError
info: dataset/ETHPy150Open thisissoon/Flask-Velox/flask_velox/admin/mixins/template.py/AdminTemplateMixin.get_admin
Row 8,238
function:

def write(self, vb=voicebox):
    if not vb.voices:
        print("Cannot write with an empty Voicebox!")
        return
    self.chooseVoiceMenu(vb)
    scriptlog = []
    cur = 0
    linelog = []
    hashtable = {}
    while 1:
        before = linelog[:cur]
        after = linelog[cur:]
        if vb.vision >= len(before):
            recent_words = before
        else:
            firstword = len(before) - vb.vision
            recent_words = before[firstword:]
        # hash check on this set of recent words
        if "".join(recent_words) in hashtable:
            options = hashtable["".join(recent_words)]
        else:
            options = vb.getOptions(recent_words)[0:vb.num_opts]
            hashtable["".join(recent_words)] = options
        self.printOptions(options)
        try:
            response = raw_input('Choose one\n')
        except __HOLE__:
            response = input('Choose one\n')
        if response.isdigit():
            response = int(response)
            if response >= 1 and response <= vb.num_opts:
                before += [options[response-1][0]]
                linelog = before + after
                cur += 1
                print(self.voiceHeader(vb))
                self.printLog(scriptlog+linelog, cur)
            elif response == 0:
                scriptlog = scriptlog + linelog
                print('Final output: ')
                print(' '.join(scriptlog))
                return scriptlog
            else:
                print("Number out of range!")
                self.printLog(scriptlog+linelog, cur)
        elif response == 'x':
            if len(before) == 0:
                print("Cannot delete the start of the sentence!")
            else:
                cur -= 1
                del before[-1]  # remove last element of current line
                linelog = before + after
            self.printLog(scriptlog+linelog, cur)
        elif response == 'z':
            cur -= 1
            self.printLog(scriptlog+linelog, cur)
        elif response == 'c':
            if cur == len(linelog):
                print("Already at end of sentence!")
            else:
                cur += 1
            self.printLog(scriptlog+linelog, cur)
        elif response == '.' or response == '?':
            # starts a new sentence
            before[-1] += response
            linelog = before + after
            scriptlog = scriptlog + linelog
            linelog = []
            self.printLog(scriptlog+linelog, cur)
        elif response in ['m', 'menu']:
            self.writing_menu(vb)
            self.printLog(scriptlog+linelog, cur)
        # elif re.compile('v\d', response):
        #     number = response[1]
        #     print "here"
        elif isinstance(response, str):
            before = before + [response]
            linelog = before + after
            cur += 1
            self.printLog(scriptlog+linelog, cur)
        else:
            print("Invalid input. Choose a number between 1 and " + str(vb.num_opts) + " or enter a word manually.")
            self.printLog(scriptlog+linelog, cur)

label: NameError
info: dataset/ETHPy150Open jbrew/pt-voicebox/writer.py/Writer.write
Row 8,239
function:

def writing_menu(self, vb):
    top_menu_prompt = "Choose an option from below:" \
                      "\n 1. Select one voice." \
                      "\n 2. Assign custom weights." \
                      "\n 3. Change ranktypes." \
                      "\n 4. Get voice info." \
                      "\n 5. Add a voice." \
                      "\n 6. Save Voicebox" \
                      "\n 7. Load Voicebox" \
                      "\n 8. Exit menu.\n"
    try:
        response = raw_input(top_menu_prompt)
    except NameError:
        response = input(top_menu_prompt)
    if response.isdigit():
        response = int(response)
    if response == 1:
        self.chooseVoiceMenu(vb)
    elif response == 2:
        self.setWeightMenu(vb)
    elif response == 3:
        vb.getVoices()
        for v in sorted(vb.voices):
            try:
                response = raw_input('Set ranktype for '+v+'\n')
            except NameError:
                response = input('Set ranktype for '+v+'\n')
            if response in ['norm', 'freq', 'sig']:
                vb.voices[v].ranktype = response
                print(v + ' ranktype set to ' + str(response))
            else:
                print("Please choose either 'norm' 'freq' or 'sig'")
        vb.getVoices()
    elif response == 4:
        print("not implemented")
        return
    elif response == 5:
        self.addVoiceMenu(vb)
    elif response == 6:
        try:
            path = 'saved/' + raw_input('Save as: ')
        except NameError:
            path = 'saved/' + input('Save as: ') + '.pkl'
        with open(path, 'wb') as output:
            pickle.dump(vb, output, pickle.HIGHEST_PROTOCOL)
    elif response == 7:
        try:
            path = 'saved/' + raw_input('Load file: ')
        except __HOLE__:
            path = 'saved/' + input('Load file: ') + '.pkl'
        with open(path, 'rb') as i:
            p = pickle.load(i)
    elif response == 8:
        print("Returning to write")
        return

# prints the log in a readable way

label: NameError
info: dataset/ETHPy150Open jbrew/pt-voicebox/writer.py/Writer.writing_menu
Row 8,240
function:

def chooseVoiceMenu(self, vb):
    print(self.voiceHeader(vb))
    try:
        response = raw_input('Choose a voice by number from above. \nOr:\n'
                             '0 to use an equal mixture.\n'
                             'C to assign custom weights.\n')
    except __HOLE__:
        response = input('Choose a voice by number from above. \nOr:\n'
                         '0 to use an equal mixture.\n'
                         'C to assign custom weights.\n')
    if response.isdigit():
        response = int(response)
        voicelist = sorted(vb.voices)
        print(len(voicelist))
        if response == 0:
            print("Voices weighted equally!")
        elif response <= len(voicelist):
            voicename = voicelist[response-1]
            vb.useOneVoice(voicename)
            print(voicename + ' selected!')
    elif response == 'C':
        self.setWeightMenu(vb)
    else:
        print('Invalid response! Type a number in range.')

label: NameError
info: dataset/ETHPy150Open jbrew/pt-voicebox/writer.py/Writer.chooseVoiceMenu
Row 8,241
function:

def setWeightMenu(self, vb):
    vb.getVoices()
    for v in sorted(vb.voices):
        try:
            response = raw_input('Set weight for '+v+'\n')
        except __HOLE__:
            response = input('Set weight for '+v+'\n')
        if response.isdigit():
            response = int(response)
            vb.voiceWeights[v] = response
            print(v + ' weight set to ' + str(response))
        else:
            print("Please type a number!")
    vb.getVoices()

label: NameError
info: dataset/ETHPy150Open jbrew/pt-voicebox/writer.py/Writer.setWeightMenu
Row 8,242
function:

def addVoiceMenu(self, vb):
    print(self.voiceHeader(vb))
    import glob
    textfiles = glob.glob('texts/*')
    count = 1
    for filename in textfiles:
        print(str(count) + ": " + filename[6:])
        count += 1
    try:
        response = raw_input('Choose a file by number from above.')
    except __HOLE__:
        response = input('Choose a file by number from above.')
    if response.isdigit():
        response = int(response)
        vb.addVoiceFromFile(textfiles[response-1][6:])

label: NameError
info: dataset/ETHPy150Open jbrew/pt-voicebox/writer.py/Writer.addVoiceMenu
Row 8,243
function:

def test_tryexcept(input, output):
    def endpoint(v):
        if v == 7:
            raise ValueError
        return v
    def callee(v):
        return endpoint(v)
    def caller(v):
        try:
            return callee(v)
        except __HOLE__:
            return 0
    assert caller(input) == output

label: ValueError
info: dataset/ETHPy150Open rfk/withrestart/withrestart/tests/overhead.py/test_tryexcept
Row 8,244
function:

def from_dict(self, dic):
    for k, v in dic.items():
        try:
            ctor = self.OPTIONS[k]
        except __HOLE__:
            fmt = "Does not support option: '%s'"
            raise KeyError(fmt % k)
        else:
            self.values[k] = ctor(v)

label: KeyError
info: dataset/ETHPy150Open numba/numba/numba/targets/options.py/TargetOptions.from_dict
Row 8,245
function:

def parse_entry_points(self):
    def split_and_strip(entry_point):
        console_script, entry_point = entry_point.split('=', 2)
        return console_script.strip(), entry_point.strip()

    raw_entry_points = self.distribution.entry_points
    if isinstance(raw_entry_points, string):
        parser = ConfigParser()
        parser.readfp(StringIO(raw_entry_points))
        if parser.has_section('console_scripts'):
            return dict(parser.items('console_scripts'))
    elif isinstance(raw_entry_points, dict):
        try:
            return dict(split_and_strip(script)
                        for script in raw_entry_points.get('console_scripts', []))
        except __HOLE__:
            pass
    elif raw_entry_points is not None:
        die('When entry_points is provided, it must be a string or dict.')
    return {}

label: ValueError
info: dataset/ETHPy150Open pantsbuild/pex/pex/commands/bdist_pex.py/bdist_pex.parse_entry_points
Row 8,246
function:

def _refleak_cleanup():
    # Collect cyclic trash and read memory statistics immediately after.
    try:
        func1 = sys.getallocatedblocks
    except AttributeError:
        func1 = lambda: 42
    try:
        func2 = sys.gettotalrefcount
    except __HOLE__:
        func2 = lambda: 42

    # Flush standard output, so that buffered data is sent to the OS and
    # associated Python objects are reclaimed.
    for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
        if stream is not None:
            stream.flush()

    sys._clear_type_cache()
    # This also clears the various internal CPython freelists.
    gc.collect()
    return func1(), func2()

label: AttributeError
info: dataset/ETHPy150Open numba/llvmlite/llvmlite/tests/customize.py/_refleak_cleanup
Row 8,247
function:

def addSuccess(self, test):
    try:
        rc_deltas, alloc_deltas = self._huntLeaks(test)
    except __HOLE__:
        # Test failed when repeated
        assert not self.wasSuccessful()
        return

    # These checkers return False on success, True on failure
    def check_rc_deltas(deltas):
        return any(deltas)

    def check_alloc_deltas(deltas):
        # At least 1/3rd of 0s
        if 3 * deltas.count(0) < len(deltas):
            return True
        # Nothing else than 1s, 0s and -1s
        if not set(deltas) <= set((1, 0, -1)):
            return True
        return False

    failed = False
    for deltas, item_name, checker in [
            (rc_deltas, 'references', check_rc_deltas),
            (alloc_deltas, 'memory blocks', check_alloc_deltas)]:
        if checker(deltas):
            msg = '%s leaked %s %s, sum=%s' % (
                test, deltas, item_name, sum(deltas))
            failed = True
            try:
                raise ReferenceLeakError(msg)
            except Exception:
                exc_info = sys.exc_info()
            if self.showAll:
                self.stream.write("%s = %r " % (item_name, deltas))
            self.addFailure(test, exc_info)

    if not failed:
        super(RefleakTestResult, self).addSuccess(test)

label: AssertionError
info: dataset/ETHPy150Open numba/llvmlite/llvmlite/tests/customize.py/RefleakTestResult.addSuccess
Row 8,248
function:

def test(self, verbose=1, extra_argv=None, coverage=False, capture=True,
         knownfailure=True):
    """
    Run tests for module using nose.

    :type verbose: int
    :param verbose: Verbosity value for test outputs, in the range 1-10.
                    Default is 1.

    :type extra_argv: list
    :param extra_argv: List with any extra arguments to pass to nosetests.

    :type coverage: bool
    :param coverage: If True, report coverage of Theano code.
                     Default is False.

    :type capture: bool
    :param capture: If True, capture the standard output of the tests, like
                    nosetests does in command-line. The output of failing
                    tests will be displayed at the end. Default is True.

    :type knownfailure: bool
    :param knownfailure: If True, tests raising KnownFailureTest will
                         not be considered Errors nor Failure, but reported as
                         "known failures" and treated quite like skipped tests.
                         Default is True.

    :returns: Returns the result of running the tests as a
              ``nose.result.TextTestResult`` object.
    """
    from nose.config import Config
    from nose.plugins.manager import PluginManager
    from numpy.testing.noseclasses import NumpyTestProgram

    # Many Theano tests suppose device=cpu, so we need to raise an
    # error if device==gpu.
    if not os.path.exists('theano/__init__.py'):
        try:
            from theano import config
            if config.device != "cpu":
                raise ValueError("Theano tests must be run with device=cpu."
                                 " This will also run GPU tests when possible.\n"
                                 " If you want GPU-related tests to run on a"
                                 " specific GPU device, and not the default one,"
                                 " you should use the init_gpu_device theano flag.")
        except __HOLE__:
            pass

    # cap verbosity at 3 because nose becomes *very* verbose beyond that
    verbose = min(verbose, 3)

    self._show_system_info()

    cwd = os.getcwd()
    if self.package_path in os.listdir(cwd):
        # The tests give weird errors if the package to test is
        # in current directory.
        raise RuntimeError((
            "This function does not run correctly when, at the time "
            "theano was imported, the working directory was theano's "
            "parent directory. You should exit your Python prompt, change "
            "directory, then launch Python again, import theano, then "
            "launch theano.test()."))

    argv, plugins = self.prepare_test_args(verbose, extra_argv, coverage,
                                           capture, knownfailure)

    # The "plugins" keyword of NumpyTestProgram gets ignored if config is
    # specified. Moreover, using "addplugins" instead can lead to strange
    # errors. So, we specify the plugins in the Config as well.
    cfg = Config(includeExe=True, plugins=PluginManager(plugins=plugins))
    t = NumpyTestProgram(argv=argv, exit=False, config=cfg)
    return t.result

label: ImportError
info: dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tests/main.py/TheanoNoseTester.test
8,249
def setup_backend(self):
    from pymongo import ASCENDING, DESCENDING
    from pymongo.connection import Connection
    try:
        from pymongo.uri_parser import parse_uri
    except __HOLE__:
        from pymongo.connection import _parse_uri as parse_uri
    from pymongo.errors import AutoReconnect

    _connection = None
    uri = self.options.pop('uri', u(''))
    _connection_attempts = 0

    parsed_uri = parse_uri(uri, Connection.PORT)

    if type(parsed_uri) is tuple:
        # pymongo < 2.0
        database = parsed_uri[1]
    else:
        # pymongo >= 2.0
        database = parsed_uri['database']

    # Handle auto reconnect signals properly
    while _connection_attempts < 5:
        try:
            if _connection is None:
                _connection = Connection(uri)
            database = _connection[database]
            break
        except AutoReconnect:
            _connection_attempts += 1
            time.sleep(0.1)

    self.database = database

    # setup correct indexes
    database.tickets.ensure_index([('record_hash', ASCENDING)], unique=True)
    database.tickets.ensure_index([('solved', ASCENDING), ('level', ASCENDING)])
    database.occurrences.ensure_index([('time', DESCENDING)])
ImportError
dataset/ETHPy150Open getlogbook/logbook/logbook/ticketing.py/MongoDBBackend.setup_backend
8,250
def main():
    #---Load environment settings from SETTINGS.json in root directory and build filepaths for all base submissions---#
    settings = utils.load_settings('SETTINGS.json')
    base_filepaths = (settings['file_bryan_submission'],
                      settings['file_miroslaw_submission'])
    segment_weights = settings['ensemble_segment_weights']
    segments = segment_weights.keys()
    targets = segment_weights[segments[0]].keys()

    #---Output the segment weights to be used for ensemble averaging of base submissions---#
    log.info('==========ENSEMBLE WEIGHTS (B,M)============')
    for segment in segment_weights:
        log.info(segment.upper()+':')
        for target in segment_weights[segment]:
            log.info(' '+target.upper()+' -- ['+segment_weights[segment][target]['0']+','+
                     segment_weights[segment][target]['1']+']')

    #---Load each base submission to a list of dataframes---#
    base_subs = []
    for file in base_filepaths:
        try:
            base_subs.append(pd.read_csv(file).set_index(['id'], drop=False).sort())
            log.info('Base submission successfully loaded: %s.' % file)
        except IOError:
            log.info('Base submission file does not exist: %s. Run base model to generate, or update filepath.' %file)
            sys.exit('---Exiting---')
    utils.line_break()

    #---Load id's labeled with segments to a dataframe used for segment based averaging---#
    file = settings['file_segment_ids']
    try:
        segment_ids = pd.read_csv(file)
        log.info('Segment IDs successfully loaded from: %s.' % file)
    except __HOLE__:
        log.info('Segment IDs file does not exist: %s. Update filepath in SETTINGS.json.' % file)
    utils.line_break()

    #---Transform base predictions to log space prior to averaging, if selected in settings---#
    if settings['avg_log_space'] == 'y':
        log.info('Transforming base predictions to log space prior to averaging.')
        for i in range(len(base_subs)):
            for target in targets:
                base_subs[i][target] = np.log(base_subs[i][target]+1)
        utils.line_break()

    #---Apply segment based weights to each base submission then combine them to create ensemble submission---#
    log.info('Applying segment weights to base submissions then combining to create ensemble.')
    for i in range(len(base_subs)):
        #Merge the segment labels from the segment id's file with the base submission dataframe
        base_subs[i] = base_subs[i].merge(segment_ids,on='id',how='inner')
        for segment in segments:
            for target in targets:
                base_subs[i][target][base_subs[i]['Segment'] == segment] \
                    *= float(segment_weights[segment][target][str(i)])
        del base_subs[i]['Segment']
    ensemble_sub = base_subs[0].ix[:]
    for i in range(len(base_subs)-1):
        for target in targets:
            ensemble_sub[target] += base_subs[i+1][target]
    utils.line_break()

    #---Transform ensemble predictions back to normal, if use log space averaging was selected in settings---#
    if settings['avg_log_space'] == 'y':
        log.info('Transforming ensemble predictions back to normal from log space.')
        for target in targets:
            ensemble_sub[target] = np.exp(ensemble_sub[target])-1
        utils.line_break()

    #---Apply any final target scalars to ensemble predictions---#
    for target in targets:
        ensemble_sub[target] *= float(settings['target_scalars'][target])

    #---Output ensemble submission to directory set in SETTINGS.json, appending creation date and time---#
    timestamp = datetime.now().strftime('%m-%d-%y_%H%M')
    filename = settings['dir_ensemble_submissions']+'ensemble_predictions_'+timestamp+'.csv'
    ensemble_sub.to_csv(filename, index=False)
    log.info('Ensemble submission saved: %s' % filename)
    utils.line_break()

    #End main
    log.info('Program executed successfully without error! Exiting.')
IOError
dataset/ETHPy150Open theusual/kaggle-seeclickfix-ensemble/main.py/main
8,251
def configure(name, path=None):
    """ Configure logging and return a logger and the location of its logging
    configuration file.

    This function expects:

    + A Splunk app directory structure::

        <app-root>
            bin
                ...
            default
                ...
            local
                ...

    + The current working directory is *<app-root>***/bin**.

      Splunk guarantees this. If you are running the app outside of Splunk, be
      sure to set the current working directory to *<app-root>***/bin** before
      calling.

    This function looks for a logging configuration file at each of these
    locations, loading the first, if any, logging configuration file that it
    finds::

        local/{name}.logging.conf
        default/{name}.logging.conf
        local/logging.conf
        default/logging.conf

    The current working directory is set to *<app-root>* before the logging
    configuration file is loaded. Hence, paths in the logging configuration
    file are relative to *<app-root>*. The current directory is reset before
    return.

    You may short circuit the search for a logging configuration file by
    providing an alternative file location in `path`. Logging configuration
    files must be in `ConfigParser format`_.

    #Arguments:

    :param name: Logger name
    :type name: str

    :param path: Location of an alternative logging configuration file or `None`
    :type path: str or NoneType

    :returns: A logger and the location of its logging configuration file

    .. _ConfigParser format: http://goo.gl/K6edZ8

    """
    app_directory = os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))

    if path is None:
        probing_path = [
            'local/%s.logging.conf' % name,
            'default/%s.logging.conf' % name,
            'local/logging.conf',
            'default/logging.conf']
        for relative_path in probing_path:
            configuration_file = os.path.join(app_directory, relative_path)
            if os.path.exists(configuration_file):
                path = configuration_file
                break
    elif not os.path.isabs(path):
        found = False
        for conf in 'local', 'default':
            configuration_file = os.path.join(app_directory, conf, path)
            if os.path.exists(configuration_file):
                path = configuration_file
                found = True
                break
        if not found:
            raise ValueError(
                'Logging configuration file "%s" not found in local or default '
                'directory' % path)
    elif not os.path.exists(path):
        raise ValueError('Logging configuration file "%s" not found')

    if path is not None:
        working_directory = os.getcwd()
        os.chdir(app_directory)
        try:
            splunk_home = os.path.normpath(os.path.join(working_directory,
                                                        os.environ['SPLUNK_HOME']))
        except __HOLE__:
            splunk_home = working_directory  # reasonable in debug scenarios
        try:
            path = os.path.abspath(path)
            fileConfig(path, {'SPLUNK_HOME': splunk_home})
        finally:
            os.chdir(working_directory)

    if len(root.handlers) == 0:
        root.addHandler(StreamHandler())

    logger = getLogger(name)
    return logger, path
KeyError
dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/searchcommands/logging.py/configure
8,252
def single_file_action(self, path, apply_template=True):
    attempt_open = True
    file_exist = os.path.exists(path)
    if not file_exist:
        try:
            self.create(path)
        except __HOLE__ as e:
            attempt_open = False
            sublime.error_message("Cannot create '" + path + "'. See console for details")
            print("Exception: %s '%s'" % (e.strerror, e.filename))
    if attempt_open and os.path.isfile(path):
        file_view = self.open_file(path)
        if not file_exist and apply_template:
            file_view.settings().set("_anf_new", True)
OSError
dataset/ETHPy150Open skuroda/Sublime-AdvancedNewFile/advanced_new_file/commands/new_file_command.py/AdvancedNewFileNew.single_file_action
8,253
def load_configs_from_directory(config_dir, overrides):
    """
    Returns a master configuration object and a list of configuration objects
    :param config_dir: the directory where the configuration files are located
    :param overrides: mapping of the command line overrides
    """
    MASTER_CONFIG_FILE_NAME = "master"
    DEFAULT_CONFIG_NAME = "single execution"
    EMPTY_MAP = {}

    master_config = None
    config_objs = []

    # get master config and default mapping
    config_subdirs = []
    default_mapping = {}
    for dir_item in os.listdir(config_dir):
        full_path = os.path.join(config_dir, dir_item)
        if os.path.isdir(full_path):
            config_subdirs.append(full_path)  # save subdirs for processing later
        elif os.path.isfile(full_path):
            config_name = os.path.splitext(os.path.basename(full_path))[0]
            try:
                mapping = utils.parse_config_file(full_path)
            except ValueError:
                logger.debug("Ignored " + full_path + "as configuration due to file extension")
            else:
                if MASTER_CONFIG_FILE_NAME in config_name:
                    master_config = Config(MASTER_CONFIG_FILE_NAME, mapping)
                else:
                    default_mapping.update(mapping)
    if master_config is None:
        master_config = Config(MASTER_CONFIG_FILE_NAME, EMPTY_MAP)

    if len(config_subdirs) == 0:
        default_mapping.update(overrides)
        config_objs.append(Config(DEFAULT_CONFIG_NAME, default_mapping))
    else:
        # make a config object for each subdir
        for config_subdir in config_subdirs:
            config_files = [os.path.join(config_subdir, config_file)
                            for config_file in os.listdir(config_subdir)
                            if os.path.isfile(os.path.join(config_subdir, config_file))]
            subdir_mapping = default_mapping.copy()  # initialize the configuration as default
            config_name = os.path.basename(config_subdir)
            for config_file in config_files:
                try:
                    mapping = utils.parse_config_file(config_file)
                except __HOLE__:
                    logger.debug("Ignored " + config_file + "as configuration due to file extension")
                else:
                    subdir_mapping.update(mapping)
            subdir_mapping.update(overrides)
            config_objs.append(Config(config_name, subdir_mapping))
    return master_config, config_objs
ValueError
dataset/ETHPy150Open linkedin/Zopkio/zopkio/test_runner_helper.py/load_configs_from_directory
8,254
@staticmethod
def _create_scoped_credentials(credentials, scope):
    """Create a scoped set of credentials if it is required.

    :type credentials: :class:`oauth2client.client.OAuth2Credentials` or
                       :class:`NoneType`
    :param credentials: The OAuth2 Credentials to add a scope to.

    :type scope: list of URLs
    :param scope: the effective service auth scopes for the connection.

    :rtype: :class:`oauth2client.client.OAuth2Credentials` or
            :class:`NoneType`
    :returns: A new credentials object that has a scope added (if needed).
    """
    if credentials:
        try:
            if credentials.create_scoped_required():
                credentials = credentials.create_scoped(scope)
        except __HOLE__:
            pass
    return credentials
AttributeError
dataset/ETHPy150Open GoogleCloudPlatform/gcloud-python/gcloud/connection.py/Connection._create_scoped_credentials
8,255
def set_character_set(self, charset):
    """Set the connection character set to charset. The character set can
    only be changed in MySQL-4.1 and newer. If you try to change the
    character set from the current value in an older version,
    NotSupportedError will be raised."""
    if charset == "utf8mb4":
        py_charset = "utf8"
    else:
        py_charset = charset
    if self.character_set_name() != charset:
        try:
            super(Connection, self).set_character_set(charset)
        except __HOLE__:
            if self._server_version < (4, 1):
                raise NotSupportedError("server is too old to set charset")
            self.query('SET NAMES %s' % charset)
            self.store_result()
    self.string_decoder.charset = py_charset
    self.unicode_literal.charset = py_charset
AttributeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/MySQL-python-1.2.5/MySQLdb/connections.py/Connection.set_character_set
8,256
def __init__(self, file_path=None):
    self.index = 0
    self.string_encoding = None
    self.file_path = file_path
    self.file_type = None
    try:
        fd = open(file_path, 'r')
        self.data = fd.read()
        fd.close()
    except __HOLE__ as e:
        print('I/O error({0}): {1}'.format(e.errno, e.strerror))
    except:
        print('Unexpected error:'+str(sys.exc_info()[0]))
        raise
IOError
dataset/ETHPy150Open samdmarshall/xcparse/xcparse/Helpers/pbPlist/pbParser.py/PBParser.__init__
8,257
def next(self):
    item = self.iterator.next()
    if not isinstance(item, self.as_is):
        try:
            new_iter = iter(item)
            self.iterator = itertools.chain(new_iter, self.iterator)
            return self.next()
        except __HOLE__:
            pass
    return item
TypeError
dataset/ETHPy150Open sunlightlabs/clearspending/utils.py/flattened.next
8,258
def get_node_or_fail(conn, node_id, coroutine=None, cargs=(), ckwargs={}):
    """Shortcut to get a single node by its id.

    In case when such node could not be found, coroutine could be called
    to handle such case. Typically coroutine will output an error message
    and exit from application.

    @param conn: libcloud connection handle
    @param node_id: id of the node to search for
    @param coroutine: a callable object to handle case when node cannot be found
    @param cargs: positional arguments for coroutine
    @param kwargs: keyword arguments for coroutine
    @return: node object if found, None otherwise"""
    try:
        node = [node for node in conn.list_nodes()
                if str(node.id) == str(node_id)][0]
        return node
    except __HOLE__:
        if callable(coroutine):
            coroutine(*cargs, **ckwargs)
        return None
IndexError
dataset/ETHPy150Open novel/lc-tools/lctools/shortcuts.py/get_node_or_fail
8,259
def __init__(self,stl_file):
    """given an stl file object, imports points and reshapes array to an
    array of n_facetsx3 points."""

    if not hasattr(stl_file,'readline'):
        stl_file_name = stl_file
        stl_file = open(stl_file,'rb')
    else:
        stl_file_name = stl_file.name

    #check for a pickle, to skip all the loading calcs if possible
    last_edited = time.ctime(os.path.getmtime(stl_file_name))
    h1 = str(hash(last_edited)).replace('-','n')
    h2 = str(hash(stl_file_name)).replace('-','n')
    pkl_file_name = '%s_%s.stl_pkl'%(h1,h2)
    pkl_folder = "pyBspline_pkl"
    pkl_file_name = os.path.join(pkl_folder,pkl_file_name)
    if not os.path.exists(pkl_folder):
        os.mkdir(pkl_folder)
    if os.path.exists(pkl_file_name):
        self.facets, self.stl_i0, self.stl_i1, self.p_count, self.stl_indecies, \
            self.stl_i0, self.points, self.point_indecies, \
            self.triangles = cPickle.load(open(pkl_file_name))
        return

    ascii = (stl_file.readline().strip().split()[0] == 'solid')
    stl_file.seek(0)

    if ascii:
        self.facets = parse_ascii_stl(stl_file)
    else:
        self.facets = parse_binary_stl(stl_file)

    #list of points and the associated index from the facet array
    points = []
    stl_indecies = []
    point_indecies = []  #same size as stl_indecies, but points to locations in the points data

    #stl files have duplicate points, which we don't want to compute on
    #so instead we keep a mapping between duplicates and their index in
    #the point array
    point_locations = {}

    triangles = []  #used to track connectivity information

    #extract the 9 points from each facet into one 3*n_facets set of (x,y,z)
    #  points and keep track of the original indcies at the same time so
    #  I can reconstruct the stl file later
    column = np.arange(3,12,dtype=np.int)
    row_base = np.ones(9,dtype=np.int)
    p_count = 0  #I'm using this to avoid calling len(points) a lot
    for i,facet in enumerate(self.facets):
        row = row_base*i
        ps = facet[3:].reshape((3,3))
        triangle = []
        for p in ps:
            t_p = tuple(p)
            try:
                p_index = point_locations[t_p]
                point_indecies.append(p_index)
                #we already have that point, so just point back to it
                triangle.append(p_index)
            except __HOLE__:
                points.append(p)
                point_locations[t_p] = p_count
                point_indecies.append(p_count)
                triangle.append(p_count)
                p_count += 1
        triangles.append(tuple(triangle))

        index = np.vstack((row_base*i,column)).T.reshape((3,3,2))
        stl_indecies.extend(index)

    self.p_count = p_count
    self.stl_indecies = np.array(stl_indecies)
    #just need to re-shape these for the assignment call later
    self.stl_i0 = self.stl_indecies[:,:,0]
    self.stl_i1 = self.stl_indecies[:,:,1]

    self.points = np.array(points)
    self.point_indecies = point_indecies
    self.triangles = np.array(triangles)

    #pickle for efficiency, instead of re-doing the load every time
    pkl_data = (self.facets, self.stl_i0, self.stl_i1, self.p_count,
                self.stl_indecies, self.stl_i0, self.points,
                self.point_indecies, self.triangles)
    cPickle.dump(pkl_data,open(pkl_file_name,'w'))
KeyError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/geometry/stl.py/STL.__init__
8,260
@classmethod
def get_for_instance(cls, instance):
    try:
        permissions = cls._registry[type(instance)]
    except KeyError:
        try:
            permissions = cls._registry[cls._proxies[type(instance)]]
        except __HOLE__:
            permissions = ()

    pks = [permission.stored_permission.pk for permission in permissions]
    return StoredPermission.objects.filter(pk__in=pks)
KeyError
dataset/ETHPy150Open mayan-edms/mayan-edms/mayan/apps/acls/classes.py/ModelPermission.get_for_instance
8,261
def _init_toolkit():
    """ Initialise the current toolkit. """

    def import_toolkit(tk):
        try:
            # Try and import the toolkit's pyface backend init module.
            __import__('tvtk.pyface.ui.%s.init' % tk)
        except:
            raise

    if ETSConfig.toolkit:
        # If the toolkit has been explicitly specified, just import it and
        # allow failure if an exception is thrown.
        import_toolkit(ETSConfig.toolkit)
        tk = ETSConfig.toolkit
    else:
        # Toolkits to check for if none is explicitly specified.
        known_toolkits = ('wx', 'qt4', 'null')

        for tk in known_toolkits:
            try:
                import_toolkit(tk)

                # In case we have just decided on a toolkit, tell everybody else.
                ETSConfig.toolkit = tk
                break
            except __HOLE__:
                pass
        else:
            # Try to import the null toolkit but don't set the ETSConfig toolkit
            try:
                tk = 'null'
                import_toolkit(tk)

                import warnings
                warnings.warn("Unable to import the %s backend for pyface;"\
                    " using the 'null' toolkit instead." % ", ".join(toolkits))
            except:
                raise ImportError("unable to import a pyface backend for any of the %s toolkits" \
                    % ", ".join(known_toolkits))

    # Save the imported toolkit name.
    global _toolkit
    _toolkit = tk

# Do this once then disappear.
ImportError
dataset/ETHPy150Open enthought/mayavi/tvtk/pyface/toolkit.py/_init_toolkit
8,262
def toolkit_object(name):
    """ Return the toolkit specific object with the given name. The name
    consists of the relative module path and the object name separated by a
    colon.
    """
    mname, oname = name.split(':')
    be = 'tvtk.pyface.ui.%s.' % _toolkit
    be_mname = be + mname

    class Unimplemented(object):
        """ This is returned if an object isn't implemented by the selected
        toolkit. It raises an exception if it is ever instantiated.
        """

        def __init__(self, *args, **kwargs):
            raise NotImplementedError("the %s pyface backend doesn't implement %s" % (be, oname))

    be_obj = Unimplemented
    try:
        __import__(be_mname)
        try:
            be_obj = getattr(sys.modules[be_mname], oname)
        except AttributeError:
            pass
    except __HOLE__:
        pass

    return be_obj
ImportError
dataset/ETHPy150Open enthought/mayavi/tvtk/pyface/toolkit.py/toolkit_object
8,263
def GetFeedItemIdsForCampaign(campaign_feed):
    """Gets the Feed Item Ids used by a campaign through a given Campaign Feed.

    Args:
        campaign_feed: the Campaign Feed we are retrieving Feed Item Ids from.

    Returns:
        A list of Feed Item IDs.
    """
    feed_item_ids = set()

    try:
        lhs_operand = campaign_feed['matchingFunction']['lhsOperand']
    except __HOLE__:
        lhs_operand = None

    if (lhs_operand and lhs_operand[0]['FunctionArgumentOperand.Type'] ==
            'RequestContextOperand'):
        request_context_operand = lhs_operand[0]

        if (request_context_operand['contextType'] == 'FEED_ITEM_ID' and
                campaign_feed['matchingFunction']['operator'] == 'IN'):
            for argument in campaign_feed['matchingFunction']['rhsOperand']:
                if argument['xsi_type'] == 'ConstantOperand':
                    feed_item_ids.add(argument['longValue'])

    return feed_item_ids
KeyError
dataset/ETHPy150Open googleads/googleads-python-lib/examples/adwords/v201603/migration/migrate_to_extension_settings.py/GetFeedItemIdsForCampaign
8,264
def run_tests(self):
    import sys
    if sys.version_info[:2] == (2, 6):
        import unittest2 as unittest  # Python 2.6
    else:
        import unittest

    setup_file = sys.modules['__main__'].__file__
    setup_dir = os.path.abspath(os.path.dirname(setup_file))
    tests = unittest.TestLoader().discover(
        os.path.join(setup_dir, 'tests'), pattern='*.py')
    try:
        # https://github.com/CleanCut/green/issues/50
        from green.runner import run
        from green.suite import GreenTestSuite
        from green.config import default_args
        default_args.verbose = 3
        run(GreenTestSuite(tests), sys.stdout, default_args)
    except __HOLE__:
        unittest.TextTestRunner(verbosity=2).run(tests)
ImportError
dataset/ETHPy150Open laike9m/ezcf/setup.py/PyTest.run_tests
8,265
def register_rule_types():
    LOG.debug('Start : register default RuleTypes.')

    for rule_type in RULE_TYPES:
        rule_type = copy.deepcopy(rule_type)

        try:
            rule_type_db = RuleType.get_by_name(rule_type['name'])
            update = True
        except __HOLE__:
            rule_type_db = None
            update = False

        rule_type_api = RuleTypeAPI(**rule_type)
        rule_type_api.validate()
        rule_type_model = RuleTypeAPI.to_model(rule_type_api)

        if rule_type_db:
            rule_type_model.id = rule_type_db.id

        try:
            rule_type_db = RuleType.add_or_update(rule_type_model)

            extra = {'rule_type_db': rule_type_db}
            if update:
                LOG.audit('RuleType updated. RuleType %s', rule_type_db, extra=extra)
            else:
                LOG.audit('RuleType created. RuleType %s', rule_type_db, extra=extra)
        except Exception:
            LOG.exception('Unable to register RuleType %s.', rule_type['name'])

    LOG.debug('End : register default RuleTypes.')
ValueError
dataset/ETHPy150Open StackStorm/st2/st2common/st2common/bootstrap/ruletypesregistrar.py/register_rule_types
8,266
def __init__(self, request, model, list_display, list_display_links,
             list_filter, date_hierarchy, search_fields, list_select_related,
             list_per_page, list_max_show_all, list_editable, model_admin):
    self.model = model
    self.opts = model._meta
    self.lookup_opts = self.opts
    self.root_query_set = model_admin.queryset(request)
    self.list_display = list_display
    self.list_display_links = list_display_links
    self.list_filter = list_filter
    self.date_hierarchy = date_hierarchy
    self.search_fields = search_fields
    self.list_select_related = list_select_related
    self.list_per_page = list_per_page
    self.list_max_show_all = list_max_show_all
    self.model_admin = model_admin

    # Get search parameters from the query string.
    try:
        self.page_num = int(request.GET.get(PAGE_VAR, 0))
    except __HOLE__:
        self.page_num = 0
    self.show_all = ALL_VAR in request.GET
    self.is_popup = IS_POPUP_VAR in request.GET
    self.to_field = request.GET.get(TO_FIELD_VAR)
    self.params = dict(request.GET.items())
    if PAGE_VAR in self.params:
        del self.params[PAGE_VAR]
    if ERROR_FLAG in self.params:
        del self.params[ERROR_FLAG]

    if self.is_popup:
        self.list_editable = ()
    else:
        self.list_editable = list_editable
    self.query = request.GET.get(SEARCH_VAR, '')
    self.query_set = self.get_query_set(request)
    self.get_results(request)
    if self.is_popup:
        title = ugettext('Select %s')
    else:
        title = ugettext('Select %s to change')
    self.title = title % force_text(self.opts.verbose_name)
    self.pk_attname = self.lookup_opts.pk.attname
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/admin/views/main.py/ChangeList.__init__
8,267
def get_ordering(self, request, queryset):
    """
    Returns the list of ordering fields for the change list.
    First we check the get_ordering() method in model admin, then we check
    the object's default ordering. Then, any manually-specified ordering
    from the query string overrides anything. Finally, a deterministic
    order is guaranteed by ensuring the primary key is used as the last
    ordering field.
    """
    params = self.params
    ordering = list(self.model_admin.get_ordering(request)
                    or self._get_default_ordering())
    if ORDER_VAR in params:
        # Clear ordering and used params
        ordering = []
        order_params = params[ORDER_VAR].split('.')
        for p in order_params:
            try:
                none, pfx, idx = p.rpartition('-')
                field_name = self.list_display[int(idx)]
                order_field = self.get_ordering_field(field_name)
                if not order_field:
                    continue  # No 'admin_order_field', skip it
                ordering.append(pfx + order_field)
            except (__HOLE__, ValueError):
                continue  # Invalid ordering specified, skip it.

    # Add the given query's ordering fields, if any.
    ordering.extend(queryset.query.order_by)

    # Ensure that the primary key is systematically present in the list of
    # ordering fields so we can guarantee a deterministic order across all
    # database backends.
    pk_name = self.lookup_opts.pk.name
    if not (set(ordering) & set(['pk', '-pk', pk_name, '-' + pk_name])):
        # The two sets do not intersect, meaning the pk isn't present. So
        # we add it.
        ordering.append('-pk')

    return ordering
IndexError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/admin/views/main.py/ChangeList.get_ordering
8,268
def get_ordering_field_columns(self):
    """
    Returns a SortedDict of ordering field column numbers and asc/desc
    """
    # We must cope with more than one column having the same underlying sort
    # field, so we base things on column numbers.
    ordering = self._get_default_ordering()
    ordering_fields = SortedDict()
    if ORDER_VAR not in self.params:
        # for ordering specified on ModelAdmin or model Meta, we don't know
        # the right column numbers absolutely, because there might be more
        # than one column associated with that ordering, so we guess.
        for field in ordering:
            if field.startswith('-'):
                field = field[1:]
                order_type = 'desc'
            else:
                order_type = 'asc'
            for index, attr in enumerate(self.list_display):
                if self.get_ordering_field(attr) == field:
                    ordering_fields[index] = order_type
                    break
    else:
        for p in self.params[ORDER_VAR].split('.'):
            none, pfx, idx = p.rpartition('-')
            try:
                idx = int(idx)
            except __HOLE__:
                continue  # skip it
            ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
    return ordering_fields
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/admin/views/main.py/ChangeList.get_ordering_field_columns
8,269
@publisher('model')
def check_uvs():
    """checks uvs with no uv area

    The area of a 2d polygon calculation is based on the answer of Darius
    Bacon in
    http://stackoverflow.com/questions/451426/how-do-i-calculate-the-surface-area-of-a-2d-polygon
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return

    def area(p):
        return 0.5 * abs(sum(x0 * y1 - x1 * y0
                             for ((x0, y0), (x1, y1)) in segments(p)))

    def segments(p):
        return zip(p, p[1:] + [p[0]])

    all_meshes = pm.ls(type='mesh')
    mesh_count = len(all_meshes)

    from anima.ui.progress_dialog import ProgressDialogManager
    pdm = ProgressDialogManager()

    if not pm.general.about(batch=1) and mesh_count:
        pdm.use_ui = True

    caller = pdm.register(mesh_count, 'check_uvs()')

    meshes_with_zero_uv_area = []
    for node in all_meshes:
        all_uvs = node.getUVs()
        try:
            for i in range(node.numFaces()):
                uvs = []
                for j in range(node.numPolygonVertices(i)):
                    # uvs.append(node.getPolygonUV(i, j))
                    uv_id = node.getPolygonUVid(i, j)
                    uvs.append((all_uvs[0][uv_id], all_uvs[1][uv_id]))
                if area(uvs) == 0.0:
                    meshes_with_zero_uv_area.append(node)
                    break
        except __HOLE__:
            meshes_with_zero_uv_area.append(node)
        caller.step()

    if len(meshes_with_zero_uv_area):
        pm.select([node.getParent() for node in meshes_with_zero_uv_area])
        raise RuntimeError(
            """There are meshes with no uvs or faces with zero uv area:<br><br>
            %s""" %
            '<br>'.join(
                map(lambda x: x.name(),
                    meshes_with_zero_uv_area[:MAX_NODE_DISPLAY])
            )
        )

# ****************** #
#  LOOK DEVELOPMENT  #
# ****************** #
RuntimeError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/publish.py/check_uvs
8,270
@publisher(['animation', 'shot previs'])
def set_frame_range():
    """sets the frame range from the shot node
    """
    shot_node = pm.ls(type='shot')[0]
    start_frame = shot_node.startFrame.get()
    end_frame = shot_node.endFrame.get()

    handle_count = 1
    try:
        handle_count = shot_node.getAttr('handle')
    except __HOLE__:
        pass

    # set it in the playback
    pm.playbackOptions(
        ast=start_frame,
        aet=end_frame,
        min=start_frame-handle_count,
        max=end_frame+handle_count
    )
AttributeError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/publish.py/set_frame_range
8,271
@publisher(['animation', 'shot previs'], publisher_type=POST_PUBLISHER_TYPE)
def export_camera():
    """exports camera and the related shot node
    """
    from stalker import Task, Version
    from anima.env import mayaEnv

    m = mayaEnv.Maya()

    v = m.get_current_version()

    shot = pm.ls(type='shot')[0]
    try:
        sequencer = pm.ls(shot.message.connections(), type='sequencer')[0]
    except __HOLE__:
        sequencer = None

    camera = None
    if shot:
        camera = shot.currentCamera.get()

    camera_task = \
        Task.query\
            .filter(Task.parent == v.task.parent)\
            .filter(Task.name == 'Camera').first()

    if camera_task:
        from stalker import LocalSession
        local_session = LocalSession()
        logged_in_user = local_session.logged_in_user

        cam_v = Version(
            task=camera_task,
            description='Exported from %s task on Publish' % v.task.name
        )
        cam_v.update_paths()
        cam_v.extension = '.ma'
        cam_v.is_published = True
        cam_v.created_by = cam_v.updated_by = logged_in_user

        pm.select([shot, camera, sequencer])

        m.export_as(cam_v)
IndexError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/publish.py/export_camera
8,272
def cleanse_setting(key, value):
    """Cleanse an individual setting key/value of sensitive content.

    If the value is a dictionary, recursively cleanse the keys in
    that dictionary.
    """
    try:
        if HIDDEN_SETTINGS.search(key):
            cleansed = CLEANSED_SUBSTITUTE
        else:
            if isinstance(value, dict):
                cleansed = dict((k, cleanse_setting(k, v)) for k,v in value.items())
            else:
                cleansed = value
    except __HOLE__:
        # If the key isn't regex-able, just return as-is.
        cleansed = value
    return cleansed
TypeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/views/debug.py/cleanse_setting
8,273
def get_exception_reporter_filter(request):
    global default_exception_reporter_filter
    if default_exception_reporter_filter is None:
        # Load the default filter for the first time and cache it.
        modpath = settings.DEFAULT_EXCEPTION_REPORTER_FILTER
        modname, classname = modpath.rsplit('.', 1)
        try:
            mod = import_module(modname)
        except __HOLE__ as e:
            raise ImproperlyConfigured(
                'Error importing default exception reporter filter %s: "%s"' % (modpath, e))
        try:
            default_exception_reporter_filter = getattr(mod, classname)()
        except AttributeError:
            raise ImproperlyConfigured('Default exception reporter filter module "%s" does not define a "%s" class' % (modname, classname))
    if request:
        return getattr(request, 'exception_reporter_filter', default_exception_reporter_filter)
    else:
        return default_exception_reporter_filter
ImportError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/views/debug.py/get_exception_reporter_filter
8,274
def get_traceback_data(self):
    "Return a Context instance containing traceback information."

    if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
        from django.template.loader import template_source_loaders
        self.template_does_not_exist = True
        self.loader_debug_info = []
        for loader in template_source_loaders:
            try:
                source_list_func = loader.get_template_sources
                # NOTE: This assumes exc_value is the name of the template that
                # the loader attempted to load.
                template_list = [{'name': t, 'exists': os.path.exists(t)} \
                    for t in source_list_func(str(self.exc_value))]
            except __HOLE__:
                template_list = []
            loader_name = loader.__module__ + '.' + loader.__class__.__name__
            self.loader_debug_info.append({
                'loader': loader_name,
                'templates': template_list,
            })
    if (settings.TEMPLATE_DEBUG and
            hasattr(self.exc_value, 'django_template_source')):
        self.get_template_exception_info()

    frames = self.get_traceback_frames()
    for i, frame in enumerate(frames):
        if 'vars' in frame:
            frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]
        frames[i] = frame

    unicode_hint = ''
    if self.exc_type and issubclass(self.exc_type, UnicodeError):
        start = getattr(self.exc_value, 'start', None)
        end = getattr(self.exc_value, 'end', None)
        if start is not None and end is not None:
            unicode_str = self.exc_value.args[1]
            unicode_hint = smart_text(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')
    from django import get_version
    c = {
        'is_email': self.is_email,
        'unicode_hint': unicode_hint,
        'frames': frames,
        'request': self.request,
        'filtered_POST': self.filter.get_post_parameters(self.request),
        'settings': get_safe_settings(),
        'sys_executable': sys.executable,
        'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
        'server_time': datetime.datetime.now(),
        'django_version_info': get_version(),
        'sys_path' : sys.path,
        'template_info': self.template_info,
        'template_does_not_exist': self.template_does_not_exist,
        'loader_debug_info': self.loader_debug_info,
    }
    # Check whether exception info is available
    if self.exc_type:
        c['exception_type'] = self.exc_type.__name__
    if self.exc_value:
        c['exception_value'] = smart_text(self.exc_value, errors='replace')
    if frames:
        c['lastframe'] = frames[-1]
    return c
AttributeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/views/debug.py/ExceptionReporter.get_traceback_data
8,275
def get_template_exception_info(self):
    origin, (start, end) = self.exc_value.django_template_source
    template_source = origin.reload()
    context_lines = 10
    line = 0
    upto = 0
    source_lines = []
    before = during = after = ""
    for num, next in enumerate(linebreak_iter(template_source)):
        if start >= upto and end <= next:
            line = num
            before = escape(template_source[upto:start])
            during = escape(template_source[start:end])
            after = escape(template_source[end:next])
        source_lines.append( (num, escape(template_source[upto:next])) )
        upto = next
    total = len(source_lines)

    top = max(1, line - context_lines)
    bottom = min(total, line + 1 + context_lines)

    # In some rare cases, exc_value.args might be empty.
    try:
        message = self.exc_value.args[0]
    except __HOLE__:
        message = '(Could not get exception message)'

    self.template_info = {
        'message': message,
        'source_lines': source_lines[top:bottom],
        'before': before,
        'during': during,
        'after': after,
        'top': top,
        'bottom': bottom,
        'total': total,
        'line': line,
        'name': origin.name,
    }
IndexError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/views/debug.py/ExceptionReporter.get_template_exception_info
8,276
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
    """
    Returns context_lines before and after lineno from file.
    Returns (pre_context_lineno, pre_context, context_line, post_context).
    """
    source = None
    if loader is not None and hasattr(loader, "get_source"):
        source = loader.get_source(module_name)
        if source is not None:
            source = source.splitlines()
    if source is None:
        try:
            with open(filename, 'rb') as fp:
                source = fp.readlines()
        except (__HOLE__, IOError):
            pass
    if source is None:
        return None, [], None, []

    # If we just read the source from a file, or if the loader did not
    # apply tokenize.detect_encoding to decode the source into a Unicode
    # string, then we should do that ourselves.
    if isinstance(source[0], six.binary_type):
        encoding = 'ascii'
        for line in source[:2]:
            # File coding may be specified. Match pattern from PEP-263
            # (http://www.python.org/dev/peps/pep-0263/)
            match = re.search(br'coding[:=]\s*([-\w.]+)', line)
            if match:
                encoding = match.group(1).decode('ascii')
                break
        source = [six.text_type(sline, encoding, 'replace') for sline in source]

    lower_bound = max(0, lineno - context_lines)
    upper_bound = lineno + context_lines

    pre_context = [line.strip('\n') for line in source[lower_bound:lineno]]
    context_line = source[lineno].strip('\n')
    post_context = [line.strip('\n') for line in source[lineno+1:upper_bound]]

    return lower_bound, pre_context, context_line, post_context
OSError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/views/debug.py/ExceptionReporter._get_lines_from_file
8,277
def technical_404_response(request, exception):
    "Create a technical 404 error response. The exception should be the Http404."
    try:
        tried = exception.args[0]['tried']
    except (__HOLE__, TypeError, KeyError):
        tried = []
    else:
        if not tried:
            # tried exists but is an empty list. The URLconf must've been empty.
            return empty_urlconf(request)

    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__

    t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
    c = Context({
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': request.path_info[1:],  # Trim leading slash
        'urlpatterns': tried,
        'reason': force_bytes(exception, errors='replace'),
        'request': request,
        'settings': get_safe_settings(),
    })
    return HttpResponseNotFound(t.render(c), content_type='text/html')
IndexError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/views/debug.py/technical_404_response
8,278
def _real_extract(self, url):
    from ..extractor import gen_extractors

    mobj = re.match(self._VALID_URL, url)
    video_id = mobj.group('id')
    extractor_id = mobj.group('extractor')
    all_extractors = gen_extractors()

    rex = re.compile(extractor_id, flags=re.IGNORECASE)
    matching_extractors = [
        e for e in all_extractors if rex.search(e.IE_NAME)]

    if len(matching_extractors) == 0:
        raise ExtractorError(
            'No extractors matching %r found' % extractor_id,
            expected=True)
    elif len(matching_extractors) > 1:
        # Is it obvious which one to pick?
        try:
            extractor = next(
                ie for ie in matching_extractors
                if ie.IE_NAME.lower() == extractor_id.lower())
        except StopIteration:
            raise ExtractorError(
                ('Found multiple matching extractors: %s' %
                    ' '.join(ie.IE_NAME for ie in matching_extractors)),
                expected=True)
    else:
        extractor = matching_extractors[0]

    num_str = mobj.group('num')
    num = int(num_str) if num_str else 0

    testcases = []
    t = getattr(extractor, '_TEST', None)
    if t:
        testcases.append(t)
    testcases.extend(getattr(extractor, '_TESTS', []))

    try:
        tc = testcases[num]
    except __HOLE__:
        raise ExtractorError(
            ('Test case %d not found, got only %d tests' %
                (num, len(testcases))),
            expected=True)

    self.to_screen('Test URL: %s' % tc['url'])

    return {
        '_type': 'url',
        'url': tc['url'],
        'id': video_id,
    }
IndexError
dataset/ETHPy150Open yasoob/youtube-dl-GUI/youtube_dl/extractor/testurl.py/TestURLIE._real_extract
8,279
def expect_line(self, content):
    try:
        line = next(self)
    except __HOLE__:
        raise ParsingError('expected %r but got end of input' % content)
    if line != content:
        raise ParsingError('expected %r but got %r' % (content, line))
StopIteration
dataset/ETHPy150Open tonyseek/openvpn-status/openvpn_status/parser.py/LogParser.expect_line
8,280
def expect_list(self):
    try:
        line = next(self)
    except __HOLE__:
        raise ParsingError('expected list but got end of input')
    splited = line.split(self.list_separator)
    if len(splited) == 1:
        raise ParsingError('expected list but got %r' % line)
    return splited
StopIteration
dataset/ETHPy150Open tonyseek/openvpn-status/openvpn_status/parser.py/LogParser.expect_list
8,281
def expect_tuple(self, name):
    try:
        line = next(self)
    except __HOLE__:
        raise ParsingError('expected 2-tuple but got end of input')
    splited = line.split(self.list_separator)
    if len(splited) != 2:
        raise ParsingError('expected 2-tuple but got %r' % line)
    if splited[0] != name:
        raise ParsingError('expected 2-tuple starting with %r' % name)
    return splited[1]
StopIteration
dataset/ETHPy150Open tonyseek/openvpn-status/openvpn_status/parser.py/LogParser.expect_tuple
8,282
def _find_cgroup_mounts():
    """
    Return the information which subsystems are mounted where.
    @return a generator of tuples (subsystem, mountpoint)
    """
    try:
        with open('/proc/mounts', 'rt') as mountsFile:
            for mount in mountsFile:
                mount = mount.split(' ')
                if mount[2] == 'cgroup':
                    mountpoint = mount[1]
                    options = mount[3]
                    for option in options.split(','):
                        if option in ALL_KNOWN_SUBSYSTEMS:
                            yield (option, mountpoint)
    except __HOLE__:
        logging.exception('Cannot read /proc/mounts')
IOError
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/cgroups.py/_find_cgroup_mounts
8,283
def _find_own_cgroups():
    """
    For all subsystems, return the information in which (sub-)cgroup this
    process is in. (Each process is in exactly cgroup in each hierarchy.)
    @return a generator of tuples (subsystem, cgroup)
    """
    try:
        with open('/proc/self/cgroup', 'rt') as ownCgroupsFile:
            for cgroup in _parse_proc_pid_cgroup(ownCgroupsFile):
                yield cgroup
    except __HOLE__:
        logging.exception('Cannot read /proc/self/cgroup')
IOError
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/cgroups.py/_find_own_cgroups
8,284
def _parse_proc_pid_cgroup(content):
    """
    Parse a /proc/*/cgroup file into tuples of (subsystem,cgroup).
    @param content: An iterable over the lines of the file.
    @return: a generator of tuples
    """
    for ownCgroup in content:
        #each line is "id:subsystem,subsystem:path"
        ownCgroup = ownCgroup.strip().split(':')
        try:
            path = ownCgroup[2][1:]  # remove leading /
        except __HOLE__:
            raise IndexError("index out of range for " + str(ownCgroup))
        for subsystem in ownCgroup[1].split(','):
            yield (subsystem, path)
IndexError
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/cgroups.py/_parse_proc_pid_cgroup
8,285
def kill_all_tasks_in_cgroup(cgroup, kill_process_fn):
    tasksFile = os.path.join(cgroup, 'tasks')
    freezer_file = os.path.join(cgroup, 'freezer.state')

    def try_write_to_freezer(content):
        try:
            util.write_file(content, freezer_file)
        except __HOLE__:
            pass  # expected if freezer not enabled, we try killing without it

    i = 0
    while True:
        i += 1
        # TODO We can probably remove this loop over signals and just send
        # SIGKILL. We added this loop when killing sub-processes was not reliable
        # and we did not know why, but now it is reliable.
        for sig in [signal.SIGKILL, signal.SIGINT, signal.SIGTERM]:
            try_write_to_freezer('FROZEN')
            with open(tasksFile, 'rt') as tasks:
                task = None
                for task in tasks:
                    task = task.strip()
                    if i > 1:
                        logging.warning('Run has left-over process with pid %s '
                                        'in cgroup %s, sending signal %s (try %s).',
                                        task, cgroup, sig, i)
                    kill_process_fn(int(task), sig)

                if task is None:
                    return  # No process was hanging, exit
            try_write_to_freezer('THAWED')
            time.sleep(i * 0.5)  # wait for the process to exit, this might take some time
IOError
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/cgroups.py/kill_all_tasks_in_cgroup
8,286
def remove_cgroup(cgroup):
    if not os.path.exists(cgroup):
        logging.warning('Cannot remove CGroup %s, because it does not exist.', cgroup)
        return
    assert os.path.getsize(os.path.join(cgroup, 'tasks')) == 0
    try:
        os.rmdir(cgroup)
    except OSError:
        # sometimes this fails because the cgroup is still busy, we try again once
        try:
            os.rmdir(cgroup)
        except __HOLE__ as e:
            logging.warning("Failed to remove cgroup %s: error %s (%s)",
                            cgroup, e.errno, e.strerror)
OSError
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/cgroups.py/remove_cgroup
8,287
def _register_process_with_cgrulesengd(pid):
    """Tell cgrulesengd daemon to not move the given process into other cgroups,
    if libcgroup is available.
    """
    # Logging/printing from inside preexec_fn would end up in the output file,
    # not in the correct logger, thus it is disabled here.
    from ctypes import cdll
    try:
        libcgroup = cdll.LoadLibrary('libcgroup.so.1')

        failure = libcgroup.cgroup_init()
        if failure:
            pass
            #print('Could not initialize libcgroup, error {}'.format(success))
        else:
            CGROUP_DAEMON_UNCHANGE_CHILDREN = 0x1
            failure = libcgroup.cgroup_register_unchanged_process(pid, CGROUP_DAEMON_UNCHANGE_CHILDREN)
            if failure:
                pass
                #print('Could not register process to cgrulesndg, error {}. '
                #      'Probably the daemon will mess up our cgroups.'.format(success))
    except __HOLE__:
        pass
        #print('libcgroup is not available: {}'.format(e.strerror))
OSError
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/cgroups.py/_register_process_with_cgrulesengd
8,288
def require_subsystem(self, subsystem):
    """
    Check whether the given subsystem is enabled and is writable
    (i.e., new cgroups can be created for it).
    Produces a log message for the user if one of the conditions is not fulfilled.
    If the subsystem is enabled but not writable, it will be removed from
    this instance such that further checks with "in" will return "False".
    @return A boolean value.
    """
    if not subsystem in self:
        logging.warning('Cgroup subsystem %s is not enabled. Please enable it with '
                        '"sudo mount -t cgroup none /sys/fs/cgroup".',
                        subsystem)
        return False

    try:
        test_cgroup = self.create_fresh_child_cgroup(subsystem)
        test_cgroup.remove()
    except __HOLE__ as e:
        self.paths = set(self.per_subsystem.values())
        logging.warning('Cannot use cgroup hierarchy mounted at {0} for subsystem {1}, '
                        'reason: {2}. '
                        'If permissions are wrong, please run "sudo chmod o+wt \'{0}\'".'
                        .format(self.per_subsystem[subsystem], subsystem, e.strerror))
        del self.per_subsystem[subsystem]
        return False

    return True
OSError
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/cgroups.py/Cgroup.require_subsystem
8,289
def create_fresh_child_cgroup(self, *subsystems):
    """
    Create child cgroups of the current cgroup for at least the given subsystems.
    @return: A Cgroup instance representing the new child cgroup(s).
    """
    assert set(subsystems).issubset(self.per_subsystem.keys())
    createdCgroupsPerSubsystem = {}
    createdCgroupsPerParent = {}
    for subsystem in subsystems:
        parentCgroup = self.per_subsystem[subsystem]
        if parentCgroup in createdCgroupsPerParent:
            # reuse already created cgroup
            createdCgroupsPerSubsystem[subsystem] = createdCgroupsPerParent[parentCgroup]
            continue

        cgroup = tempfile.mkdtemp(prefix=CGROUP_NAME_PREFIX, dir=parentCgroup)
        createdCgroupsPerSubsystem[subsystem] = cgroup
        createdCgroupsPerParent[parentCgroup] = cgroup

        # add allowed cpus and memory to cgroup if necessary
        # (otherwise we can't add any tasks)
        def copy_parent_to_child(name):
            shutil.copyfile(os.path.join(parentCgroup, name),
                            os.path.join(cgroup, name))
        try:
            copy_parent_to_child('cpuset.cpus')
            copy_parent_to_child('cpuset.mems')
        except __HOLE__:
            # expected to fail if cpuset subsystem is not enabled in this hierarchy
            pass

    return Cgroup(createdCgroupsPerSubsystem)
IOError
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/cgroups.py/Cgroup.create_fresh_child_cgroup
8,290
def main():
    import argparse
    import redis
    import time
    import sys

    from os import path
    sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))

    from scutils.redis_queue import RedisPriorityQueue
    from scutils.redis_throttled_queue import RedisThrottledQueue

    parser = argparse.ArgumentParser(description="Throttled Queue Test Script."
                                     " Start either a single or multiple processes to see the "
                                     " throttled queue mechanism in action.")
    parser.add_argument('-r', '--redis-host', action='store', required=True,
                        help="The Redis host ip")
    parser.add_argument('-p', '--redis-port', action='store', default='6379',
                        help="The Redis port")
    parser.add_argument('-m', '--moderate', action='store_const', const=True,
                        default=False, help="Moderate the outbound Queue")
    parser.add_argument('-w', '--window', action='store', default=60,
                        help="The window time to test")
    parser.add_argument('-n', '--num-hits', action='store', default=10,
                        help="The number of pops allowed in the given window")
    parser.add_argument('-q', '--queue', action='store', default='testqueue',
                        help="The Redis queue name")

    args = vars(parser.parse_args())

    window = int(args['window'])
    num = int(args['num_hits'])
    host = args['redis_host']
    port = args['redis_port']
    mod = args['moderate']
    queue = args['queue']

    conn = redis.Redis(host=host, port=port)

    q = RedisPriorityQueue(conn, queue)
    t = RedisThrottledQueue(conn, q, window, num, mod)

    def push_items(amount):
        for i in range(0, amount):
            t.push('item-'+str(i), i)

    print "Adding", num * 2, "items for testing"
    push_items(num * 2)

    def read_items():
        print "Kill when satisfied ^C"
        ti = time.time()
        count = 0
        while True:
            item = t.pop()
            if item:
                print "My item", item, "My time:", time.time() - ti
                count += 1

    try:
        read_items()
    except __HOLE__:
        pass
    t.clear()
    print "Finished"
KeyboardInterrupt
dataset/ETHPy150Open istresearch/scrapy-cluster/utils/tests/test_throttled_queue.py/main
8,291
def test_error_messages(self):
    self.model.driver.clear_objectives()
    self.model.driver.clear_constraints()
    try:
        self.model.driver._check()
    except __HOLE__, err:
        msg = "driver: Missing outputs for gradient calculation"
        self.assertEqual(str(err), msg)
    else:
        self.fail('ValueError expected')

    self.model.driver.clear_parameters()
    try:
        self.model.driver._check()
    except ValueError, err:
        msg = "driver: Missing inputs for gradient calculation"
        self.assertEqual(str(err), msg)
    else:
        self.fail('ValueError expected')
ValueError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/drivers/test/test_sensitivity.py/SensitivityDriverTestCase.test_error_messages
8,292
def _register():
    for pduKlass in globals().values():
        try:
            if issubclass(pduKlass, PDU):
                PDUS[pduKlass.commandId] = pduKlass
        except __HOLE__:
            pass
TypeError
dataset/ETHPy150Open jookies/jasmin/jasmin/vendor/smpp/pdu/operations.py/_register
8,293
def __init__(self, key, msg = None, digestmod = None):
    """Create a new HMAC object.

    :Parameters:
      key : byte string
        secret key for the MAC object.
        It must be long enough to match the expected security level of the
        MAC. However, there is no benefit in using keys longer than the
        `digest_size` of the underlying hash algorithm.
      msg : byte string
        The very first chunk of the message to authenticate.
        It is equivalent to an early call to `update()`. Optional.
    :Parameter digestmod:
        The hash algorithm the HMAC is based on.
        Default is `Crypto.Hash.MD5`.
    :Type digestmod:
        A hash module or object instantiated from `Crypto.Hash`
    """
    if digestmod is None:
        import MD5
        digestmod = MD5

    self.digestmod = digestmod
    self.outer = digestmod.new()
    self.inner = digestmod.new()
    try:
        self.digest_size = digestmod.digest_size
    except __HOLE__:
        self.digest_size = len(self.outer.digest())

    try:
        # The block size is 128 bytes for SHA384 and SHA512 and 64 bytes
        # for the others hash function
        blocksize = digestmod.block_size
    except AttributeError:
        blocksize = 64

    ipad = 0x36
    opad = 0x5C

    if len(key) > blocksize:
        key = digestmod.new(key).digest()

    key = key + bchr(0) * (blocksize - len(key))
    self.outer.update(strxor_c(key, opad))
    self.inner.update(strxor_c(key, ipad))
    if (msg):
        self.update(msg)
AttributeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/Hash/HMAC.py/HMAC.__init__
8,294
def handle(self, request, context):
    url = reverse("horizon:project:network_services:index")
    try:
        try:
            del context['template_string']
            del context['template_file']
            del context['config_type']
        except __HOLE__:
            pass
        context['config'] = json.dumps(context['config'])
        if context.get('name'):
            context['name'] = html.escape(context['name'])
        if context.get('description'):
            context['description'] = html.escape(context['description'])
        client.create_servicechain_node(request, **context)
        msg = _("Service Chain Node Created Successfully!")
        LOG.debug(msg)
        return http.HttpResponseRedirect(url)
    except Exception as e:
        msg = _("Failed to create Service Chain Node. %s") % (str(e))
        LOG.error(msg)
        exceptions.handle(request, msg, redirect=shortcuts.redirect)
KeyError
dataset/ETHPy150Open openstack/group-based-policy-ui/gbpui/panels/network_services/forms.py/CreateServiceChainNodeForm.handle
8,295
def test01_basic(self):
    d = db.DB()

    get_returns_none = d.set_get_returns_none(2)
    d.set_get_returns_none(get_returns_none)

    d.open(self.filename, db.DB_RECNO, db.DB_CREATE)

    for x in string.ascii_letters:
        recno = d.append(x * 60)
        self.assertIsInstance(recno, int)
        self.assertGreaterEqual(recno, 1)
        if verbose:
            print recno,

    if verbose: print

    stat = d.stat()
    if verbose:
        pprint(stat)

    for recno in range(1, len(d)+1):
        data = d[recno]
        if verbose:
            print data

        self.assertIsInstance(data, str)
        self.assertEqual(data, d.get(recno))

    try:
        data = d[0]  # This should raise a KeyError!?!?!
    except db.DBInvalidArgError, val:
        if sys.version_info < (2, 6) :
            self.assertEqual(val[0], db.EINVAL)
        else :
            self.assertEqual(val.args[0], db.EINVAL)
        if verbose: print val
    else:
        self.fail("expected exception")

    # test that has_key raises DB exceptions (fixed in pybsddb 4.3.2)
    try:
        d.has_key(0)
    except db.DBError, val:
        pass
    else:
        self.fail("has_key did not raise a proper exception")

    try:
        data = d[100]
    except __HOLE__:
        pass
    else:
        self.fail("expected exception")

    try:
        data = d.get(100)
    except db.DBNotFoundError, val:
        if get_returns_none:
            self.fail("unexpected exception")
    else:
        self.assertEqual(data, None)

    keys = d.keys()
    if verbose:
        print keys
    self.assertIsInstance(keys, list)
    self.assertIsInstance(keys[0], int)
    self.assertEqual(len(keys), len(d))

    items = d.items()
    if verbose:
        pprint(items)
    self.assertIsInstance(items, list)
    self.assertIsInstance(items[0], tuple)
    self.assertEqual(len(items[0]), 2)
    self.assertIsInstance(items[0][0], int)
    self.assertIsInstance(items[0][1], str)
    self.assertEqual(len(items), len(d))

    self.assertTrue(d.has_key(25))

    del d[25]
    self.assertFalse(d.has_key(25))

    d.delete(13)
    self.assertFalse(d.has_key(13))

    data = d.get_both(26, "z" * 60)
    self.assertEqual(data, "z" * 60, 'was %r' % data)
    if verbose:
        print data

    fd = d.fd()
    if verbose:
        print fd

    c = d.cursor()
    rec = c.first()
    while rec:
        if verbose:
            print rec
        rec = c.next()

    c.set(50)
    rec = c.current()
    if verbose:
        print rec

    c.put(-1, "a replacement record", db.DB_CURRENT)

    c.set(50)
    rec = c.current()
    self.assertEqual(rec, (50, "a replacement record"))
    if verbose:
        print rec

    rec = c.set_range(30)
    if verbose:
        print rec

    # test that non-existent key lookups work (and that
    # DBC_set_range doesn't have a memleak under valgrind)
    rec = c.set_range(999999)
    self.assertEqual(rec, None)
    if verbose:
        print rec

    c.close()
    d.close()

    d = db.DB()
    d.open(self.filename)
    c = d.cursor()

    # put a record beyond the consecutive end of the recno's
    d[100] = "way out there"
    self.assertEqual(d[100], "way out there")

    try:
        data = d[99]
    except KeyError:
        pass
    else:
        self.fail("expected exception")

    try:
        d.get(99)
    except db.DBKeyEmptyError, val:
        if get_returns_none:
            self.fail("unexpected DBKeyEmptyError exception")
        else:
            if sys.version_info < (2, 6) :
                self.assertEqual(val[0], db.DB_KEYEMPTY)
            else :
                self.assertEqual(val.args[0], db.DB_KEYEMPTY)
            if verbose: print val
    else:
        if not get_returns_none:
            self.fail("expected exception")

    rec = c.set(40)
    while rec:
        if verbose:
            print rec
        rec = c.next()

    c.close()
    d.close()
KeyError
dataset/ETHPy150Open francelabs/datafari/windows/python/Lib/bsddb/test/test_recno.py/SimpleRecnoTestCase.test01_basic
8,296
def authenticator(session_manager, login_url='/auth/login'):
    '''Create an authenticator decorator.

    :param session_manager: A session manager class to be used for storing
            and retrieving session data.  Probably based on
            :class:`BaseSession`.
    :param login_url: The URL to redirect to if a login is required.
            (default: ``'/auth/login'``).
    '''
    def valid_user(login_url=login_url):
        def decorator(handler, *a, **ka):
            import functools

            @functools.wraps(handler)
            def check_auth(*a, **ka):
                try:
                    data = session_manager.get_session()
                    if not data['valid']:
                        raise KeyError('Invalid login')
                except (__HOLE__, TypeError):
                    bottle.response.set_cookie(
                        'validuserloginredirect',
                        bottle.request.fullpath, path='/',
                        expires=(int(time.time()) + 3600))
                    bottle.redirect(login_url)

                # set environment
                if data.get('name'):
                    bottle.request.environ['REMOTE_USER'] = data['name']

                return handler(*a, **ka)
            return check_auth
        return decorator
    return(valid_user)
KeyError
dataset/ETHPy150Open linsomniac/bottlesession/bottlesession.py/authenticator
8,297
def forwards(self, orm):
    # Adding field 'Thumb.image'
    db.add_column('cropduster4_thumb', 'image',
                  self.gf('django.db.models.fields.related.ForeignKey')(
                      blank=True, related_name='+', null=True,
                      to=orm['cropduster.Image']),
                  keep_default=False)

    if not db.dry_run:
        Thumb = orm['cropduster.thumb']
        thumbs = Thumb.objects.filter(image_id__isnull=True)
        if thumbs.count() > 5000:
            print ("\n"
                   " ! There are too many cropduster4_thumb rows to migrate in South.\n\n"
                   " ! You will need to manually migrate the many-to-many table.\n")
            return
        for thumb in thumbs:
            try:
                image = thumb.image_set.all()[0]
            except __HOLE__:
                pass
            else:
                thumb.image_id = image.pk
                thumb.save()
IndexError
dataset/ETHPy150Open theatlantic/django-cropduster/cropduster/south_migrations/0006_auto__add_field_thumb_image.py/Migration.forwards
8,298
def _create_autocompleter_system(files_only, directories_only, handler_type_cls, get_host_func):
    def local_handler(func):
        class ChildHandler(SCPHandler):
            is_leaf = True

            def __init__(self, shell, path):
                self.path = path
                SCPHandler.__init__(self, shell)

            @property
            def handler_type(self):
                host = get_host_func(self.shell)
                if self.path in ('..', '.', '/') or host.stat(self.path).is_dir:
                    return DirectoryType()
                else:
                    return FileType()

            def __call__(self):
                func(self.shell, self.path)

        class MainHandler(SCPHandler):
            handler_type = handler_type_cls()

            def complete_subhandlers(self, part):
                host = get_host_func(self.shell)

                # Progress bar.
                for f in host.listdir():
                    if f.startswith(part):
                        if files_only and not host.stat(f).is_file:
                            continue
                        if directories_only and not host.stat(f).is_dir:
                            continue
                        yield f, ChildHandler(self.shell, f)

                # Root directory.
                if '/'.startswith(part) and not files_only:
                    yield f, ChildHandler(self.shell, '/')

            def get_subhandler(self, name):
                host = get_host_func(self.shell)

                # First check whether this name appears in the current directory.
                # (avoids stat calls on unknown files.)
                if name in host.listdir():
                    # When this file does not exist, return
                    try:
                        s = host.stat(name)
                        if (files_only and not s.is_file):
                            return
                        if (directories_only and not s.is_dir):
                            return
                    except __HOLE__:  # stat on non-existing file.
                        return
                    finally:
                        return ChildHandler(self.shell, name)

                # Root, current and parent directory.
                if name in ('/', '..', '.') and not files_only:
                    return ChildHandler(self.shell, name)

        return MainHandler
    return local_handler
IOError
dataset/ETHPy150Open jonathanslenders/python-deployer/deployer/scp_shell.py/_create_autocompleter_system
8,299
def get_metadata(full_path):
    metadata = {
        'filename': os.path.basename(full_path)[:-4],
    }

    if full_path.endswith('mp4') or full_path.endswith('m4a'):
        id3_cls = EasyMP4
    elif full_path.endswith('mp3'):
        id3_cls = EasyMP3
    else:
        id3_cls = None

    if id3_cls:
        try:
            audio = id3_cls(full_path)
        except Exception, e:
            print e
            audio = None

        if audio:
            for key in ('artist', 'title', 'album', 'genre'):
                try:
                    value = unicode(audio[key][0])
                except (IndexError, __HOLE__):
                    continue
                metadata[key] = value
            metadata['length'] = audio.info.length

    return metadata
KeyError
dataset/ETHPy150Open disqus/playa/playa/ext/audio/index.py/get_metadata