repo_name (stringlengths 5–92) | path (stringlengths 4–232) | copies (stringclasses, 19 values) | size (stringlengths 4–7) | content (stringlengths 721–1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
PieterMostert/Lipgloss | view/pretty_names.py | 1 | 1617 | # LIPGLOSS - Graphical user interface for constructing glaze recipes
# Copyright (C) 2017 Pieter Mostert
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# version 3 along with this program (see LICENCE.txt). If not, see
# <http://www.gnu.org/licenses/>.
# Contact: pi.mostert@gmail.com
# Construct prettify function
pretty_dict = {'SiO2':'SiO\u2082',
'Al2O3':'Al\u2082O\u2083',
'B2O3':'B\u2082O\u2083',
'Li2O':'Li\u2082O',
'Na2O':'Na\u2082O',
'K2O':'K\u2082O',
'P2O5':'P\u2082O\u2085',
'Fe2O3':'Fe\u2082O\u2083',
'TiO2':'TiO\u2082',
'MnO2':'MnO\u2082',
'SiO2_Al2O3':'SiO\u2082 : Al\u2082O\u2083',
'cost':'Cost',
'mass_perc_':'% weight',
'mole_perc_':'% mole'}
def prettify(text):
try:
return pretty_dict[text]
    except KeyError:
return text
def pretty_entry_type(text):
if text == 'um':
return ' UMF'
elif text == 'ma':
return ' % weight'
elif text == 'mo':
return ' % mole'
else:
return ''
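# Illustrative usage (a quick sketch, not part of the original module):
#   prettify('SiO2')        -> 'SiO\u2082'  (SiO2 rendered with a subscript 2)
#   prettify('unknown')     -> 'unknown'    (unmapped keys pass through)
#   pretty_entry_type('um') -> ' UMF'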
| gpl-3.0 | 344,815,602,470,841,860 | 32 | 70 | 0.594929 | false |
gsnbng/erpnext | erpnext/patches/v4_2/fix_gl_entries_for_stock_transactions.py | 2 | 2129 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
from frappe.utils import flt
def execute():
from erpnext.stock.stock_balance import repost
repost(allow_zero_rate=True, only_actual=True)
frappe.reload_doctype("Account")
warehouse_account = frappe.db.sql("""select name, master_name from tabAccount
where ifnull(account_type, '') = 'Warehouse'""")
if warehouse_account:
warehouses = [d[1] for d in warehouse_account]
accounts = [d[0] for d in warehouse_account]
stock_vouchers = frappe.db.sql("""select distinct sle.voucher_type, sle.voucher_no
from `tabStock Ledger Entry` sle
where sle.warehouse in (%s)
order by sle.posting_date""" %
', '.join(['%s']*len(warehouses)), tuple(warehouses))
rejected = []
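		# For each voucher touching these warehouses, compare the net stock value
		# change (Stock Ledger Entry) against the net debit/credit on the warehouse
		# accounts (GL Entry); mismatches beyond 0.1 get their GL entries rebuilt.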
for voucher_type, voucher_no in stock_vouchers:
stock_bal = frappe.db.sql("""select sum(stock_value_difference) from `tabStock Ledger Entry`
where voucher_type=%s and voucher_no =%s and warehouse in (%s)""" %
('%s', '%s', ', '.join(['%s']*len(warehouses))), tuple([voucher_type, voucher_no] + warehouses))
account_bal = frappe.db.sql("""select ifnull(sum(ifnull(debit, 0) - ifnull(credit, 0)), 0)
from `tabGL Entry`
where voucher_type=%s and voucher_no =%s and account in (%s)
group by voucher_type, voucher_no""" %
('%s', '%s', ', '.join(['%s']*len(accounts))), tuple([voucher_type, voucher_no] + accounts))
if stock_bal and account_bal and abs(flt(stock_bal[0][0]) - flt(account_bal[0][0])) > 0.1:
try:
print(voucher_type, voucher_no, stock_bal[0][0], account_bal[0][0])
frappe.db.sql("""delete from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
voucher = frappe.get_doc(voucher_type, voucher_no)
voucher.make_gl_entries()
frappe.db.commit()
except Exception as e:
print(frappe.get_traceback())
rejected.append([voucher_type, voucher_no])
frappe.db.rollback()
print("Failed to repost: ")
print(rejected)
| agpl-3.0 | -6,838,263,425,813,662,000 | 38.425926 | 100 | 0.672147 | false |
alessio/prey | platform/linux/prey-config.py | 1 | 21957 | #!/usr/bin/env python
################################################
# Prey Configurator for Linux
# By Tomas Pollak
# (c) 2010 - Fork Ltd. (usefork.com)
################################################
# if having trouble with the GTK theme as root, do this:
# sudo ln -s ~/.themes/ /root/.themes
################################################
# base includes
################################################
import pygtk
pygtk.require("2.0")
import gtk
import os
# from xml.dom.minidom import parseString
import re
import urllib
app_name = 'prey-config'
lang_path = 'lang'
script_path = os.sys.path[0]
################################################
# gettext localization
################################################
import locale
import gettext
# locale.setlocale(locale.LC_ALL, '')
# locale.bindtextdomain(app_name, lang_path)
gettext.bindtextdomain(app_name, lang_path)
gettext.textdomain(app_name)
_ = gettext.gettext
################################################
# vars and such
################################################
PREY_PATH = '/usr/share/prey'
CONFIG_FILE = PREY_PATH + '/config'
CONTROL_PANEL_URL = 'http://control.preyproject.com'
CONTROL_PANEL_URL_SSL = 'https://control.preyproject.com'
GUEST_ACCOUNT_NAME = 'guest_account'
VERSION = os.popen("cat " + PREY_PATH + "/version 2> /dev/null").read().strip().replace('version=', '').replace("'",'')
PAGES = ['report_options', 'control_panel_options', 'new_user', 'existing_user', 'existing_device', 'standalone_options']
class PreyConfigurator(object):
################################################
# helper functions
################################################
def get(self, name):
return self.root.get_object(name)
def text(self, name):
return self.get(name).get_text()
def checkbox(self, name):
if self.get(name).get_active() == True:
return 'y'
else:
return 'n'
################################################
# validations
################################################
def validate_email(self, string):
if len(string) > 7:
if re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", string) != None:
return True
return False
def validate_fields(self):
if self.text('user_name') == '':
self.show_alert(_("Empty name!"), _("Please type in your name."))
return False
if self.validate_email(self.text('email')) == False:
self.show_alert(_("Invalid email"), _("Please make sure the email address you typed is valid."))
return False
if len(self.text('password')) < 6:
self.show_alert(_("Bad password"), _("Password should contain at least 6 chars. Please try again."))
return False
elif self.text('password') != self.text('password_confirm'):
self.show_alert(_("Passwords don't match"), _("Please make sure both passwords match!"))
return False
return True
################################################
# dialogs
################################################
def show_alert(self, title, message, quit = False):
dialog = gtk.MessageDialog(
parent = None,
flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
type = gtk.MESSAGE_INFO,
buttons = gtk.BUTTONS_OK,
message_format = message)
dialog.set_title(title)
if quit == True:
dialog.connect('response', lambda dialog, response: gtk.main_quit())
else:
dialog.connect('response', lambda dialog, response: dialog.destroy())
self.center_dialog(dialog)
dialog.show()
def show_question(self, title, message):
dialog = gtk.MessageDialog(
parent = None,
flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
type = gtk.MESSAGE_QUESTION,
buttons = gtk.BUTTONS_YES_NO,
message_format = message)
dialog.set_title(title)
self.center_dialog(dialog)
response = dialog.run()
dialog.destroy()
return response
def show_about(self):
dialog = self.get('about_prey_config')
self.center_dialog(dialog)
dialog.show()
def close_about(self, dialog, response):
dialog.hide()
def center_dialog(self, dialog):
if 'window' in self.__dict__:
dialog.set_transient_for(self.window)
dialog.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
################################################
# window and widget management
################################################
def get_page_name(self):
return PAGES[self.pages.get_current_page()]
def toggle_pg3_next_apply(self, button):
button_next = self.get('button_next')
button_apply = self.get('button_apply')
if self.get('use_existing_device').get_active() == False:
button_next.hide()
button_apply.show()
button_apply.grab_default()
else:
button_apply.hide()
button_next.show()
button_next.grab_default()
def next_page(self, button):
page_name = self.get_page_name()
increment = 1
if page_name == 'control_panel_options' and self.get('new_user_option').get_active() == False:
increment = 2
if page_name == 'report_options':
if self.get('reporting_mode_cp').get_active() == True:
if self.current_api_key != '':
response = self.show_question(_("Hold your horses!"), _("Your device seems to be already synchronized with the Control Panel! Do you want to re-setup your account? (Not recommended)"))
if response == gtk.RESPONSE_NO:
return
else:
increment = 5
        if page_name == 'existing_user': # then we are going to select an existing device
if not self.get_existing_user(True):
# login didn't work, so don't go to next page
return
self.pages.set_current_page(self.pages.get_current_page() + increment)
self.toggle_buttons(button, None, 1)
def prev_page(self, button):
page_name = self.get_page_name()
decrement = 1
if page_name == 'existing_user':
decrement = 2
elif page_name == 'standalone_options':
decrement = 5
if self.pages.get_current_page() != 0:
self.pages.set_current_page(self.pages.get_current_page() - decrement)
self.toggle_buttons(button, None, 1)
def toggle_buttons(self, button, tab, tab_number):
button_prev = self.get('button_prev')
button_next = self.get('button_next')
button_apply = self.get('button_apply')
if tab_number == 0: #main settings tab
button_prev.hide()
button_next.hide()
button_apply.show()
self.hide_ssl()
else:
page_name = self.get_page_name()
if page_name == 'report_options':
button_prev.hide()
else:
button_prev.show()
if page_name == 'report_options' or page_name == 'control_panel_options' or (page_name == 'existing_user' and self.get('use_existing_device').get_active() == True):
button_apply.hide()
button_next.show()
button_next.grab_default()
else:
button_next.hide()
button_apply.show()
button_apply.grab_default()
if self.get_page_name() == 'new_user' or self.get_page_name() == 'existing_user':
self.show_ssl()
else:
self.hide_ssl()
def hide_ssl(self):
self.get('icon_ssl').hide()
self.get('lbl_ssl').hide()
def show_ssl(self):
self.get('icon_ssl').show()
self.get('lbl_ssl').show()
def set_default_action(self,button,ctrl):
button_cancel = self.get('button_cancel')
cancel_has_default = button_cancel.flags() & gtk.HAS_DEFAULT
button_prev = self.get('button_prev')
prev_has_default = button_prev.flags() & gtk.HAS_DEFAULT
button_next = self.get('button_next')
button_apply = self.get('button_apply')
if not cancel_has_default and not prev_has_default:
if button_next.flags() & gtk.VISIBLE:
button_next.grab_default()
else:
button_apply.grab_default()
def ensure_visible(self,widget,event): #ensure the widget focused is visible in the scroll window
self.get('delay').set_name('delay')
self.get('extended_headers').set_name('extended_headers')
widget_name = widget.get_name()
scrollwindow = self.get('main_settings_scrollwindow')
internal_height = self.get('main_settings').get_size()[1]
port_height = scrollwindow.allocation.height
port_vadjust = scrollwindow.get_vadjustment()
port_posn = port_vadjust.value
widget_posn = widget.allocation.y
widget_height = widget.allocation.height
if (widget_posn - port_posn) >= 0 and (widget_posn + widget_height - port_posn) <= port_height:
#widget is fully visible (even if its description or icon is not), so do nothing
return False
# for now we know there are only two possible hidden widgets so we scroll all the way up or all the way down
# if we add options to this page we will have to scroll differently
if widget_name == 'delay':
#scroll to top
port_vadjust.set_value(0)
elif widget_name == 'extended_headers':
#scroll to bottom
port_vadjust.set_value(internal_height - port_height)
return True
def key_pressed(self, widget, event):
# show about dialog on F1 keypress
if (event.keyval == gtk.keysyms.F1) \
and (event.state & gtk.gdk.CONTROL_MASK) == 0 \
and (event.state & gtk.gdk.SHIFT_MASK) == 0:
self.show_about()
return True
return False
################################################
# setting getting
################################################
def prey_exists(self):
if not os.path.exists(PREY_PATH + '/core'):
self.show_alert(_("Prey not installed"), _("Couldn't find a Prey installation on this system. Sorry."), True)
else:
return True
def is_config_writable(self):
command = 'if [ ! -w "'+PREY_PATH+'/config" ]; then echo 1; fi'
no_access = os.popen(command).read().strip()
if no_access == '1':
self.show_alert(_("Unauthorized"), _("You don't have access to manage Prey's configuration. Sorry."), True)
else:
return True
def get_setting(self, var):
command = 'grep \''+var+'=\' '+CONFIG_FILE+' | sed "s/'+var+'=\'\(.*\)\'/\\1/"'
return os.popen(command).read().strip()
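        # Illustrative (config line assumed): for a line like api_key='abc123',
        # get_setting('api_key') returns 'abc123'.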
def get_current_settings(self):
self.current_delay = os.popen("crontab -l | grep prey | cut -c 3-4").read()
if not self.current_delay: self.current_delay = 20
self.current_auto_connect = self.get_setting('auto_connect')
self.current_extended_headers = self.get_setting('extended_headers')
self.current_guest_account = self.guest_account_exists()
self.current_lang = self.get_setting('lang')
self.current_check_url = self.get_setting('check_url')
self.current_post_method = self.get_setting('post_method')
self.current_api_key = self.get_setting('api_key')
self.current_device_key = self.get_setting('device_key')
self.current_mail_to = self.get_setting('mail_to')
self.current_smtp_server = self.get_setting('smtp_server')
self.current_smtp_username = self.get_setting('smtp_username')
def guest_account_exists(self):
result = os.popen('id ' + GUEST_ACCOUNT_NAME + ' 2> /dev/null').read()
if result.find("uid"):
return False
else:
return True
def toggle_guest_account(self, enabled):
if enabled:
# create user and leave password blank
os.system("useradd -m " + GUEST_ACCOUNT_NAME + "; passwd -d " + GUEST_ACCOUNT_NAME)
# Authorize login with no passwords in gdm
os.system("sed -i 's/PasswordRequired=false/#PasswordRequired=false/' /etc/gdm/gdm.conf")
# Authorize login with no passwords in pam
os.system("sed -i 's/nullok_secure/nullok/' /etc/pam.d/common-auth")
else:
os.system("userdel -r " + GUEST_ACCOUNT_NAME)
os.system("sed -i 's/#PasswordRequired=false/PasswordRequired=false/' /etc/gdm/gdm.conf")
os.system("sed -i 's/nullok/nullok_secure/' /etc/pam.d/common-auth")
def display_real_settings(self):
self.get('delay').set_value(int(self.current_delay))
self.get('guest_account').set_active(self.current_guest_account)
if self.current_auto_connect == 'y':
self.get('auto_connect').set_active(True)
if self.current_extended_headers == 'y':
self.get('extended_headers').set_active(True)
self.get('check_url').set_text(self.current_check_url)
self.get('mail_to').set_text(self.current_mail_to)
self.get('smtp_server').set_text(self.current_smtp_server)
self.get('smtp_username').set_text(self.current_smtp_username)
if self.current_post_method == 'email':
self.get('reporting_mode_standalone').set_active(True)
    def is_configured(self):
        return not (self.current_post_method == 'http' and self.current_api_key == '')

    def check_if_configured(self):
        if not self.is_configured():
            self.show_alert(_('Welcome!'), _("It seems this is the first time you run this setup. Please set up your reporting method now, otherwise Prey won't work!"))
################################################
# setting settings
################################################
def save(self, param, value):
if param == 'check_url': value = value.replace('/', '\/')
command = 'sed -i -e "s/'+param+'=\'.*\'/'+param+'=\''+value+'\'/" '+ CONFIG_FILE
os.system(command)
def apply_settings(self, button):
self.get('button_apply').set_label(_("Saving..."))
if self.get("main_tabs").get_current_page() == 0: # main settings page
self.apply_main_settings()
else:
page_name = self.get_page_name()
if page_name == 'new_user':
if self.validate_fields():
self.create_user()
elif page_name == "existing_user": # this is an apply event, so we are creating a new device (no "advanced" device selection)
self.get_existing_user(False)
elif page_name == "existing_device":
self.apply_device_settings()
elif page_name == "standalone_options":
self.apply_standalone_settings()
self.get('button_apply').set_label('gtk-apply')
def apply_main_settings(self):
# save('lang', text('lang'))
self.save('auto_connect', self.checkbox('auto_connect'))
self.save('extended_headers', self.checkbox('extended_headers'))
if((self.checkbox('guest_account') == 'y') != self.current_guest_account):
self.toggle_guest_account(self.checkbox('guest_account') == 'y')
# check and change the crontab interval
new_delay = self.get('delay').get_value_as_int()
if new_delay != int(self.current_delay):
# print 'Updating delay in crontab...'
os.system('(crontab -l | grep -v prey; echo "*/'+str(new_delay)+' * * * * /usr/share/prey/prey.sh > /var/log/prey.log") | crontab -')
        if not self.is_configured():
self.show_alert(_("All good."), _("Configuration saved. Remember you still need to set up your posting method, otherwise Prey won't work!"))
else:
self.show_alert(_("All good."), _("Configuration saved!"), True)
def apply_control_panel_settings(self):
if self.current_post_method != 'http':
self.save('post_method', 'http')
if self.current_check_url != CONTROL_PANEL_URL:
self.save('check_url', CONTROL_PANEL_URL)
# we could eventually use the email as a checking method to remove prey
# i.e. "under which email was this account set up?"
# self.save('mail_to', self.email)
self.save('api_key', self.api_key)
if self.device_key != "":
self.save('device_key', self.device_key)
def apply_standalone_settings(self):
if self.current_post_method != 'email':
self.save('post_method', 'email')
self.save('check_url', self.text('check_url'))
self.save('mail_to', self.text('mail_to'))
self.save('smtp_server', self.text('smtp_server'))
self.save('smtp_username', self.text('smtp_username'))
smtp_password = self.text('smtp_password')
if smtp_password != '':
encoded_pass = os.popen('echo -n "'+ smtp_password +'" | openssl enc -base64').read().strip()
self.save('smtp_password', encoded_pass)
self.exit_configurator()
def exit_configurator(self):
self.run_prey()
self.show_alert(_("Success"), _("Configuration saved! Your device is now setup and being tracked by Prey. Happy hunting!"), True)
def run_prey(self):
os.system(PREY_PATH + '/prey.sh > /var/log/prey.log &')
################################################
# control panel api
################################################
def report_connection_issue(self):
self.show_alert(_("Problem connecting"), _("We seem to be having a problem connecting to your Control Panel. This is likely a temporary issue. Please try again in a few moments."))
def user_has_available_slots(self, string):
matches = re.search(r"<available_slots>(\w*)</available_slots>", string)
if matches and int(matches.groups()[0]) > 0:
return True
else:
return False
def get_api_key(self, string):
matches = re.search(r"<key>(\w*)</key>", string)
if matches:
self.api_key = matches.groups()[0]
def get_device_keys(self, string, has_available_slots):
hostname = os.popen("hostname").read().strip()
devices = self.get('device')
index = -1
chosen = index
liststore = gtk.ListStore(str,str)
devices.clear()
matches = re.findall(r"<device>\s*<key>(\w*)</key>.*?<title>([\s\w]*)</title>\s*</device>", string, re.DOTALL)
for match in matches:
index += 1
key = match[0]
title = match[1]
liststore.append([title,key])
if key == self.current_device_key: #set the choice because we have a matching device key
chosen = index
            elif title.lower() == hostname.lower() and chosen < 0: #set the choice because we likely have a matching title (but device key takes precedence)
chosen = index
if index < 0:
#self.get('create_new_device').set_active(True)
self.show_alert(_("No devices exist"), _("There are no devices currently defined in your Control Panel.\n\nPlease select the option to create a new device."))
return False
devices.set_model(liststore)
cell = gtk.CellRendererText()
devices.pack_start(cell, True)
devices.add_attribute(cell, 'text', 0)
devices.set_active(chosen)
return True
def create_user(self):
self.email = self.text('email')
params = urllib.urlencode({'user[name]': self.text('user_name'), 'user[email]': self.email, 'user[password]': self.text('password'), 'user[password_confirmation]' : self.text('password_confirm')})
# params = 'user[name]='+self.text('user_name')+'&user[email]='+self.email+'&user[password]='+self.text('password')+'&user[password_confirmation]='+self.text('password_confirm')
result = os.popen('curl -i -s -k --connect-timeout 5 '+ CONTROL_PANEL_URL_SSL + '/users.xml -d \"'+params+'\"').read().strip()
if result.find("<key>") != -1:
self.get_api_key(result)
self.device_key = ""
elif result.find("Email has already been taken") != -1:
self.show_alert(_("Email has already been taken"), _("That email address already exists! If you signed up previously, please go back and select the Existing User option."))
return
else:
self.show_alert(_("Couldn't create user!"), _("There was a problem creating your account. Please make sure the email address you entered is valid, as well as your password."))
return
self.apply_control_panel_settings()
self.run_prey()
self.show_alert(_("Account created!"), _("Your account has been succesfully created and configured in Prey's Control Panel.\n\nPlease check your inbox now, you should have received a verification email."), True)
def get_existing_user(self, show_devices):
self.email = self.text('existing_email')
password = self.text('existing_password')
result = os.popen('curl -i -s -k --connect-timeout 5 '+ CONTROL_PANEL_URL_SSL + '/profile.xml -u '+self.email+":'"+password+"'").read().strip()
if result.find('401 Unauthorized') != -1:
self.show_alert(_("User does not exist"), _("Couldn't log you in. Remember you need to activate your account opening the link we emailed you.\n\nIf you forgot your password please visit preyproject.com."))
return
if result.find("<user>") != -1:
self.get_api_key(result)
else:
self.report_connection_issue()
return False
has_available_slots = self.user_has_available_slots(result)
if not has_available_slots and not show_devices:
self.show_alert(_("Not allowed"), _("It seems you've reached your limit for devices!\n\nIf you had previously added this PC, you should select the \"Device already exists\" option to select the device from a list of devices you have already defined.\n\nIf this is a new device, you can also upgrade to a Pro Account to increase your slot count and get access to additional features. For more information, please check\nhttp://preyproject.com/plans."))
return False
if show_devices:
result = os.popen('curl -i -s -k --connect-timeout 5 '+ CONTROL_PANEL_URL_SSL + '/devices.xml -u '+self.email+":'"+password+"'").read().strip()
if result.find("</devices>") != -1:
return self.get_device_keys(result,has_available_slots)
else:
self.report_connection_issue()
return False
else:
self.device_key = ""
self.apply_control_panel_settings()
self.exit_configurator()
def apply_device_settings(self):
devices = self.get('device')
model = devices.get_model()
self.device_key = model.get_value(devices.get_active_iter(),1)
self.apply_control_panel_settings()
self.exit_configurator()
def __init__(self):
if not self.prey_exists() or not self.is_config_writable():
gtk.main()
exit(1)
self.get_current_settings()
builder = gtk.Builder()
builder.set_translation_domain(app_name)
builder.add_from_file(script_path + "/prey-config.glade")
builder.connect_signals({
"on_window_destroy" : gtk.main_quit,
"prev_page" : self.prev_page,
"next_page" : self.next_page,
"toggle_buttons" : self.toggle_buttons,
"apply_settings" : self.apply_settings,
"toggle_pg3_next_apply" : self.toggle_pg3_next_apply,
"set_default_action" : self.set_default_action,
"ensure_visible" : self.ensure_visible,
"key_pressed" : self.key_pressed,
"close_about" : self.close_about
})
self.window = builder.get_object("window")
self.window.set_title(self.window.get_title() + " (v" + VERSION + ")")
# self.window.get_settings().set_string_property('gtk-font-name', 'sans normal 11','');
self.pages = builder.get_object("reporting_mode_tabs")
self.root = builder
self.get('delay').grab_focus()
about = self.get('about_prey_config')
about.set_version(VERSION)
self.display_real_settings()
self.check_if_configured()
if __name__ == "__main__":
app = PreyConfigurator()
gtk.main()
| gpl-3.0 | -2,701,603,045,709,129,000 | 35.53411 | 455 | 0.644851 | false |
xrmx/pylokit | setup.py | 1 | 1072 | from setuptools import setup, find_packages
import os
VERSION = "0.8.1"
CLASSIFIERS = [
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Office/Business :: Office Suites',
]
setup(
author="Riccardo Magliocchetti",
author_email="riccardo.magliocchetti@gmail.com",
name='pylokit',
version=VERSION,
description='Python CFFI wrapper for LibreOfficeKit',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
url="https://github.com/xrmx/pylokit",
license='MPL 2.0',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=[
'cffi',
'six',
],
test_suite='pylokit.tests',
packages=find_packages(),
include_package_data=True,
zip_safe = False,
)
| mpl-2.0 | -7,979,361,998,995,849,000 | 28.777778 | 88 | 0.655784 | false |
xuru/pyvisdk | pyvisdk/do/or_alarm_expression.py | 1 | 1024 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def OrAlarmExpression(vim, *args, **kwargs):
'''A data object type that links multiple alarm expressions with OR operators.'''
obj = vim.client.factory.create('ns0:OrAlarmExpression')
# do some validation checking...
if (len(args) + len(kwargs)) < 1:
        raise IndexError('Expected at least 1 argument, got: %d' % (len(args) + len(kwargs)))
required = [ 'expression' ]
optional = [ 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
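# Illustrative usage (argument values assumed): positional arguments map onto
# the required/optional names above, so the first one becomes 'expression':
#   expr = OrAlarmExpression(vim, [alarm_expr_a, alarm_expr_b])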
| mit | 7,627,906,779,953,518,000 | 30.060606 | 124 | 0.598633 | false |
jmschrei/blueberry | docs/source/conf.py | 1 | 10487 | # -*- coding: utf-8 -*-
#
# blueberry documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 23 00:00:07 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
libpath = os.path.join(curr_path, '../../')
sys.path.insert(0, libpath)
sys.path.insert(0, curr_path)
# curr_path was inserted into sys.path above, so sphinx_utils is importable as
# a top-level module (a relative import fails here, since conf.py is not part
# of a package).
from sphinx_utils import MarkdownParser, AutoStructify
github_doc_root = "https://github.com/jmschrei/blueberry/docs/source"
MarkdownParser.github_doc_root = github_doc_root
source_parsers = {
'.md': MarkdownParser,
}
# The suffix(es) of source filenames.
source_suffix = ['.rst', '.md']
import mock
MOCK_MODULES = ['joblib', 'networkx', 'scipy', 'scipy.special']
for mod in MOCK_MODULES:
sys.modules[mod] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# source_suffix is set above to ['.rst', '.md'], alongside source_parsers.
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'blueberry'
copyright = u'2016, Jacob Schreiber'
author = u'Jacob Schreiber'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'v0.1.0'
# The full version, including alpha/beta/rc tags.
release = u'v0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'blueberrydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'blueberry.tex', u'blueberry Documentation',
u'Jacob Schreiber', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'blueberry', u'blueberry Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'blueberry', u'blueberry Documentation',
author, 'blueberry', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def setup(app):
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: github_doc_root + url,
'auto_toc_tree_section': 'Contents',
}, True)
app.add_transform(AutoStructify)
| mit | -7,591,383,865,380,614,000 | 31.367284 | 79 | 0.704491 | false |
hroark13/android_kernel_zte_draconis | scripts/gcc-wrapper.py | 2 | 3383 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
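# One minimal way to pin the locale (a sketch; assumes the en_US.UTF-8 locale
# is installed) would be to set, before the Popen() call in run_gcc() below:
#   os.environ['LANG'] = os.environ['LC_ALL'] = 'en_US.UTF-8'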
allowed_warnings = set([
"return_address.c:62",
])
# Capture the name of the object file, can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
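# Illustrative match (path assumed): a gcc line such as
#   "drivers/net/foo.c:62:10: warning: unused variable 'x'"
# yields group(2) == "foo.c:62", the key compared against allowed_warnings.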
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
# interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
| gpl-2.0 | 7,115,874,685,368,545,000 | 34.239583 | 97 | 0.668342 | false |
rouxcode/django-cms-plugins | cmsplugins/baseplugin/utils.py | 1 | 1125 | from __future__ import unicode_literals
from importlib import import_module
from django.utils import six
from django.utils.html import mark_safe
from django.utils.translation import ugettext_lazy as _
def get_indicator_hidden(request, instance):
html = ''
is_visible = getattr(instance, 'is_visible', True)
if request.toolbar.edit_mode_active and not is_visible:
name = _('hidden')
css_class = 'plugin-indicator-hidden'
html = '<span class="{}">{}</span>'.format(
css_class,
name,
)
return mark_safe(html)
def get_str_from_tuple(key='', properties=()):
return dict((k, v) for k, v in properties).get(key, '')
def load_object(import_path):
if not isinstance(import_path, six.string_types):
return import_path
if '.' not in import_path:
raise TypeError(
"'import_path' argument to 'django_load.core.load_object'"
" must contain at least one dot."
)
module_name, object_name = import_path.rsplit('.', 1)
module = import_module(module_name)
return getattr(module, object_name)
| mit | 25,095,546,226,057,910 | 29.405405 | 70 | 0.636444 | false |
griffinfoster/shapelets | setup.py | 1 | 1460 | from setuptools import setup, find_packages
#from Cython.Build import cythonize
import numpy as np
import os, sys, glob
__version__ = '0.2' #this needs to be kept up to date with shapelets/__init__.py
setup(name = 'shapelets',
version = __version__,
description = 'Shapelet fitting and plotting',
long_description = 'Shapelet fitting and plotting',
author = 'Griffin Foster',
author_email = 'griffin.foster@gmail.com',
url = 'https://github.com/griffinfoster/shapelets',
platforms = ['*nix'],
license='GPL',
requires = ['distutils', 'numpy', 'astropy', 'scipy', 'matplotlib', 'json'],
provides = ['shapelets', 'shapelets.phs'],
packages = ['shapelets', 'shapelets.phs'],
#ext_modules = cythonize('shapelets/cshapelet.pyx', annotate=True),
include_dirs = [np.get_include()],
#scripts = glob.glob('scripts/*.py'),
scripts = ['scripts/fitShapelet.py', 'scripts/insertShapelet.py', 'scripts/plotCoeffs.py', 'scripts/plotImg.py', 'scripts/plotShapelets.py', 'scripts/solveShapelet.py'],
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Topic :: Scientific/Engineering :: Astronomy',
],
)
| bsd-3-clause | 9,192,524,142,672,802,000 | 40.714286 | 173 | 0.644521 | false |
bsandrow/hn-saved-stories | hn_saved_stories/__init__.py | 1 | 8406 |
import os
import json
import re
import sys
import requests
import lxml.html
from datetime import datetime, timedelta
from pprint import pprint as PP
from time import sleep
from urlparse import urljoin
from .utils import hn_relatime_to_datetime, get_story_id
from .logger import logger
def parse_date_header(date):
errors = []
formats = [
"%a, %d %B %Y %H:%M:%S %Z",
"%a, %d %b %Y %H:%M:%S %Z",
]
for format in formats:
try:
return datetime.strptime(date, format)
except ValueError as e:
errors.append(e)
raise errors[0]
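# Illustrative (value assumed): parse_date_header("Thu, 01 Jan 2015 12:00:00 GMT")
# fails the first (full month name) format, matches the second, and returns a
# naive datetime(2015, 1, 1, 12, 0).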
class HNSession(object):
user_agent = 'hn-saved-stories/0.2 (https://github.com/bsandrow/hn-saved-stories/)'
max_retries = 2
retry_delay = 30
def __init__(self, headers=None):
headers = headers or {}
headers['User-Agent'] = headers.get('User-Agent', self.user_agent)
self.session = requests.Session()
self.session.headers = headers
self.last_response = None
def last_response_time(self):
""" Return the time of the last response """
        if self.last_response is not None and self.last_response.headers.get('date'):
            return parse_date_header(self.last_response.headers.get('date'))
        else:
            return None
def last_response_url(self):
""" Return the url of the last response """
        if self.last_response is not None:
            return self.last_response.url
        else:
            return None
def get(self, *args, **kwargs):
""" requests.get() within the session
Wraps requests.get() within the session (so it has access to session
cookies), and also retries on failures, because timeouts seem to
happen randomly.
"""
if 'timeout' not in kwargs:
kwargs['timeout'] = 10
retries = 0
while True:
try:
request = self.session.get(*args, **kwargs)
request.raise_for_status()
return request
except requests.exceptions.RequestException as e:
if retries < self.max_retries:
retries += 1
sleep(self.retry_delay)
logger.info("[Sleeping between requests (%ss)]" % self.retry_delay)
else:
raise
def resolve_url(self, url):
""" Resolve :url: using the most appropriate base url """
base_url = self.last_response_url() or 'https://news.ycombinator.com'
return urljoin(base_url, url)
def login(self, username, password, debug=False):
""" Log into the session using provided credentials """
try:
response = self.get('https://news.ycombinator.com/newslogin')
except requests.exceptions.HTTPError:
raise Exception("Error: Unable to retrieve login page")
doc = lxml.html.fromstring(response.text)
fields = doc.xpath('.//form[1]/input')
form_data = { x.get('name'): x.get('value') for x in fields }
form_data['u'] = username
form_data['p'] = password
if debug:
print "Login Form Data: ",
import pprint
pprint.pprint(form_data)
response = self.session.post('https://news.ycombinator.com/y', data=form_data, timeout=10)
if response.status_code != requests.codes.ok:
raise Exception("Error: Unable to successfully login.")
self.username = username
self.last_response = response
def get_saved_stories(self, max_pages=None, break_func=None):
""" Fetch the list of 'saved stories' from a profile
Fetch the list of saved stories for a Hacker News user account. The
session needs to be logged into an account for this to work.
break_func - A function that takes the current page's story list, and
returns True if we should break out of the loop.
max_pages - The maximum number of pages that we should go through
before aborting. A value of None goes through all pages.
"""
def parse_story(title, subtext):
""" Parse a story from title + subtext """
url_keys = ['url', 'comments', 'submitter_link']
story = {}
title_anchor = title.xpath('./a')[0]
comments_anchor = subtext.xpath('.//a[contains(text(), "comments") or contains(text(), "discuss")]')[0] # See Footnote [1]
story['url'] = title_anchor.get('href')
story['title'] = title_anchor.text
story['comments'] = comments_anchor.get('href')
story['submitter'] = subtext.xpath('.//a[1]//text()')[0] # See Footnote [4]
story['submitter_link'] = subtext.xpath('.//a[1]/@href')[0]
story['submitted_at'] = str( hn_relatime_to_datetime(self.last_response_time(), subtext.xpath('./text()')[1]) )
# Resolve all relative URLs
for key in story.keys():
if key in url_keys and story.get(key):
story[key] = self.resolve_url(story[key])
return get_story_id(story), story
page = 1
stories = {}
url = 'https://news.ycombinator.com/saved?id=%s' % self.username
while max_pages is None or page <= max_pages:
html = None
try:
logger.info("Page %d:" % page)
logger.debug(" url = %s" % url)
logger.info(" Fetching...")
try:
response = self.get(url)
except requests.exceptions.HTTPError as e:
raise Exception("Error: Failed to retrieve page %d, error:'%s', rurl: %s" % (page, str(e), url))
if response.text == "Can't display that.":
raise Exception("Error: Got \"Can't display that\" response.")
logger.info(" Parsing...")
html = lxml.html.fromstring(response.text)
basetime = parse_date_header(response.headers['date'])
title = html.cssselect('td.title') # See Footnote [3]
subtext = html.cssselect('td.subtext')
page_stories = dict([ parse_story(*s) for s in zip(title[1::2], subtext) ])
try:
next_link = title[-1].xpath('.//a[text() = "More"]/@href')
except IndexError:
sys.exit("Caught IndexError. Dumping HTML:" + lxml.html.tostring(html))
next_link = next_link[0] if next_link else None
stories.update(page_stories)
should_break = (break_func and break_func(page_stories)) or next_link is None
if should_break:
break
url = self.resolve_url(next_link)
page += 1
logger.info(" Sleeping (1s)...")
sleep(1)
except Exception as e:
if html:
logger.debug("Caught exception. Dumping page...")
logger.debug("______________")
logger.debug(lxml.html.tostring(html, pretty_print=True))
logger.debug("______________")
raise
logger.info("Done.")
return stories
# Footnotes
# ~~~~~~~~~
# [1] Anchor text needs 'comments,' because Polls submitted by yourself there
# is a 'add choice.' Also, if the story has no comments, then the anchor
# text is just 'discuss.'
#
# [2] '[Dead]' links remove the 'href' attribute from the anchor, so you end up
# with None as a URL.
#
# [3] 'td.title' selects 3 different things:
# 1) the number of the story (in reverse, story #1 is
# the most recently saved)
# 2) the title + link of the story
# 3) the 'More' link at the bottom of the page, which
# goes to the next page in the series.
# The series should look something like [1,2,1,2,1,2,1,2,3], #1 and #2
# alternating with #3 being the last in the list. #3 will be missing on the
# final page.
#
# [4] The '//text()' part is needed because sometimes the submitter has a
# <font> element colouring it, so text() is not a direct child of the
# anchor. E.g.:
#
# <a href="user?id=foofoobar"><font color="#3c963c">foofoobar</font></a>
| mit | 7,238,045,857,086,352,000 | 35.868421 | 134 | 0.557935 | false |
rosarior/mayan | apps/main/__init__.py | 1 | 2420 | from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from navigation.api import register_top_menu
from navigation.api import register_links
from project_setup.api import register_setup
from project_tools.api import register_tool
from .conf.settings import SIDE_BAR_SEARCH, DISABLE_HOME_VIEW
__author__ = 'Roberto Rosario'
__copyright__ = 'Copyright 2012 Roberto Rosario'
__credits__ = ['Roberto Rosario',]
__license__ = 'GPL'
__maintainer__ = 'Roberto Rosario'
__email__ = 'roberto.rosario.gonzalez@gmail.com'
__status__ = 'Production'
__version_info__ = {
'major': 1,
'minor': 0,
'micro': 0,
'releaselevel': 'alpha',
'serial': 0
}
def is_superuser(context):
return context['request'].user.is_staff or context['request'].user.is_superuser
maintenance_menu = {'text': _(u'maintenance'), 'view': 'maintenance_menu', 'famfam': 'wrench', 'icon': 'wrench.png'}
statistics = {'text': _(u'statistics'), 'view': 'statistics', 'famfam': 'table', 'icon': 'blackboard_sum.png', 'condition': is_superuser, 'children_view_regex': [r'statistics']}
diagnostics = {'text': _(u'diagnostics'), 'view': 'diagnostics', 'famfam': 'pill', 'icon': 'pill.png'}
sentry = {'text': _(u'sentry'), 'view': 'sentry', 'famfam': 'bug', 'icon': 'bug.png', 'condition': is_superuser}
admin_site = {'text': _(u'admin site'), 'view': 'admin:index', 'famfam': 'keyboard', 'icon': 'keyboard.png', 'condition': is_superuser}
if not DISABLE_HOME_VIEW:
register_top_menu('home', link={'text': _(u'home'), 'view': 'home', 'famfam': 'house'}, position=0)
if not SIDE_BAR_SEARCH:
register_top_menu('search', link={'text': _(u'search'), 'view': 'search', 'famfam': 'zoom'}, children_path_regex=[r'^search/'])
def get_version():
'''
Return the formatted version information
'''
vers = ['%(major)i.%(minor)i' % __version_info__, ]
if __version_info__['micro']:
vers.append('.%(micro)i' % __version_info__)
if __version_info__['releaselevel'] != 'final':
vers.append('%(releaselevel)s%(serial)i' % __version_info__)
return ''.join(vers)
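# Illustrative: with the __version_info__ defined above (1.0.0, alpha, serial 0),
# get_version() returns '1.0alpha0'.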
__version__ = get_version()
if 'django.contrib.admin' in settings.INSTALLED_APPS:
register_setup(admin_site)
register_tool(maintenance_menu)
register_tool(statistics)
register_tool(diagnostics)
if 'sentry' in settings.INSTALLED_APPS:
register_tool(sentry)
| gpl-3.0 | 7,616,351,844,105,236,000 | 35.119403 | 177 | 0.659504 | false |
ssvlab/esbmc-gpu | regression/esbmc-cpp/resultados.py | 1 | 2672 | #!/bin/python
#############################
# Script to display test suite results
##############################
import sys
import os
from sys import argv
import xml.etree.ElementTree as ET
def error(message):
sys.stderr.write("error: %s\n" % message)
#sys.exit(1)
error_file = "resultados_error.log"
f = open(error_file, 'w')
suc = 0
fai = 0
fsuc = 0
fneg = 0
total = 0
crash = 0
def disp_resul():
"""
Display the verification result
"""
print "Verification Success: ", suc
print "Verification Fail: ", fai
print "False positive:", fsuc
print "False negative:", fneg
print "Crashed:", crash
print "Total: ", total
def resultados(ite_ex, ite_ac):
"""
    Check one result and update the success, fail, false positive,
    false negative and crash counters
"""
global suc, fai, fsuc, fneg, total, crash
if ite_ac.text == "[ABORTED]":
crash+=1
#print "CRASH"
elif ite_ex.text == ite_ac.text:
if ite_ac.text == "[SUCCESSFUL]":
suc+=1
#print "SUCCESSFUL"
elif ite_ac.text in ["[FAILED]", "[CONVERSION_ERROR]", "[PARSING_ERROR]"]:
fai+=1
#print "FAILED"
elif ite_ex.text == "[FAILED]" and ite_ac.text == "[SUCCESSFUL]":
fsuc+=1
#print "FALSO POSITIVO"
else:
fneg+=1
#print "FALSO NEGATIVO"
total+=1
#print
def show_info(path):
"""
    Parse the test_log.xml tree for one directory and tally its results
"""
global suc, fai, fsuc, fneg, total, crash
suc = fai = fsuc = fneg = total = crash = 0
os.chdir(path)
print "##### Directory: " + path
try:
tree = ET.parse("test_log.xml")
except IOError as e:
#sys.exit("Could not open test_log.xml")
error("Could not open test_log.xml")
return
#XML file
root = tree.getroot()
for res in root.findall('run-test'):
ite_ex = res.find('item_09_expected-result')
ite_ac = res.find('item_10_actual-result')
#print res.find('item_01_test-name').text
#print ite_ex.text
#print ite_ac.text
if ite_ex != None and ite_ac != None:
resultados(ite_ex, ite_ac)
else:
f.write('Error file: ')
f.write(res.find('item_01_test-name').text + '\n')
error("Parser error at " + res.find('item_01_test-name').text)
disp_resul() #display result
def main():
if len(sys.argv) < 2:
print "usage: %s <PATH>" % argv[0]
sys.exit(1)
path = argv[1];
listing = os.listdir(path)
listing.sort() #sort files
os.chdir(path)
for infile in listing:
if os.path.isdir(infile):
show_info(infile)
print
os.chdir("..")
if __name__ == "__main__":
main()
| apache-2.0 | -8,661,437,765,248,857,000 | 23.290909 | 80 | 0.552395 | false |
475Cumulus/TBone | tests/data/test_models.py | 1 | 3358 | #!/usr/bin/env python
# encoding: utf-8
import pytest
import datetime
from itertools import zip_longest
from tbone.data.fields import *
from tbone.data.models import *
from tbone.testing.fixtures import event_loop
def test_model_repr():
''' Test Model repr function '''
class M(Model):
pass
m = M()
assert repr(m) == '<M instance>'
@pytest.mark.asyncio
async def test_model_creation_and_serialization():
'''
Simple model creation test
'''
class M(Model):
name = StringField()
age = IntegerField()
decimal = FloatField()
dt = DateTimeField()
m = M({'name': 'Ron Burgundy', 'age': 45, 'decimal': '34.77', 'dt': '2017-07-25T12:34:14.414471'})
# convert model to primitive form
data = await m.serialize()
# check result is dict
assert isinstance(data, dict)
# check keys match
assert all(a == b for a, b in zip_longest(m._fields.keys(), data.keys(), fillvalue=None))
@pytest.mark.asyncio
async def test_model_import():
class M(Model):
first_name = StringField()
last_name = StringField()
m = M()
m.import_data({'first_name': 'Ron', 'last_name': 'Burgundy'})
data = await m.serialize()
assert data['first_name'] == 'Ron'
assert data['last_name'] == 'Burgundy'
with pytest.raises(ValueError):
m.import_data('Ron Burgundy')
@pytest.mark.asyncio
async def test_model_serialize_decorator():
class M(Model):
first_name = StringField()
last_name = StringField()
@serialize
async def full_name(self):
return '{} {}'.format(self.first_name, self.last_name)
m = M({'first_name': 'Ron', 'last_name': 'Burgundy'})
data = await m.serialize()
assert data['first_name'] == 'Ron'
assert data['last_name'] == 'Burgundy'
assert 'full_name' in data
assert data['full_name'] == 'Ron Burgundy'
def test_model_items():
class M(Model):
first_name = StringField()
last_name = StringField()
dob = DateTimeField()
data = {'first_name': 'Ron', 'last_name': 'Burgundy', 'dob': datetime.datetime.now()}
mo = M(data)
for key, value in mo.items():
assert value == data[key]
@pytest.mark.asyncio
async def test_field_projection():
class M(Model):
first_name = StringField()
last_name = StringField()
dob = DateTimeField()
number_of_views = IntegerField(default=0, projection=None)
data = {'first_name': 'Ron', 'last_name': 'Burgundy', 'dob': datetime.datetime.now()}
mo = M(data)
serialized = await mo.serialize()
for key in data.keys():
assert key in serialized
assert 'number_of_views' not in serialized
def test_model_field_exclusion():
class User(Model):
username = StringField()
password = StringField()
first_name = StringField()
last_name = StringField()
@serialize
async def full_name(self):
return '{} {}'.format(self.first_name, self.last_name)
class PublicUser(User):
class Meta:
exclude_fields = ['password', 'none_existing_field']
exclude_serialize = ['full_name', 'none_existing_serialize_method']
assert 'password' not in PublicUser._fields
assert 'full_name' not in PublicUser._serialize_methods
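# These tests can be run with pytest, e.g. (path taken from this repo's
# layout):
#   pytest tests/data/test_models.py -q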
| mit | 3,444,917,842,115,831,300 | 24.633588 | 102 | 0.611376 | false |
rbarlow/pulp | nodes/test/nodes_tests/base.py | 1 | 3158 | from ConfigParser import SafeConfigParser
from unittest import TestCase
import logging
import mock
import os
import shutil
import unittest
import okaara
import pymongo
from pulp.bindings.bindings import Bindings
from pulp.bindings.server import PulpConnection
from pulp.client.extensions.core import PulpCli, ClientContext, PulpPrompt
from pulp.client.extensions.exceptions import ExceptionHandler
from pulp.common.config import Config
from pulp.server.async import celery_instance
from pulp.server.config import config as pulp_conf
from pulp.server.db import connection
from pulp.server.logs import start_logging, stop_logging
from pulp.server.managers import factory as managers
from pulp.server.managers.auth.cert.cert_generator import SerialNumber
SerialNumber.PATH = '/tmp/sn.dat'
class ServerTests(unittest.TestCase):
TMP_ROOT = '/tmp/pulp/nodes'
@classmethod
def setUpClass(cls):
# This will make Celery tasks run synchronously
celery_instance.celery.conf.CELERY_ALWAYS_EAGER = True
if not os.path.exists(cls.TMP_ROOT):
os.makedirs(cls.TMP_ROOT)
stop_logging()
path = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'data',
'pulp.conf')
pulp_conf.read(path)
start_logging()
storage_dir = pulp_conf.get('server', 'storage_dir')
# Clear the storage dir; rmtree() does not expand globs like '/*'.
shutil.rmtree(storage_dir, ignore_errors=True)
os.makedirs(storage_dir)
managers.initialize()
@classmethod
def tearDownClass(cls):
name = pulp_conf.get('database', 'name')
db = pymongo.database.Database(connection._CONNECTION, name)
for name in db.collection_names():
if name[:7] == 'system.':
continue
db.drop_collection(name)
class ClientTests(TestCase):
def setUp(self):
TestCase.setUp(self)
self.config = SafeConfigParser()
path = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'data',
'client.conf')
self.config = Config(path)
self.server_mock = mock.Mock()
self.pulp_connection = \
PulpConnection('', server_wrapper=self.server_mock)
self.bindings = Bindings(self.pulp_connection)
self.recorder = okaara.prompt.Recorder()
self.prompt = PulpPrompt(enable_color=False, output=self.recorder, record_tags=True)
self.logger = logging.getLogger('pulp')
self.exception_handler = ExceptionHandler(self.prompt, self.config)
self.context = ClientContext(
self.bindings,
self.config,
self.logger,
self.prompt,
self.exception_handler)
self.cli = PulpCli(self.context)
self.context.cli = self.cli
class Response:
def __init__(self, code, body):
self.response_code = code
self.response_body = body
class Task:
def __init__(self, task_id=0):
self.task_id = task_id
class TaskResult:
def __init__(self, task_id):
self.spawned_tasks = [Task(task_id)]
| gpl-2.0 | 2,768,533,297,069,624,000 | 29.365385 | 92 | 0.649778 | false |
annarev/tensorflow | tensorflow/python/distribute/input_lib.py | 1 | 101510 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various classes representing distributed inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import sys
import six
from tensorflow.python import tf2
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import input_ops
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.distribute_lib import InputReplicationMode
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.types import distribute as distribute_types
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.docs import doc_controls
def get_distributed_dataset(dataset,
input_workers,
strategy,
num_replicas_in_sync=None,
input_context=None,
options=None):
"""Returns a distributed dataset from the given tf.data.Dataset instance.
This is a common function that is used by all strategies to return a
distributed dataset. The distributed dataset instance returned is different
depending on if we are in a TF 1 or TF 2 context. The distributed dataset
instances returned differ from each other in the APIs supported by each of
them.
Args:
dataset: a tf.data.Dataset instance.
input_workers: an InputWorkers object which specifies devices on which
iterators should be created.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
num_replicas_in_sync: Optional integer. If this is not None, the value is
used to decide how to rebatch datasets into smaller batches so that
the total batch size for each step (across all workers and replicas)
adds up to `dataset`'s batch size.
input_context: `InputContext` for sharding. Only pass this in for between
graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
options: Default is None. `tf.distribute.InputOptions` used to control
options on how this dataset is distributed.
Returns:
A distributed dataset instance.
"""
if tf2.enabled():
return DistributedDataset(
input_workers,
strategy,
dataset,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context,
options=options)
else:
return DistributedDatasetV1(
dataset,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context,
options=options)
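# A minimal sketch of how a strategy implementation might call the helper
# above. `input_workers` and `strategy` are assumed to already exist (real
# callers are the tf.distribute strategy classes, which build their own
# InputWorkers):
#   dataset = tf.data.Dataset.range(8).batch(4)
#   dist_dataset = get_distributed_dataset(
#       dataset, input_workers, strategy, num_replicas_in_sync=2)
#   for per_replica_batch in dist_dataset:
#     ...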
def get_distributed_datasets_from_function(dataset_fn,
input_workers,
input_contexts,
strategy,
options=None):
"""Returns a distributed dataset from the given input function.
This is a common function that is used by all strategies to return a
distributed dataset. The distributed dataset instance returned is different
depending on if we are in a TF 1 or TF 2 context. The distributed dataset
instances returned differ from each other in the APIs supported by each of
them.
Args:
dataset_fn: a function that returns a tf.data.Dataset instance.
input_workers: an InputWorkers object which specifies devices on which
iterators should be created.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `dataset_fn`. Length and order should match worker order in
`worker_device_pairs`.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
options: Default is None. `tf.distribute.InputOptions` used to control
options on how this dataset is distributed.
Returns:
A distributed dataset instance.
Raises:
ValueError: if `options.experimental_replication_mode` and
`options.experimental_place_dataset_on_device` are not consistent
"""
if (options is not None and
options.experimental_replication_mode != InputReplicationMode.PER_REPLICA
and options.experimental_place_dataset_on_device):
raise ValueError(
"When `experimental_place_dataset_on_device` is set for dataset "
"placement, you must also specify `PER_REPLICA` for the "
"replication mode")
if (options is not None and
options.experimental_replication_mode == InputReplicationMode.PER_REPLICA
and options.experimental_fetch_to_device and
options.experimental_place_dataset_on_device):
raise ValueError(
"`experimental_place_dataset_on_device` can not be set to True "
"when experimental_fetch_to_device is True and "
"replication mode is set to `PER_REPLICA`")
if tf2.enabled():
return DistributedDatasetsFromFunction(input_workers, strategy,
input_contexts, dataset_fn, options)
else:
return DistributedDatasetsFromFunctionV1(
input_workers,
strategy,
input_contexts,
dataset_fn,
options)
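# A sketch of the `dataset_fn` contract assumed by the helper above; the
# per-replica batch size and explicit sharding are the typical pattern, not a
# requirement (`global_batch_size` is a hypothetical constant):
#   def dataset_fn(input_context):
#     batch_size = input_context.get_per_replica_batch_size(global_batch_size)
#     d = tf.data.Dataset.range(100).batch(batch_size)
#     return d.shard(input_context.num_input_pipelines,
#                    input_context.input_pipeline_id)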
@tf_export("distribute.DistributedIterator", v1=[])
class DistributedIteratorInterface(collections_abc.Iterator,
distribute_types.Iterator):
"""An iterator over `tf.distribute.DistributedDataset`.
`tf.distribute.DistributedIterator` is the primary mechanism for enumerating
elements of a `tf.distribute.DistributedDataset`. It supports the Python
Iterator protocol, which means it can be iterated over using a for-loop or by
fetching individual elements explicitly via `get_next()`.
You can create a `tf.distribute.DistributedIterator` by calling `iter` on
a `tf.distribute.DistributedDataset` or creating a python loop over a
`tf.distribute.DistributedDataset`.
Visit the [tutorial](https://www.tensorflow.org/tutorials/distribute/input)
on distributed input for more examples and caveats.
"""
def get_next(self):
"""Returns the next input from the iterator for all replicas.
Example use:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.range(100).batch(2)
>>> dist_dataset = strategy.experimental_distribute_dataset(dataset)
>>> dist_dataset_iterator = iter(dist_dataset)
>>> @tf.function
... def one_step(input):
... return input
>>> step_num = 5
>>> for _ in range(step_num):
... strategy.run(one_step, args=(dist_dataset_iterator.get_next(),))
>>> strategy.experimental_local_results(dist_dataset_iterator.get_next())
(<tf.Tensor: shape=(1,), dtype=int64, numpy=array([10])>,
<tf.Tensor: shape=(1,), dtype=int64, numpy=array([11])>)
Returns:
A single `tf.Tensor` or a `tf.distribute.DistributedValues` which contains
the next input for all replicas.
Raises:
`tf.errors.OutOfRangeError`: If the end of the iterator has been reached.
"""
raise NotImplementedError(
"DistributedIterator.get_next() must be implemented in descendants.")
@property
def element_spec(self):
# pylint: disable=line-too-long
"""The type specification of an element of `tf.distribute.DistributedIterator`.
Example usage:
>>> global_batch_size = 16
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([1.],[2])).repeat(100).batch(global_batch_size)
>>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> distributed_iterator.element_spec
(PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.float32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.float32, name=None)),
PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.int32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.int32, name=None)))
Returns:
A nested structure of `tf.TypeSpec` objects matching the structure of an
element of this `tf.distribute.DistributedIterator`. This returned value
is typically a `tf.distribute.DistributedValues` object and specifies the
`tf.TensorSpec` of individual components.
"""
raise NotImplementedError(
"DistributedIterator.element_spec() must be implemented in descendants")
def get_next_as_optional(self):
# pylint: disable=line-too-long
"""Returns a `tf.experimental.Optional` that contains the next value for all replicas.
If the `tf.distribute.DistributedIterator` has reached the end of the
sequence, the returned `tf.experimental.Optional` will have no value.
Example usage:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> global_batch_size = 2
>>> steps_per_loop = 2
>>> dataset = tf.data.Dataset.range(10).batch(global_batch_size)
>>> distributed_iterator = iter(
... strategy.experimental_distribute_dataset(dataset))
>>> def step_fn(x):
... # train the model with inputs
... return x
>>> @tf.function
... def train_fn(distributed_iterator):
... for _ in tf.range(steps_per_loop):
... optional_data = distributed_iterator.get_next_as_optional()
... if not optional_data.has_value():
... break
... per_replica_results = strategy.run(step_fn, args=(optional_data.get_value(),))
... tf.print(strategy.experimental_local_results(per_replica_results))
>>> train_fn(distributed_iterator)
... # ([0 1], [2 3])
... # ([4], [])
Returns:
An `tf.experimental.Optional` object representing the next value from the
`tf.distribute.DistributedIterator` (if it has one) or no value.
"""
# pylint: enable=line-too-long
raise NotImplementedError(
"get_next_as_optional() not implemented in descendants")
@tf_export("distribute.DistributedDataset", v1=[])
class DistributedDatasetInterface(collections_abc.Iterable,
distribute_types.Iterable):
# pylint: disable=line-too-long
"""Represents a dataset distributed among devices and machines.
A `tf.distribute.DistributedDataset` could be thought of as a "distributed"
dataset. When you use `tf.distribute` API to scale training to multiple
devices or machines, you also need to distribute the input data, which leads
to a `tf.distribute.DistributedDataset` instance, instead of a
`tf.data.Dataset` instance in the non-distributed case. In TF 2.x,
`tf.distribute.DistributedDataset` objects are Python iterables.
Note: `tf.distribute.DistributedDataset` instances are *not* of type
`tf.data.Dataset`. It only supports two usages we will mention below:
iteration and `element_spec`. We don't support any other APIs to transform or
inspect the dataset.
There are two APIs to create a `tf.distribute.DistributedDataset` object:
`tf.distribute.Strategy.experimental_distribute_dataset(dataset)`and
`tf.distribute.Strategy.distribute_datasets_from_function(dataset_fn)`.
*When to use which?* When you have a `tf.data.Dataset` instance, and the
regular batch splitting (i.e. re-batch the input `tf.data.Dataset` instance
with a new batch size that is equal to the global batch size divided by the
number of replicas in sync) and autosharding (i.e. the
`tf.data.experimental.AutoShardPolicy` options) work for you, use the former
API. Otherwise, if you are *not* using a canonical `tf.data.Dataset` instance,
or you would like to customize the batch splitting or sharding, you can wrap
this logic in a `dataset_fn` and use the latter API. Both APIs handle
prefetching to the device for the user. For more details and examples, follow the
links to the APIs.
There are two main usages of a `DistributedDataset` object:
1. Iterate over it to generate the input for a single device or multiple
devices, which is a `tf.distribute.DistributedValues` instance. To do this,
you can:
* use a pythonic for-loop construct:
>>> global_batch_size = 4
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(4).batch(global_batch_size)
>>> dist_dataset = strategy.experimental_distribute_dataset(dataset)
>>> @tf.function
... def train_step(input):
... features, labels = input
... return labels - 0.3 * features
>>> for x in dist_dataset:
... # train_step trains the model using the dataset elements
... loss = strategy.run(train_step, args=(x,))
... print("Loss is", loss)
Loss is PerReplica:{
0: tf.Tensor(
[[0.7]
[0.7]], shape=(2, 1), dtype=float32),
1: tf.Tensor(
[[0.7]
[0.7]], shape=(2, 1), dtype=float32)
}
Placing the loop inside a `tf.function` will give a performance boost.
However `break` and `return` are currently not supported if the loop is
placed inside a `tf.function`. We also don't support placing the loop
inside a `tf.function` when using
`tf.distribute.experimental.MultiWorkerMirroredStrategy` or
`tf.distribute.experimental.TPUStrategy` with multiple workers.
* use `__iter__` to create an explicit iterator, which is of type
`tf.distribute.DistributedIterator`
>>> global_batch_size = 4
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> train_dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(50).batch(global_batch_size)
>>> train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
>>> @tf.function
... def distributed_train_step(dataset_inputs):
... def train_step(input):
... loss = tf.constant(0.1)
... return loss
... per_replica_losses = strategy.run(train_step, args=(dataset_inputs,))
... return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses,axis=None)
>>> EPOCHS = 2
>>> STEPS = 3
>>> for epoch in range(EPOCHS):
... total_loss = 0.0
... num_batches = 0
... dist_dataset_iterator = iter(train_dist_dataset)
... for _ in range(STEPS):
... total_loss += distributed_train_step(next(dist_dataset_iterator))
... num_batches += 1
... average_train_loss = total_loss / num_batches
... template = ("Epoch {}, Loss: {:.4f}")
... print (template.format(epoch+1, average_train_loss))
Epoch 1, Loss: 0.2000
Epoch 2, Loss: 0.2000
To achieve a performance improvement, you can also wrap the `strategy.run`
call with a `tf.range` inside a `tf.function`. This runs multiple steps in a
`tf.function`. Autograph will convert it to a `tf.while_loop` on the worker.
However, it is less flexible comparing with running a single step inside
`tf.function`. For example, you cannot run things eagerly or arbitrary
python code within the steps.
2. Inspect the `tf.TypeSpec` of the data generated by `DistributedDataset`.
`tf.distribute.DistributedDataset` generates
`tf.distribute.DistributedValues` as input to the devices. If you pass the
input to a `tf.function` and would like to specify the shape and type of
each Tensor argument to the function, you can pass a `tf.TypeSpec` object to
the `input_signature` argument of the `tf.function`. To get the
`tf.TypeSpec` of the input, you can use the `element_spec` property of the
`tf.distribute.DistributedDataset` or `tf.distribute.DistributedIterator`
object.
For example:
>>> global_batch_size = 4
>>> epochs = 1
>>> steps_per_epoch = 1
>>> mirrored_strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([2.])).repeat(100).batch(global_batch_size)
>>> dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
>>> @tf.function(input_signature=[dist_dataset.element_spec])
... def train_step(per_replica_inputs):
... def step_fn(inputs):
... return tf.square(inputs)
... return mirrored_strategy.run(step_fn, args=(per_replica_inputs,))
>>> for _ in range(epochs):
... iterator = iter(dist_dataset)
... for _ in range(steps_per_epoch):
... output = train_step(next(iterator))
... print(output)
PerReplica:{
0: tf.Tensor(
[[4.]
[4.]], shape=(2, 1), dtype=float32),
1: tf.Tensor(
[[4.]
[4.]], shape=(2, 1), dtype=float32)
}
Visit the [tutorial](https://www.tensorflow.org/tutorials/distribute/input)
on distributed input for more examples and caveats.
"""
def __iter__(self):
"""Creates an iterator for the `tf.distribute.DistributedDataset`.
The returned iterator implements the Python Iterator protocol.
Example usage:
>>> global_batch_size = 4
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]).repeat().batch(global_batch_size)
>>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> print(next(distributed_iterator))
PerReplica:{
0: tf.Tensor([1 2], shape=(2,), dtype=int32),
1: tf.Tensor([3 4], shape=(2,), dtype=int32)
}
Returns:
An `tf.distribute.DistributedIterator` instance for the given
`tf.distribute.DistributedDataset` object to enumerate over the
distributed data.
"""
raise NotImplementedError("Must be implemented in descendants")
@property
def element_spec(self):
"""The type specification of an element of this `tf.distribute.DistributedDataset`.
Example usage:
>>> global_batch_size = 16
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([1.],[2])).repeat(100).batch(global_batch_size)
>>> dist_dataset = strategy.experimental_distribute_dataset(dataset)
>>> dist_dataset.element_spec
(PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.float32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.float32, name=None)),
PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.int32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.int32, name=None)))
Returns:
A nested structure of `tf.TypeSpec` objects matching the structure of an
element of this `tf.distribute.DistributedDataset`. This returned value is
typically a `tf.distribute.DistributedValues` object and specifies the
`tf.TensorSpec` of individual components.
"""
raise NotImplementedError(
"DistributedDataset.element_spec must be implemented in descendants.")
@doc_controls.do_not_generate_docs
def reduce(self, initial_state, reduce_func):
raise NotImplementedError(
"DistributedDataset.reduce must be implemented in descendants.")
class InputWorkers(object):
"""A 1-to-many mapping from input worker devices to compute devices."""
def __init__(self, worker_device_pairs):
"""Initialize an `InputWorkers` object.
Args:
worker_device_pairs: A sequence of pairs:
`(input device, a tuple of compute devices fed by that input device)`.
"""
self._worker_device_pairs = worker_device_pairs
self._input_worker_devices = tuple(d for d, _ in self._worker_device_pairs)
self._fed_devices = tuple(tuple(device_util.canonicalize(d) for d in f)
for _, f in self._worker_device_pairs)
@property
def num_workers(self):
return len(self._input_worker_devices)
@property
def worker_devices(self):
return self._input_worker_devices
def compute_devices_for_worker(self, worker_index):
return self._fed_devices[worker_index]
def __repr__(self):
devices = self.worker_devices
debug_repr = ",\n".join(" %d %s: %s" %
(i, devices[i], self._fed_devices[i])
for i in range(len(devices)))
return "%s:{\n%s}" % (self.__class__.__name__, debug_repr)
def serialize(self):
return self._worker_device_pairs
def deserialize(self, worker_device_pairs):
return InputWorkers(worker_device_pairs)
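# Example `worker_device_pairs` wiring (hypothetical device names): a single
# input worker on the host CPU feeding two local GPUs would be
#   InputWorkers([("/device:CPU:0", ("/device:GPU:0", "/device:GPU:1"))])
# so `num_workers` is 1 and `compute_devices_for_worker(0)` returns both GPUs.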
def _get_next_as_optional(iterator, strategy, return_per_replica=False):
"""Returns an empty dataset indicator and the next input from the iterator.
Args:
iterator: a DistributedIterator object.
strategy: the `tf.distribute.Strategy` instance.
return_per_replica: a boolean. If True, the returned data will be wrapped
with `PerReplica` structure. Otherwise it is a 2D
num_input_workers*num_replicas_per_worker list.
Returns:
A tuple (a boolean tensor indicating whether the next batch has value
globally, data from all replicas).
"""
replicas = []
worker_has_values = []
worker_devices = []
for i, worker in enumerate(iterator._input_workers.worker_devices): # pylint: disable=protected-access
with ops.device(worker):
worker_has_value, next_element = (
iterator._iterators[i].get_next_as_list()) # pylint: disable=protected-access
# Collective all-reduce requires explicit devices for inputs.
with ops.device("/cpu:0"):
# Converting to integers for all-reduce.
worker_has_value = math_ops.cast(worker_has_value, dtypes.int64)
worker_devices.append(worker_has_value.device)
worker_has_values.append(worker_has_value)
# Make `replicas` a flat list of values across all replicas.
replicas.append(next_element)
if return_per_replica:
flattened_data = []
for per_worker_data in replicas:
flattened_data.extend(per_worker_data)
replicas = _create_per_replica(flattened_data, strategy)
# Run an all-reduce to see whether any worker has values.
# TODO(b/131423105): we should be able to short-cut the all-reduce in some
# cases.
if getattr(strategy.extended, "_support_per_replica_values", True):
# `reduce` expects a `PerReplica`, so we pass it one, even
# though it doesn't actually have a value per replica
worker_has_values = values.PerReplica(worker_has_values)
global_has_value = strategy.reduce(
reduce_util.ReduceOp.SUM, worker_has_values, axis=None)
else:
assert len(worker_has_values) == 1
global_has_value = worker_has_values[0]
global_has_value = array_ops.reshape(
math_ops.cast(global_has_value, dtypes.bool), [])
return global_has_value, replicas
def _is_statically_shaped(element_spec):
"""Test if an iterator output is statically shaped.
For sparse and ragged tensors this only tests the batch dimension.
Args:
element_spec: a nest structure of `tf.TypeSpec`. The element spec of the
dataset of the iterator.
Returns:
True if the shape is static, false otherwise.
"""
for spec in nest.flatten(element_spec):
if isinstance(
spec, (sparse_tensor.SparseTensorSpec, ragged_tensor.RaggedTensorSpec)):
# For sparse or ragged tensors, we only need to check the first
# dimension to support get_next_as_optional. This is because
# when these tensors get batched by the dataset, only the batch dimension
# is set.
if spec.shape.rank > 0 and spec.shape.as_list()[0] is None:
return False
else:
for component in nest.flatten(spec._component_specs): # pylint: disable=protected-access
if not component.shape.is_fully_defined():
return False
return True
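# For example, the element spec of `tf.data.Dataset.range(8).batch(4,
# drop_remainder=True)` has a fully defined shape of [4] and passes this
# check, while `drop_remainder=False` leaves the batch dimension as None and
# the check returns False.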
class DistributedIteratorBase(DistributedIteratorInterface):
"""Common implementation for all input iterators."""
# pylint: disable=super-init-not-called
def __init__(self, input_workers, iterators, strategy,
enable_get_next_as_optional):
assert isinstance(input_workers, InputWorkers)
if not input_workers.worker_devices:
raise ValueError("Should have at least one worker for input iterator.")
self._iterators = iterators
self._input_workers = input_workers
self._strategy = strategy
self._enable_get_next_as_optional = enable_get_next_as_optional
def next(self):
return self.__next__()
def __next__(self):
try:
return self.get_next()
except errors.OutOfRangeError:
raise StopIteration
def __iter__(self):
return self
def get_next_as_optional(self):
global_has_value, replicas = _get_next_as_optional(
self, self._strategy, return_per_replica=True)
def return_none():
return optional_ops.Optional.empty(self._element_spec)
return control_flow_ops.cond(
global_has_value, lambda: optional_ops.Optional.from_value(replicas),
return_none)
def get_next(self, name=None):
"""Returns the next input from the iterator for all replicas."""
if not self._enable_get_next_as_optional:
replicas = []
for i, worker in enumerate(self._input_workers.worker_devices):
if name is not None:
d = tf_device.DeviceSpec.from_string(worker)
new_name = "%s_%s_%d" % (name, d.job, d.task)
else:
new_name = None
with ops.device(worker):
# Make `replicas` a flat list of values across all replicas.
replicas.extend(
self._iterators[i].get_next_as_list_static_shapes(new_name))
return _create_per_replica(replicas, self._strategy)
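# Below is the partial-batch path: workers may run out of data at different
# times, so first check globally whether any replica still has data, then
# either return the real element on each device or deliberately raise
# OutOfRangeError via `out_of_range_fn`.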
out_of_range_replicas = []
def out_of_range_fn(worker_index, device):
"""This function will throw an OutOfRange error."""
# As this is only called when there is no data left, calling
# get_next() will trigger an OutOfRange error.
data = self._iterators[worker_index].get_next(device)
out_of_range_replicas.append(data)
return data
global_has_value, replicas = _get_next_as_optional(
self, self._strategy, return_per_replica=False)
results = []
for i, worker in enumerate(self._input_workers.worker_devices):
with ops.device(worker):
devices = self._input_workers.compute_devices_for_worker(i)
for j, device in enumerate(devices):
with ops.device(device):
# pylint: disable=undefined-loop-variable
# pylint: disable=cell-var-from-loop
# It is fine for the lambda to capture variables from the loop as
# the lambda is executed in the loop as well.
result = control_flow_ops.cond(
global_has_value,
lambda: replicas[i][j],
lambda: out_of_range_fn(i, device),
strict=True,
)
# pylint: enable=cell-var-from-loop
# pylint: enable=undefined-loop-variable
results.append(result)
replicas = results
return _create_per_replica(replicas, self._strategy)
class DistributedIteratorV1(DistributedIteratorBase):
"""Input Iterator for a distributed dataset."""
# We need a private initializer method for re-initializing multi-device
# iterators when used with Keras training loops. If we don't reinitialize the
# iterator we run into memory leak issues (b/123315763).
@property
def _initializer(self):
init_ops = []
for it in self._iterators:
init_ops.extend(it.initialize())
return control_flow_ops.group(init_ops)
@deprecated(None, "Use the iterator's `initializer` property instead.")
def initialize(self):
"""Initialize underlying iterators.
Returns:
A list of any initializer ops that should be run.
"""
return self._initializer
@property
def initializer(self):
"""Returns a list of ops that initialize the iterator."""
return self.initialize()
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_classes(self):
return self._iterators[0].output_classes
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_shapes(self):
return self._iterators[0].output_shapes
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_types(self):
return self._iterators[0].output_types
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
def get_iterator(self, worker):
for i, w in enumerate(self._input_workers.worker_devices):
if worker == w:
return self._iterators[i]
return None
@property
def element_spec(self):
"""The type specification of an element of this iterator."""
return self._element_spec
class DistributedDatasetAndIteratorSpec(type_spec.TypeSpec):
"""Common Type specification for `DistributedDataset and DistributedDatasetsFromFunction."""
__slots__ = [
"_input_workers", "_element_spec", "_strategy",
"_enable_get_next_as_optional", "_options"
]
def __init__(self,
input_workers,
element_spec,
strategy,
options,
enable_get_next_as_optional=None):
# We don't want to allow deserialization of this class because we don't
# serialize the strategy object. Currently the only place where
# _deserialize is called is when we save/restore using SavedModels.
if isinstance(input_workers, tuple):
raise NotImplementedError("DistributedIteratorSpec does not have support "
"for deserialization.")
else:
self._input_workers = input_workers
self._element_spec = element_spec
self._strategy = strategy
self._enable_get_next_as_optional = enable_get_next_as_optional
self._options = options
def _serialize(self):
# We cannot serialize the strategy object so we convert it to an id that we
# can use for comparison.
return (self._input_workers.serialize(), self._element_spec,
id(self._strategy), id(self._options))
def _deserialize(self):
raise ValueError(
f"Deserialization is currently unsupported for {type(self)}.")
def sanity_check_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
# pylint: disable=protected-access
if type(self) is not type(other):
raise ValueError("No TypeSpec is compatible with both %s and %s" %
(self, other))
if self._input_workers.serialize() != other._input_workers.serialize():
raise ValueError("_input_workers is not compatible with both %s "
"and %s" % (self, other))
if self._strategy is not other._strategy:
raise ValueError("tf.distribute strategy is not compatible with both %s "
"and %s" % (self, other))
class DistributedIteratorSpec(DistributedDatasetAndIteratorSpec):
"""Type specification for `DistributedIterator`."""
def __init__(self, input_workers, element_spec, strategy,
enable_get_next_as_optional, options):
super(DistributedIteratorSpec,
self).__init__(input_workers, element_spec, strategy, options,
enable_get_next_as_optional)
@property
def value_type(self):
return DistributedIterator
# Overriding this method so that we can merge and reconstruct the spec object
def most_specific_compatible_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
# pylint: disable=protected-access
self.sanity_check_type(other)
element_spec = nest.map_structure(
lambda a, b: a.most_specific_compatible_type(b), self._element_spec,
other._element_spec)
return DistributedIteratorSpec(self._input_workers, element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
@property
def _component_specs(self):
specs = []
worker_device_pairs = self._input_workers._worker_device_pairs # pylint: disable=protected-access
for i, (input_device, compute_devices) in enumerate(worker_device_pairs):
element_spec = nest.map_structure(
functools.partial(_replace_per_replica_spec, i=i), self._element_spec)
specs.append(
_SingleWorkerDatasetIteratorSpec(input_device, compute_devices,
element_spec, self._options))
return specs
def _to_components(self, value):
return value._iterators # pylint: disable=protected-access
def _from_components(self, components):
return DistributedIterator(
input_workers=self._input_workers,
iterators=None,
components=components,
element_spec=self._element_spec,
strategy=self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional,
options=self._options)
@staticmethod
def from_value(value):
# pylint: disable=protected-access
return DistributedIteratorSpec(value._input_workers, value._element_spec,
value._strategy,
value._enable_get_next_as_optional,
value._options)
def _with_tensor_ranks_only(self):
element_spec = nest.map_structure(
lambda s: s._with_tensor_ranks_only(), # pylint: disable=protected-access
self._element_spec)
return DistributedIteratorSpec(self._input_workers, element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
class DistributedIterator(DistributedIteratorBase,
composite_tensor.CompositeTensor):
"""Input Iterator for a distributed dataset."""
def __init__(self,
input_workers=None,
iterators=None,
strategy=None,
components=None,
element_spec=None,
enable_get_next_as_optional=False,
options=None):
if input_workers is None:
raise ValueError("`input_workers` should be "
"provided.")
error_message = ("Either `input_workers` or "
"both `components` and `element_spec` need to be "
"provided.")
self._options = options
if iterators is None:
if (components is None or element_spec is None):
raise ValueError(error_message)
self._element_spec = element_spec
self._input_workers = input_workers
self._iterators = components
self._strategy = strategy
self._enable_get_next_as_optional = enable_get_next_as_optional
else:
if (components is not None and element_spec is not None):
raise ValueError(error_message)
super(DistributedIterator,
self).__init__(input_workers, iterators, strategy,
enable_get_next_as_optional)
@property
def element_spec(self):
# When partial batch handling is enabled, always set the batch dimension to
# None, otherwise we just follow element_spec of the underlying dataset
# (whose batch dimension may also be None). This is because with partial
# batching handling we could always produce empty batches.
if (self._enable_get_next_as_optional and
self._strategy.extended._in_multi_worker_mode()): # pylint: disable=protected-access
return nest.map_structure(
_rebatch_as_dynamic, self._element_spec, expand_composites=False)
return self._element_spec
@property
def _type_spec(self):
# Note that we use actual element_spec instead of the rebatched-as-dynamic
# one to create DistributedIteratorSpec, to be consistent with the
# underlying iterators' specs.
return DistributedIteratorSpec(self._input_workers, self._element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
class _IterableInput(DistributedDatasetInterface):
"""Base class for iterable inputs for distribution strategies."""
# pylint: disable=super-init-not-called
def __init__(self, input_workers):
assert isinstance(input_workers, InputWorkers)
self._input_workers = input_workers
def __iter__(self):
raise NotImplementedError("must be implemented in descendants")
def reduce(self, initial_state, reduce_fn):
"""Execute a `reduce_fn` over all the elements of the input."""
iterator = iter(self)
has_data, data = _get_next_as_optional(
iterator, self._strategy, return_per_replica=True)
def cond(has_data, data, state):
del data, state # Unused.
return has_data
def loop_body(has_data, data, state):
"""Executes `reduce_fn` in a loop till the dataset is empty."""
del has_data # Unused.
state = reduce_fn(state, data)
has_data, data = _get_next_as_optional(
iterator, self._strategy, return_per_replica=True)
return has_data, data, state
has_data, data, final_state = control_flow_ops.while_loop(
cond, loop_body, [has_data, data, initial_state], parallel_iterations=1)
return final_state
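# A minimal usage sketch for `reduce`, e.g. counting the batches in a
# distributed dataset (the reduce function receives `PerReplica` values):
#   num_batches = dist_dataset.reduce(tf.constant(0),
#                                     lambda state, _: state + 1)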
class DistributedDatasetSpec(DistributedDatasetAndIteratorSpec):
"""Type specification for `DistributedDataset."""
def __init__(self, input_workers, element_spec, strategy,
enable_get_next_as_optional, options):
super(DistributedDatasetSpec,
self).__init__(input_workers, element_spec, strategy, options,
enable_get_next_as_optional)
@property
def value_type(self):
return DistributedDataset
# Overriding this method so that we can merge and reconstruct the spec object
def most_specific_compatible_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
# pylint: disable=protected-access
self.sanity_check_type(other)
element_spec = nest.map_structure(
lambda a, b: a.most_specific_compatible_type(b), self._element_spec,
other._element_spec)
return DistributedDatasetSpec(self._input_workers, element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
@property
def _component_specs(self):
specs = []
worker_device_pairs = self._input_workers._worker_device_pairs # pylint: disable=protected-access
for i, _ in enumerate(worker_device_pairs):
element_spec = nest.map_structure(
functools.partial(_replace_per_replica_spec, i=i), self._element_spec)
specs.append(dataset_ops.DatasetSpec(element_spec))
return specs
def _to_components(self, value):
return value._cloned_datasets # pylint: disable=protected-access
def _from_components(self, components):
return DistributedDataset(
input_workers=self._input_workers,
strategy=self._strategy,
components=components,
element_spec=self._element_spec,
enable_get_next_as_optional=self._enable_get_next_as_optional,
options=self._options)
@staticmethod
def from_value(value):
# pylint: disable=protected-access
return DistributedDatasetSpec(value._input_workers, value._element_spec,
value._strategy,
value._enable_get_next_as_optional,
value._options)
class DistributedDataset(_IterableInput, composite_tensor.CompositeTensor):
"""Distributed dataset that supports prefetching to multiple devices."""
def __init__(self,
input_workers,
strategy,
dataset=None,
num_replicas_in_sync=None,
input_context=None,
components=None,
element_spec=None,
enable_get_next_as_optional=None,
options=None):
"""Distribute the dataset on all workers.
If `num_replicas_in_sync` is not None, we split each batch of the dataset
into `num_replicas_in_sync` smaller batches, to be distributed among that
worker's replicas, so that the batch size for a global step (across all
workers and replicas) is as expected.
Args:
input_workers: an `InputWorkers` object.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
dataset: `tf.data.Dataset` that will be used as the input source. Either
dataset or components field should be passed when constructing
DistributedDataset. Use this when constructing DistributedDataset from a
new `tf.data.Dataset`. Use components when constructing using
DistributedDatasetSpec.
num_replicas_in_sync: Optional integer. If this is not None, the value
is used to decide how to rebatch datasets into smaller batches so that
the total batch size for each step (across all workers and replicas)
adds up to `dataset`'s batch size.
input_context: `InputContext` for sharding. Only pass this in for between
graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
components: datasets when DistributedDataset is constructed from
DistributedDatasetSpec. Either dataset or components should be
passed.
element_spec: element spec for DistributedDataset when constructing from
DistributedDatasetSpec. This will be used to set the element_spec for
DistributedDataset and verified against element_spec from components.
enable_get_next_as_optional: this is required when components is passed
instead of dataset.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
"""
super(DistributedDataset, self).__init__(input_workers=input_workers)
if input_workers is None or strategy is None:
raise ValueError("input_workers and strategy are required arguments")
if dataset is not None and components is not None:
raise ValueError("Only one of dataset or components should be present")
if dataset is None and components is None:
raise ValueError("At least one of dataset or components should be passed")
if dataset is not None:
self._create_cloned_datasets_from_dataset(dataset, input_context,
input_workers, strategy,
num_replicas_in_sync)
else:
if enable_get_next_as_optional is None:
raise ValueError(
"When constructing DistributedDataset with components, " +
"enable_get_next_as_optional should also be passed")
self._cloned_datasets = components
self._enable_get_next_as_optional = enable_get_next_as_optional
self._input_workers = input_workers
self._strategy = strategy
self._options = options
if element_spec is not None:
if element_spec != _create_distributed_tensor_spec(
self._strategy, self._cloned_datasets[0].element_spec):
raise ValueError("Mismatched element_spec from the passed components")
self._element_spec = element_spec
else:
self._element_spec = _create_distributed_tensor_spec(
self._strategy, self._cloned_datasets[0].element_spec)
def _create_cloned_datasets_from_dataset(self, dataset, input_context,
input_workers, strategy,
num_replicas_in_sync):
# We clone and shard the dataset on each worker. The current setup tries to
# shard the dataset by files if possible so that each worker sees a
# different subset of files. If that is not possible, it will attempt to shard
# the final input such that each worker will run the entire preprocessing
# pipeline and only receive its own shard of the dataset.
# Additionally, we rebatch the dataset on each worker into
# `num_replicas_in_sync` smaller batches to be distributed among that
# worker's replicas, so that the batch size for a global step (across all
# workers and replicas) adds up to the original dataset's batch size.
if num_replicas_in_sync is not None:
num_workers = input_context.num_input_pipelines if input_context else len(
input_workers.worker_devices)
rebatch_fn = self._make_rebatch_fn(dataset, num_workers,
num_replicas_in_sync)
else:
rebatch_fn = None
self._cloned_datasets = []
if input_context:
# Between-graph where we rely on the input_context for sharding
assert input_workers.num_workers == 1
if rebatch_fn is not None:
dataset = rebatch_fn(dataset, input_context.input_pipeline_id)
dataset = input_ops.auto_shard_dataset(dataset,
input_context.num_input_pipelines,
input_context.input_pipeline_id,
num_replicas_in_sync)
self._cloned_datasets.append(dataset)
else:
replicated_ds = distribute.replicate(dataset,
input_workers.worker_devices)
for i, worker in enumerate(input_workers.worker_devices):
with ops.device(worker):
cloned_dataset = replicated_ds[worker]
cloned_dataset = cloned_dataset.with_options(dataset.options())
if rebatch_fn is not None:
cloned_dataset = rebatch_fn(cloned_dataset, i)
cloned_dataset = input_ops.auto_shard_dataset(
cloned_dataset, len(input_workers.worker_devices), i,
num_replicas_in_sync)
self._cloned_datasets.append(cloned_dataset)
self._enable_get_next_as_optional = _enable_get_next_as_optional(
strategy, dataset)
def _make_rebatch_fn(self, dataset, num_workers, num_replicas_in_sync):
"""Returns a callable that rebatches the input dataset.
Args:
dataset: A `tf.data.Dataset` representing the dataset to be distributed.
num_workers: An integer representing the number of workers to distribute
`dataset` among.
num_replicas_in_sync: An integer representing the number of replicas in
sync across all workers.
"""
if num_replicas_in_sync % num_workers:
raise ValueError(
"tf.distribute expects every worker to have the same number of "
"replicas. However, encountered `num_replicas_in_sync` ({}) that "
"cannot be divided by `num_workers` ({})".format(
num_replicas_in_sync, num_workers))
num_replicas_per_worker = num_replicas_in_sync // num_workers
with ops.colocate_with(dataset._variant_tensor): # pylint: disable=protected-access
batch_size = distribute.compute_batch_size(dataset)
def rebatch_fn(dataset, worker_index):
try:
# pylint: disable=protected-access
def apply_rebatch():
batch_sizes = distribute.batch_sizes_for_worker(
batch_size, num_workers, num_replicas_per_worker, worker_index)
return distribute._RebatchDataset(
dataset, batch_sizes).prefetch(num_replicas_per_worker)
def apply_legacy_rebatch():
return distribute._LegacyRebatchDataset(
dataset, num_replicas_in_sync).prefetch(num_replicas_per_worker)
with ops.colocate_with(dataset._variant_tensor):
return control_flow_ops.cond(
math_ops.not_equal(batch_size, -1),
true_fn=apply_rebatch,
false_fn=apply_legacy_rebatch)
except errors.InvalidArgumentError as e:
if "without encountering a batch" in str(e):
six.reraise(
ValueError,
ValueError(
"Call the `batch` method on the input Dataset in order to be "
"able to split your input across {} replicas.\n Please see "
"the tf.distribute.Strategy guide. {}".format(
num_replicas_in_sync, e)),
sys.exc_info()[2])
else:
raise
return rebatch_fn
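# Example of the rebatching arithmetic above: with a dataset batch size of 8,
# num_workers=2 and num_replicas_in_sync=4, each worker rebatches into
# per-replica batches of size 2 (8 / 4) and handles 2 of them per step, so a
# global step still consumes 8 examples across all workers and replicas.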
def __iter__(self):
if not (context.executing_eagerly() or
ops.get_default_graph().building_function):
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
# This is an optional flag that can be used to turn off using
# OwnedMultiDeviceIterators and instead use the legacy MultiDeviceIterators
# as a stop gap solution that will allow us to roll out this change.
enable_legacy_iterators = getattr(self._strategy,
"_enable_legacy_iterators", False)
worker_iterators = _create_iterators_per_worker(self._cloned_datasets,
self._input_workers,
enable_legacy_iterators,
self._options)
if enable_legacy_iterators:
iterator = DistributedIteratorV1(
self._input_workers,
worker_iterators,
self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional)
else:
iterator = DistributedIterator(
self._input_workers,
worker_iterators,
self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional,
options=self._options)
iterator._element_spec = self._element_spec # pylint: disable=protected-access
# When async eager is enabled, sometimes the iterator may not finish
# initialization before being passed to a multi-device function; add a sync
# point here to make sure all underlying iterators are initialized.
if context.executing_eagerly():
context.async_wait()
return iterator
@property
def element_spec(self):
"""The type specification of an element of this dataset."""
# When partial batch handling is enabled, always set the batch dimension to
# None, otherwise we just follow element_spec of the underlying dataset
# (whose batch dimension may also be None). This is because with partial
# batching handling we could always produce empty batches.
if (self._enable_get_next_as_optional and
self._strategy.extended._in_multi_worker_mode()): # pylint: disable=protected-access
return nest.map_structure(
_rebatch_as_dynamic, self._element_spec, expand_composites=False)
return self._element_spec
@property
def _type_spec(self):
return DistributedDatasetSpec(self._input_workers, self._element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
class DistributedDatasetV1(DistributedDataset):
"""Distributed dataset that supports prefetching to multiple devices."""
def __init__(self,
dataset,
input_workers,
strategy,
num_replicas_in_sync=None,
input_context=None,
options=None):
self._input_workers = input_workers
super(DistributedDatasetV1, self).__init__(
input_workers,
strategy,
dataset,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context,
options=options)
def make_one_shot_iterator(self):
"""Get a one time use iterator for DistributedDatasetV1.
Note: This API is deprecated. Please use `for ... in dataset:` to iterate
over the dataset or `iter` to create an iterator.
Returns:
A DistributedIteratorV1 instance.
"""
return self._make_one_shot_iterator()
def _make_one_shot_iterator(self):
"""Get an iterator for DistributedDatasetV1."""
# Graph mode with a one-shot iterator is disabled because we have to call
# `initialize` on the iterator, which is only required if we are using a
# tf.distribute strategy.
if not context.executing_eagerly():
raise ValueError("Cannot create a one shot iterator. Please use "
"`make_initializable_iterator()` instead.")
return self._get_iterator()
def make_initializable_iterator(self):
"""Get an initializable iterator for DistributedDatasetV1.
Note: This API is deprecated. Please use
`tf.compat.v1.data.make_initializable_iterator(dataset)` to create an
initializable iterator.
Returns:
A DistributedIteratorV1 instance.
"""
return self._make_initializable_iterator()
def _make_initializable_iterator(self, shared_name=None): # pylint: disable=unused-argument
"""Get an initializable iterator for DistributedDatasetV1."""
# Eager mode generates already initialized iterators. Hence we cannot create
# an initializable iterator.
if context.executing_eagerly():
raise ValueError("Cannot create initializable iterator in Eager mode. "
"Please use `iter()` instead.")
return self._get_iterator()
def _get_iterator(self):
worker_iterators = _create_iterators_per_worker(self._cloned_datasets,
self._input_workers, True,
self._options)
iterator = DistributedIteratorV1(self._input_workers, worker_iterators,
self._strategy,
self._enable_get_next_as_optional)
iterator._element_spec = self.element_spec # pylint: disable=protected-access
# When async eager is enabled, sometimes the iterator may not finish
# initialization before being passed to a multi-device function; add a sync
# point here to make sure all underlying iterators are initialized.
if context.executing_eagerly():
context.async_wait()
return iterator
def __iter__(self):
if (ops.executing_eagerly_outside_functions() or
ops.get_default_graph().building_function):
return self._get_iterator()
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
class DistributedDatasetsFromFunctionSpec(DistributedDatasetAndIteratorSpec):
"""Type specification for `DistributedDatasetsFromFunction."""
def __init__(self, input_workers, element_spec, strategy, options):
super(DistributedDatasetsFromFunctionSpec,
self).__init__(input_workers, element_spec, strategy, options)
@property
def value_type(self):
return DistributedDatasetsFromFunction
@property
def _component_specs(self):
specs = []
worker_device_pairs = self._input_workers._worker_device_pairs # pylint: disable=protected-access
for i, _ in enumerate(worker_device_pairs):
element_spec = nest.map_structure(
functools.partial(_replace_per_replica_spec, i=i), self._element_spec)
specs.append(dataset_ops.DatasetSpec(element_spec))
return specs
# Overriding this method so that we can merge and reconstruct the spec object
def most_specific_compatible_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
# pylint: disable=protected-access
self.sanity_check_type(other)
element_spec = nest.map_structure(
lambda a, b: a.most_specific_compatible_type(b), self._element_spec,
other._element_spec) # pylint: disable=protected-access
return DistributedDatasetsFromFunctionSpec(self._input_workers,
element_spec, self._strategy,
self._options)
def _to_components(self, value):
return value._datasets # pylint: disable=protected-access
def _from_components(self, components):
return DistributedDatasetsFromFunction(
input_workers=self._input_workers,
strategy=self._strategy,
components=components,
element_spec=self._element_spec,
options=self._options)
@staticmethod
def from_value(value):
# pylint: disable=protected-access
return DistributedDatasetsFromFunctionSpec(
input_workers=value._input_workers,
element_spec=value._element_spec,
strategy=value._strategy,
options=value._options)
# TODO(priyag): Add other replication modes.
class DistributedDatasetsFromFunction(_IterableInput,
composite_tensor.CompositeTensor):
"""Inputs created from dataset function."""
def __init__(self,
input_workers,
strategy,
input_contexts=None,
dataset_fn=None,
options=None,
components=None,
element_spec=None):
"""Makes an iterable from datasets created by the given function.
Args:
input_workers: an `InputWorkers` object.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `dataset_fn`. Length and order should match worker order in
`worker_device_pairs`.
dataset_fn: A function that returns a `Dataset` given an `InputContext`.
Either dataset_fn or components should be passed to construct
DistributedDatasetsFromFunction. Use this when constructing
DistributedDataset using a function. Use components when constructing
using DistributedDatasetsFromFunctionSpec.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
components: datasets when DistributedDatasetsFromFunction is constructed
from DistributedDatasetsFromFunctionSpec. Only one of dataset_fn or
components should be passed.
element_spec: element spec for DistributedDatasetsFromFunction when
constructing from DistributedDatasetsFromFunctionSpec. This will be used
to set the element_spec for DistributedDatasetsFromFunction and verified
against element_spec from components.
"""
super(DistributedDatasetsFromFunction, self).__init__(
input_workers=input_workers)
self._input_workers = input_workers
self._strategy = strategy
self._options = options
if dataset_fn is not None and components is not None:
raise ValueError("Only one of dataset_fn or components should be set")
if dataset_fn is None and components is None:
raise ValueError("At least one of dataset_fn or components should be set")
if dataset_fn is not None:
if input_workers.num_workers != len(input_contexts):
raise ValueError(
"Number of input workers (%d) is not same as number of "
"input_contexts (%d)" %
(input_workers.num_workers, len(input_contexts)))
self._datasets, element_spec = (
_create_datasets_from_function_with_input_context(
input_contexts, self._input_workers, dataset_fn))
self._element_spec = _create_distributed_tensor_spec(
self._strategy, element_spec)
else:
if element_spec is None:
raise ValueError(
"element_spec should also be passed when passing components")
self._element_spec = element_spec
self._datasets = components
self._enable_get_next_as_optional = _enable_get_next_as_optional(
self._strategy, self._datasets[0])
def __iter__(self):
if (ops.executing_eagerly_outside_functions() or
ops.get_default_graph().building_function):
# This is an optional flag that can be used to turn off using
# OwnedMultiDeviceIterators and instead use the legacy
# MultiDeviceIterators as a stop gap solution that will allow us to roll
# out this change.
enable_legacy_iterators = getattr(self._strategy,
"_enable_legacy_iterators", False)
iterators = _create_iterators_per_worker(self._datasets,
self._input_workers,
enable_legacy_iterators,
self._options)
if enable_legacy_iterators:
iterator = DistributedIteratorV1(
self._input_workers,
iterators,
self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional)
else:
iterator = DistributedIterator(
input_workers=self._input_workers,
iterators=iterators,
strategy=self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional,
options=self._options)
iterator._element_spec = self._element_spec # pylint: disable=protected-access
# When async eager is enabled, sometimes the iterator may not finish
# initialization before passing to a multi device function, add a sync
# point here to make sure all underlying iterators are initialized.
if context.executing_eagerly():
context.async_wait()
return iterator
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
@property
def element_spec(self):
"""The type specification of an element of this dataset."""
# When partial batch handling is enabled, always set the batch dimension to
# None, otherwise we just follow element_spec of the underlying dataset
# (whose batch dimension may also be None). This is because with partial
# batching handling we could always produce empty batches.
if (self._enable_get_next_as_optional and
self._strategy.extended._in_multi_worker_mode()): # pylint: disable=protected-access
return nest.map_structure(
_rebatch_as_dynamic, self._element_spec, expand_composites=False)
return self._element_spec
@property
def _type_spec(self):
return DistributedDatasetsFromFunctionSpec(self._input_workers,
self._element_spec,
self._strategy, self._options)
class DistributedDatasetsFromFunctionV1(DistributedDatasetsFromFunction):
"""Inputs created from dataset function."""
def _make_initializable_iterator(self, shared_name=None):
"""Get an initializable iterator for DistributedDatasetsFromFunctionV1."""
del shared_name # Unused
# Eager mode generates already initialized iterators. Hence we cannot create
# an initializable iterator.
if context.executing_eagerly():
raise ValueError("Cannot create initializable iterator in Eager mode. "
"Please use `iter()` instead.")
return self._get_iterator()
def _make_one_shot_iterator(self):
"""Get an iterator for iterating over DistributedDatasetsFromFunctionV1."""
# Graph mode with one shot iterator is disabled because we have to call
# `initialize` on the iterator which is only required if we are using a
# tf.distribute strategy.
if not context.executing_eagerly():
raise ValueError("Cannot create a one shot iterator. Please use "
"`make_initializable_iterator()` instead.")
return self._get_iterator()
def _get_iterator(self):
iterators = _create_iterators_per_worker(self._datasets,
self._input_workers, True,
self._options)
iterator = DistributedIteratorV1(self._input_workers, iterators,
self._strategy,
self._enable_get_next_as_optional)
iterator._element_spec = self._element_spec # pylint: disable=protected-access
# When async eager is enabled, sometimes the iterator may not finish
# initialization before passing to a multi device function, add a sync point
# here to make sure all underlying iterators are initialized.
if context.executing_eagerly():
context.async_wait()
return iterator
def __iter__(self):
if (ops.executing_eagerly_outside_functions() or
ops.get_default_graph().building_function):
return self._get_iterator()
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
# TODO(anjalisridhar): This class will soon be removed in favor of newer
# APIs.
class InputFunctionIterator(DistributedIteratorV1):
"""Iterator created from input function."""
def __init__(self, input_fn, input_workers, input_contexts, strategy):
"""Make an iterator for input provided via an input function.
Currently implements PER_WORKER mode, in which the `input_fn` is called
once on each worker.
TODO(priyag): Add other replication modes.
Args:
input_fn: Input function that returns a `tf.data.Dataset` object.
input_workers: an `InputWorkers` object.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `input_fn`. Length and order should match worker order in
`worker_device_pairs`.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
"""
assert isinstance(input_workers, InputWorkers)
if input_workers.num_workers != len(input_contexts):
raise ValueError(
"Number of input workers (%d) is not same as number of "
"input_contexts (%d)" %
(input_workers.num_workers, len(input_contexts)))
iterators = []
for i, ctx in enumerate(input_contexts):
worker = input_workers.worker_devices[i]
with ops.device(worker):
result = input_fn(ctx)
devices = input_workers.compute_devices_for_worker(i)
if isinstance(result, dataset_ops.DatasetV2):
iterator = _SingleWorkerDatasetIterator(result, worker, devices)
elif callable(result):
iterator = _SingleWorkerCallableIterator(result, worker, devices)
else:
raise ValueError(
"input_fn must return a tf.data.Dataset or a callable.")
iterators.append(iterator)
super(InputFunctionIterator, self).__init__(
input_workers, iterators, strategy, enable_get_next_as_optional=False)
self._enable_get_next_as_optional = False
# TODO(anjalisridhar): This class will soon be removed and users should move
# to using DistributedIterator.
class DatasetIterator(DistributedIteratorV1):
"""Iterator created from input dataset."""
def __init__(self,
dataset,
input_workers,
strategy,
num_replicas_in_sync=None,
input_context=None):
"""Make an iterator for the dataset on given devices.
If `num_replicas_in_sync` is not None, we split each batch of the dataset
into `num_replicas_in_sync` smaller batches, to be distributed among that
worker's replicas, so that the batch size for a global step (across all
workers and replicas) is as expected.
Args:
dataset: `tf.data.Dataset` that will be used as the input source.
input_workers: an `InputWorkers` object.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
num_replicas_in_sync: Optional integer. If this is not None, the value is
used to decide how to rebatch datasets into smaller batches so that the
total batch size for each step (across all workers and replicas) adds up
to `dataset`'s batch size.
input_context: `InputContext` for sharding. Only pass this in for between
graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
"""
dist_dataset = DistributedDatasetV1(
dataset,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
worker_iterators = _create_iterators_per_worker(
dist_dataset._cloned_datasets, input_workers, True) # pylint: disable=protected-access
super(DatasetIterator,
self).__init__(input_workers, worker_iterators, strategy,
dist_dataset._enable_get_next_as_optional) # pylint: disable=protected-access
self._element_spec = dist_dataset.element_spec
def _dummy_tensor_fn(value_structure):
"""A function to create dummy tensors from `value_structure`."""
def create_dummy_tensor(spec):
"""Create a dummy tensor with possible batch dimensions set to 0."""
if isinstance(spec, ragged_tensor.RaggedTensorSpec):
# Splice out the ragged dimensions.
# pylint: disable=protected-access
feature_shape = spec._shape[:1].concatenate(
spec._shape[(1 + spec._ragged_rank):])
feature_type = spec._dtype
# pylint: enable=protected-access
else:
feature_shape = spec.shape
feature_type = spec.dtype
    # Ideally we should set the batch dimension to 0, but since in
    # DistributionStrategy we don't know the batch dimension, we try to
    # guess it as best we can. If the feature has unknown dimensions, we
# will set them to 0. If the feature shape is already static, we guess the
# first dimension as batch dimension and set it to 0.
dims = ([dim if dim is not None else 0 for dim in feature_shape.as_list()]
if feature_shape else [])
if dims and (isinstance(spec, ragged_tensor.RaggedTensorSpec) or
feature_shape.is_fully_defined()):
dims[0] = tensor_shape.Dimension(0)
if isinstance(spec, sparse_tensor.SparseTensorSpec):
return sparse_tensor.SparseTensor(
values=array_ops.zeros(0, feature_type),
indices=array_ops.zeros((0, len(dims)), dtypes.int64),
dense_shape=dims)
# Create the dummy tensor.
dummy_tensor = array_ops.zeros(tensor_shape.TensorShape(dims), feature_type)
if isinstance(spec, ragged_tensor.RaggedTensorSpec):
# Reinsert the ragged dimensions with size 0.
# pylint: disable=protected-access
row_splits = array_ops.zeros(1, spec._row_splits_dtype)
dummy_tensor = ragged_tensor.RaggedTensor.from_nested_row_splits(
dummy_tensor, (row_splits,) * spec._ragged_rank, validate=False)
# pylint: enable=protected-access
return dummy_tensor
return nest.map_structure(create_dummy_tensor, value_structure)
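# Illustrative sketch (not part of the original code): for a fully defined
# spec such as tf.TensorSpec(shape=(32, 10), dtype=tf.float32),
# _dummy_tensor_fn returns a zeros tensor of shape (0, 10); the guessed batch
# dimension is collapsed to 0 while the remaining dimensions are kept.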
def _recover_shape_fn(data, value_structure):
"""Recover the shape of `data` the same as shape of `value_structure`."""
flattened_data = nest.flatten(data)
for i, spec in enumerate(nest.flatten(value_structure)):
for target, source in zip(
nest.flatten(flattened_data[i], expand_composites=True),
nest.flatten(spec, expand_composites=True)):
target.set_shape(source.shape)
# `SparseTensor` shape is not determined by the shape of its component
# tensors. Rather, its shape depends on a tensor's values.
if isinstance(spec, sparse_tensor.SparseTensorSpec) and spec.shape:
dense_shape = spec.shape
with ops.device(flattened_data[i].op.device):
# For partially defined shapes, fill in missing values from tensor.
if not dense_shape.is_fully_defined():
dense_shape = array_ops.stack([
flattened_data[i].dense_shape[j] if dim is None else dim
for j, dim in enumerate(dense_shape.as_list())
])
flattened_data[i] = sparse_tensor.SparseTensor(
indices=flattened_data[i].indices,
values=flattened_data[i].values,
dense_shape=dense_shape)
data = nest.pack_sequence_as(data, flattened_data)
return data
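# Illustrative note: for a SparseTensorSpec with shape (None, 5), the helper
# above keeps the statically known dimension (5) and fills the unknown leading
# dimension from the tensor's runtime dense_shape.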
class _SingleWorkerDatasetIteratorBase(object):
"""Iterator for a single `tf.data.Dataset`."""
def __init__(self, dataset, worker, devices, options=None):
"""Create iterator for the `dataset` to fetch data to worker's `devices` .
A `MultiDeviceIterator` or `OwnedMultiDeviceIterator` is used to prefetch
input to the devices on the given worker.
Args:
dataset: A `tf.data.Dataset` instance.
worker: Worker on which ops should be created.
devices: Distribute data from `dataset` to these devices.
      options: `tf.distribute.InputOptions` used to control options on how
        this dataset is distributed.
"""
self._dataset = dataset
self._worker = worker
self._devices = devices
self._element_spec = dataset.element_spec
self._options = options
self._make_iterator()
def _make_iterator(self):
raise NotImplementedError("must be implemented in descendants")
def _format_data_list_with_options(self, data_list):
"""Change the data in to a list type if required.
The OwnedMultiDeviceIterator returns the list data type,
while the PER_REPLICA iterator (when used with prefetch disabled)
returns without the enclosed list. This is to fix the inconsistency.
Args:
data_list: data_list
Returns:
list
"""
if (self._options and self._options.experimental_replication_mode ==
InputReplicationMode.PER_REPLICA and
not self._options.experimental_fetch_to_device):
return [data_list]
else:
return data_list
def get_next(self, device, name=None):
"""Get next element for the given device."""
del name
with ops.device(self._worker):
if _should_use_multi_device_iterator(self._options):
return self._iterator.get_next(device)
else:
return self._iterator.get_next()
def get_next_as_list_static_shapes(self, name=None):
"""Get next element from the underlying iterator.
Runs the iterator get_next() within a device scope. Since this doesn't use
get_next_as_optional(), it is considerably faster than get_next_as_list()
(but can only be used when the shapes are static).
Args:
name: not used.
Returns:
A list consisting of the next data from each device.
"""
del name
with ops.device(self._worker):
return self._format_data_list_with_options(self._iterator.get_next())
def get_next_as_list(self, name=None):
"""Get next element from underlying iterator.
If there is no data left, a list of dummy tensors with possible batch
dimensions set to 0 will be returned. Use of get_next_as_optional() and
extra logic adds overhead compared to get_next_as_list_static_shapes(), but
allows us to handle non-static shapes.
Args:
name: not used.
Returns:
      A boolean tensor indicating whether there is any data in the next
      element, and the real data as the next element (or a list of dummy
      tensors if there is no data left).
"""
del name
with ops.device(self._worker):
data_list = self._format_data_list_with_options(
self._iterator.get_next_as_optional())
result = []
for i, data in enumerate(data_list):
# Place the condition op in the same device as the data so the data
# doesn't need to be sent back to the worker.
with ops.device(self._devices[i]):
# Data will be fetched in order, so we only need to check if the first
# replica has value to see whether there is data left for this single
# worker.
if i == 0:
worker_has_value = data.has_value()
# pylint: disable=unnecessary-lambda
# pylint: disable=cell-var-from-loop
real_data = control_flow_ops.cond(
data.has_value(),
lambda: data.get_value(),
lambda: _dummy_tensor_fn(data.element_spec),
strict=True,
)
# Some dimensions in `replicas` will become unknown after we
# conditionally return the real tensors or the dummy tensors. Recover
# the shapes from `data.element_spec`. We only need to do this in
# non eager mode because we always know the runtime shape of the
# tensors in eager mode.
if not context.executing_eagerly():
real_data = _recover_shape_fn(real_data, data.element_spec)
result.append(real_data)
# pylint: enable=cell-var-from-loop
# pylint: enable=unnecessary-lambda
return worker_has_value, result
class _SingleWorkerDatasetIteratorSpec(type_spec.TypeSpec):
"""Type specification for `_SingleWorkerOwnedDatasetIterator`."""
__slots__ = ["_worker", "_devices", "_element_spec", "_options"]
def __init__(self, worker, devices, element_spec, options):
self._worker = worker
self._devices = tuple(device_util.canonicalize(d) for d in devices)
self._element_spec = element_spec
self._options = options
@property
def value_type(self):
return _SingleWorkerOwnedDatasetIterator
def _serialize(self):
return (self._worker, self._devices, self._element_spec, self._options)
@property
def _component_specs(self):
specs = []
if _should_use_multi_device_iterator(self._options):
specs.append(
multi_device_iterator_ops.MultiDeviceIteratorSpec(
self._devices, self._worker, element_spec=self._element_spec))
else:
specs.append(iterator_ops.IteratorSpec(element_spec=self._element_spec))
return specs
def _to_components(self, value):
return [value._iterator] # pylint: disable=protected-access
def _from_components(self, components):
return _SingleWorkerOwnedDatasetIterator(
dataset=None,
worker=self._worker,
devices=self._devices,
components=components,
element_spec=self._element_spec,
options=self._options)
@staticmethod
def from_value(value):
# pylint: disable=protected-access
return _SingleWorkerDatasetIteratorSpec(value._worker, value._devices,
value._element_spec, value._options)
class _SingleWorkerOwnedDatasetIterator(_SingleWorkerDatasetIteratorBase,
composite_tensor.CompositeTensor):
"""Iterator for a DistributedDataset instance."""
def __init__(self,
dataset=None,
worker=None,
devices=None,
components=None,
element_spec=None,
options=None):
"""Create iterator for the `dataset` to fetch data to worker's `devices` .
`OwnedMultiDeviceIterator` is used to prefetch input to the devices on the
given worker. The lifetime of this iterator is tied to the encompassing
python object. Once we go out of scope of the python object or return from
a tf.function the underlying iterator resource is deleted.
Args:
dataset: A `tf.data.Dataset` instance.
worker: Worker on which ops should be created.
devices: Distribute data from `dataset` to these devices.
components: Tensor components to construct the
_SingleWorkerOwnedDatasetIterator from.
element_spec: A nested structure of `TypeSpec` objects that represents the
type specification of elements of the iterator.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
"""
if worker is None or devices is None:
raise ValueError("Both `worker` and `devices` should be provided")
error_message = ("Either `dataset` or both `components` and `element_spec` "
"need to be provided.")
self._options = options
if dataset is None:
if (components is None or element_spec is None):
raise ValueError(error_message)
self._element_spec = element_spec
self._worker = worker
self._devices = devices
self._iterator = components[0]
else:
if (components is not None or element_spec is not None):
raise ValueError(error_message)
super(_SingleWorkerOwnedDatasetIterator,
self).__init__(dataset, worker, devices, self._options)
def _make_iterator(self):
"""Make appropriate iterator on the dataset."""
if not self._worker:
raise ValueError("Worked device must be specified when creating an "
"owned iterator.")
if _should_use_multi_device_iterator(self._options):
host_device = device_util.get_host_for_device(self._worker)
with ops.device(self._worker):
if self._options is not None:
self._iterator = multi_device_iterator_ops.OwnedMultiDeviceIterator(
self._dataset,
self._devices,
source_device=host_device,
max_buffer_size=self._options
.experimental_per_replica_buffer_size,
prefetch_buffer_size=self._options
.experimental_per_replica_buffer_size)
else:
self._iterator = multi_device_iterator_ops.OwnedMultiDeviceIterator(
self._dataset, self._devices, source_device=host_device)
else:
with ops.device(self._worker):
self._iterator = iter(self._dataset)
@property
def element_spec(self):
return self._element_spec
@property
def _type_spec(self):
return _SingleWorkerDatasetIteratorSpec(self._worker, self._devices,
self._element_spec, self._options)
@property
def output_classes(self):
"""Returns the class of each component of an element of this iterator.
The expected values are `tf.Tensor` and `tf.SparseTensor`.
Returns:
A nested structure of Python `type` objects corresponding to each
component of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
self._element_spec)
@property
def output_shapes(self):
"""Returns the shape of each component of an element of this iterator.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
self._element_spec)
@property
def output_types(self):
"""Returns the type of each component of an element of this iterator.
Returns:
A nested structure of `tf.DType` objects corresponding to each component
of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
self._element_spec)
class _SingleWorkerDatasetIterator(_SingleWorkerDatasetIteratorBase):
"""Iterator for a single DistributedDatasetV1 instance."""
def _make_iterator(self):
"""Make appropriate iterator on the dataset."""
with ops.device(self._worker):
if self._options is not None:
self._iterator = multi_device_iterator_ops.MultiDeviceIterator(
self._dataset,
self._devices,
max_buffer_size=self._options.experimental_per_replica_buffer_size,
prefetch_buffer_size=self._options
.experimental_per_replica_buffer_size)
else:
self._iterator = multi_device_iterator_ops.MultiDeviceIterator(
self._dataset,
self._devices,
)
def initialize(self):
"""Initialize underlying iterator.
In eager execution, this simply recreates the underlying iterator.
In graph execution, it returns the initializer ops for the underlying
iterator.
Returns:
A list of any initializer ops that should be run.
"""
if ops.executing_eagerly_outside_functions():
self._iterator._eager_reset() # pylint: disable=protected-access
return []
else:
return [self._iterator.initializer]
@property
def output_classes(self):
return dataset_ops.get_legacy_output_classes(self._iterator)
@property
def output_shapes(self):
return dataset_ops.get_legacy_output_shapes(self._iterator)
@property
def output_types(self):
return dataset_ops.get_legacy_output_types(self._iterator)
class _SingleWorkerCallableIterator(object):
"""Iterator for a single tensor-returning callable."""
def __init__(self, fn, worker, devices):
self._fn = fn
self._worker = worker
self._devices = devices
def get_next(self, device, name=None):
"""Get next element for the given device from the callable."""
del device, name
with ops.device(self._worker):
return self._fn()
def get_next_as_list_static_shapes(self, name=None):
"""Get next element from the callable."""
del name
with ops.device(self._worker):
data_list = [self._fn() for _ in self._devices]
return data_list
def get_next_as_list(self, name=None):
"""Get next element from the callable."""
del name
with ops.device(self._worker):
data_list = [self._fn() for _ in self._devices]
return constant_op.constant(True), data_list
def initialize(self):
# TODO(petebu) Should this throw an exception instead?
return []
def _create_iterators_per_worker(worker_datasets,
input_workers,
enable_legacy_iterators,
options=None):
"""Create a multidevice iterator on each of the workers."""
assert isinstance(input_workers, InputWorkers)
assert len(worker_datasets) == len(input_workers.worker_devices)
iterators = []
for i, worker in enumerate(input_workers.worker_devices):
with ops.device(worker):
worker_devices = input_workers.compute_devices_for_worker(i)
if tf2.enabled() and not enable_legacy_iterators:
iterator = _SingleWorkerOwnedDatasetIterator(
dataset=worker_datasets[i],
worker=worker,
devices=worker_devices,
options=options)
else:
iterator = _SingleWorkerDatasetIterator(worker_datasets[i], worker,
worker_devices, options)
iterators.append(iterator)
return iterators
def _create_datasets_from_function_with_input_context(input_contexts,
input_workers,
dataset_fn):
"""Create device datasets per worker given a dataset function."""
datasets = []
for i, ctx in enumerate(input_contexts):
worker = input_workers.worker_devices[i]
with ops.device(worker):
dataset = dataset_fn(ctx)
datasets.append(dataset)
return datasets, dataset.element_spec
# TODO(sourabhbajaj): Remove this in lieu of distributed datasets
def _get_batched_dataset(d):
"""Get the batched dataset from `d`."""
# pylint: disable=protected-access
if isinstance(d, dataset_ops.DatasetV1Adapter):
d = d._dataset
if isinstance(d, (dataset_ops.BatchDataset, batching._MapAndBatchDataset)):
return d
elif isinstance(d, (dataset_ops.PrefetchDataset,
dataset_ops._OptionsDataset)):
return _get_batched_dataset(d._input_dataset)
raise ValueError(
"Unable to get batched dataset from the input dataset. `batch` "
"`map_and_batch` need to be the last operations on the dataset. "
"The batch operations can be followed by a prefetch.")
def _get_batched_dataset_attributes(d):
"""Get `batch_size`, `drop_remainder` of dataset."""
# pylint: disable=protected-access
assert isinstance(d,
(dataset_ops.BatchDataset, batching._MapAndBatchDataset))
if isinstance(d, dataset_ops.BatchDataset):
batch_size = d._batch_size
drop_remainder = d._drop_remainder
elif isinstance(d, batching._MapAndBatchDataset):
batch_size = d._batch_size_t
drop_remainder = d._drop_remainder_t
# pylint: enable=protected-access
if tensor_util.is_tf_type(batch_size):
batch_size = tensor_util.constant_value(batch_size)
if tensor_util.is_tf_type(drop_remainder):
drop_remainder = tensor_util.constant_value(drop_remainder)
return batch_size, drop_remainder
# TODO(sourabhbajaj): Remove this in lieu of distributed datasets
def _get_dataset_attributes(dataset):
"""Get the underlying attributes from the dataset object."""
# pylint: disable=protected-access
# First, get batch_size and drop_remainder from the dataset. We need
# to walk back the dataset creation process and find the batched version in
# order to get the attributes.
batched_dataset = _get_batched_dataset(dataset)
batch_size, drop_remainder = _get_batched_dataset_attributes(batched_dataset)
  # Second, the prefetch buffer should be obtained from the original dataset.
prefetch_buffer = None
if isinstance(dataset, dataset_ops.PrefetchDataset):
prefetch_buffer = dataset._buffer_size
elif (isinstance(dataset, dataset_ops.DatasetV1Adapter)
and isinstance(dataset._dataset, dataset_ops.PrefetchDataset)):
prefetch_buffer = dataset._dataset._buffer_size
return batch_size, drop_remainder, prefetch_buffer
def _should_use_multi_device_iterator(options):
"""Determine whether to use multi_device_iterator_ops."""
if (options is None or
options.experimental_replication_mode == InputReplicationMode.PER_WORKER
or
(options.experimental_replication_mode == InputReplicationMode.PER_REPLICA
and options.experimental_fetch_to_device)):
return True
return False
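# Reference sketch of the decision above (derived from the code):
#   options is None                                       -> True
#   PER_WORKER replication                                -> True
#   PER_REPLICA and experimental_fetch_to_device is True  -> True
#   PER_REPLICA and experimental_fetch_to_device is False -> False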
class MultiStepContext(object):
"""A context object that can be used to capture things when running steps.
This context object is useful when running multiple steps at a time using the
  `experimental_run_steps_on_iterator` API. For example, it allows the user's step
function to specify which outputs to emit at what frequency. Currently it
supports capturing output from the last step, as well as capturing non tensor
outputs. In the future it will be augmented to support other use cases such
  as outputting every N steps.
"""
def __init__(self):
"""Initialize an output context.
Returns:
A context object.
"""
self._last_step_outputs = {}
self._last_step_outputs_reduce_ops = {}
self._non_tensor_outputs = {}
@property
def last_step_outputs(self):
"""A dictionary consisting of outputs to be captured on last step.
Keys in the dictionary are names of tensors to be captured, as specified
when `set_last_step_output` is called.
Values in the dictionary are the tensors themselves. If
`set_last_step_output` was called with a `reduce_op` for this output,
then the value is the reduced value.
Returns:
A dictionary with last step outputs.
"""
return self._last_step_outputs
def _set_last_step_outputs(self, outputs):
"""Replace the entire dictionary of last step outputs."""
if not isinstance(outputs, dict):
raise ValueError("Need a dictionary to set last_step_outputs.")
self._last_step_outputs = outputs
def set_last_step_output(self, name, output, reduce_op=None):
"""Set `output` with `name` to be outputted from the last step.
Args:
name: String, name to identify the output. Doesn't need to match tensor
name.
output: The tensors that should be outputted with `name`. See below for
actual types supported.
reduce_op: Reduction method to use to reduce outputs from multiple
replicas. Required if `set_last_step_output` is called in a replica
context. Optional in cross_replica_context.
When present, the outputs from all the replicas are reduced using the
current distribution strategy's `reduce` method. Hence, the type of
`output` must be what's supported by the corresponding `reduce` method.
        For example, if using MirroredStrategy and reduction is set, output
must be a `PerReplica` value.
The reduce method is also recorded in a dictionary
        `_last_step_outputs_reduce_ops` for later interpretation of
        whether the outputs were already reduced or not.
"""
if distribution_strategy_context.in_cross_replica_context():
self._last_step_outputs_reduce_ops[name] = reduce_op
if reduce_op is None:
self._last_step_outputs[name] = output
else:
distribution = distribution_strategy_context.get_strategy()
self._last_step_outputs[name] = distribution.reduce(reduce_op, output,
axis=None)
else:
assert reduce_op is not None
def merge_fn(distribution, value):
self._last_step_outputs[name] = distribution.reduce(reduce_op, value,
axis=None)
# Setting this inside the `merge_fn` because all replicas share the same
# context object, so it's more robust to set it only once (even if all
# the replicas are trying to set the same value).
self._last_step_outputs_reduce_ops[name] = reduce_op
distribution_strategy_context.get_replica_context().merge_call(
merge_fn, args=(output,))
@property
def non_tensor_outputs(self):
"""A dictionary consisting of any non tensor outputs to be captured."""
return self._non_tensor_outputs
def set_non_tensor_output(self, name, output):
"""Set `output` with `name` to be captured as a non tensor output."""
if distribution_strategy_context.in_cross_replica_context():
self._non_tensor_outputs[name] = output
else:
def merge_fn(distribution, value):
# NOTE(priyag): For non tensor outputs, we simply return all the values
# in a list as reduction doesn't make sense on non tensors.
self._non_tensor_outputs[name] = (
distribution.experimental_local_results(value))
distribution_strategy_context.get_replica_context().merge_call(
merge_fn, args=(output,))
def _create_distributed_tensor_spec(strategy, tensor_spec):
"""Create a `tf.TypeSpec` for a given strategy and input `tensor_spec`.
Args:
strategy: The given `tf.distribute` strategy.
tensor_spec: `tf.TensorSpec` of a given value. The batch dimension of the
shape should be None if you have partial batches.
Returns:
A `tf.TypeSpec` that matches the values produced by a given strategy. This
    can be a `tf.TensorSpec` or a `PerReplicaSpec`.
"""
num_replicas = len(strategy.extended.worker_devices)
# For one device strategy that is not MultiWorkerMirroredStrategy, return the
# tensor_spec as is, since we don't wrap the output with PerReplica in this
# case.
# TODO(b/166464552): remove after we always wrap for all strategies.
if not _always_wrap(strategy):
return tensor_spec
# For other cases we assume the input to tf.function is a per replica type.
def _get_value_per_replica(tensor_spec_per_input):
value_specs = [tensor_spec_per_input for _ in range(num_replicas)]
return values.PerReplicaSpec(*value_specs)
return nest.map_structure(_get_value_per_replica, tensor_spec)
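# Illustrative sketch (hypothetical strategy): with two replicas and
# tensor_spec = tf.TensorSpec((None, 4), tf.float32), this returns
# values.PerReplicaSpec(tf.TensorSpec((None, 4), tf.float32),
#                       tf.TensorSpec((None, 4), tf.float32)),
# while a strategy that does not wrap returns tensor_spec unchanged.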
def _replace_per_replica_spec(spec, i):
"""If `spec` is a `PerReplicaSpec`, then return its `i`th value_spec."""
if isinstance(spec, values.PerReplicaSpec):
return spec._value_specs[i] # pylint: disable=protected-access
else:
return spec
def _enable_get_next_as_optional(strategy, dataset):
"""Returns whether to enable using partial batch handling."""
# TODO(b/133073708): we currently need a flag to control the usage because
# there is a performance difference between get_next() and
# get_next_as_optional(). And we only enable get_next_as_optional when the
# output shapes are not static.
#
# TODO(rxsang): We want to always enable the get_next_as_optional behavior
# when user passed input_fn instead of dataset.
if not getattr(strategy.extended, "experimental_enable_get_next_as_optional",
False):
return False
if context.executing_eagerly():
# If the dataset is infinite, we don't need to enable last partial batch
# support. Currently the logic only applies to the case that distributed
# dataset is created in eager mode, as we need to evaluate the dataset
# cardinality.
with ops.device(dataset._variant_tensor.device): # pylint: disable=protected-access
if dataset.cardinality().numpy() == cardinality.INFINITE:
return False
return not _is_statically_shaped(
dataset.element_spec) or strategy.extended._in_multi_worker_mode() # pylint: disable=protected-access
def _create_per_replica(value_list, strategy):
"""Creates a PerReplica.
For strategies other than OneDeviceStrategy, it creates a PerReplica whose
type spec is set to the element spec of the dataset. This helps avoid
  retracing for partial batches. Retracing is problematic for multi-client
  setups when different clients retrace at different times, since retracing
  changes the collective keys in the tf.function and causes mismatches among
  clients.
For single client strategies, this simply calls distribute_utils.regroup().
Args:
value_list: a list of values, one for each replica.
strategy: the `tf.distribute.Strategy`.
Returns:
a structure of PerReplica.
"""
# TODO(b/166464552): always wrap for all one device strategies as well.
always_wrap = _always_wrap(strategy)
per_replicas = distribute_utils.regroup(value_list, always_wrap=always_wrap)
return per_replicas
def _always_wrap(strategy):
"""Returns whether to always wrap the values in a DistributedValues."""
return strategy.extended._in_multi_worker_mode() or len( # pylint: disable=protected-access
strategy.extended.worker_devices) > 1
def _rebatch_as_dynamic(per_replica_spec):
"""Rebatch the spec to have a dynamic batch dimension."""
assert isinstance(per_replica_spec, values.PerReplicaSpec), per_replica_spec
# pylint: disable=protected-access
def _rebatch(spec):
# Rebatch if possible.
try:
return spec._unbatch()._batch(None)
except ValueError:
pass
return spec
return values.PerReplicaSpec(
*nest.map_structure(_rebatch, per_replica_spec._value_specs))
# pylint: enable=protected-access
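# Illustrative sketch: a PerReplicaSpec whose components have a static batch
# dimension, e.g. tf.TensorSpec((32, 10), tf.float32), is rebatched to
# tf.TensorSpec((None, 10), tf.float32); specs that cannot be unbatched are
# returned unchanged.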
| apache-2.0 | 2,909,316,437,415,284,700 | 39.799839 | 110 | 0.663353 | false |
dellytools/maze | readfq.py | 1 | 1674 | # source: https://github.com/lh3/readfq
def readfq(fp): # this is a generator function
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last: break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last: break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs); # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
if __name__ == "__main__":
import sys
n, slen, qlen = 0, 0, 0
for name, seq, qual in readfq(sys.stdin):
n += 1
slen += len(seq)
qlen += qual and len(qual) or 0
print n, '\t', slen, '\t', qlen
| mit | -3,479,504,749,326,615,600 | 39.829268 | 74 | 0.492832 | false |
hello-base/web | apps/merchandise/music/managers.py | 1 | 1125 | # -*- coding: utf-8 -*-
from django.db import models
from django.db.models.query import QuerySet
class EditionManager(models.Manager):
    def find_edition(self, release, edition, **kwargs):
        """Return the earliest-released edition matching the given filters."""
if release:
kwargs[release.identifier] = release
if edition:
kwargs[edition.parent.identifier] = edition.parent
qs = super(EditionManager, self).get_queryset().order_by('released', 'romanized_name')
try:
return qs.filter(**kwargs)[0]
except IndexError:
return qs.none()
    def primary_edition(self, release=None, edition=None):
        """Return the first edition found, preferring regular, then limited,
        then digital editions."""
editions = [self.model.EDITIONS.regular, self.model.EDITIONS.limited, self.model.EDITIONS.digital]
for kind in editions:
edition = self.find_edition(release, edition, kind=kind)
if edition:
return edition
return None
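# Usage sketch (hypothetical models): assuming `Edition.objects` is an
# EditionManager, the call below returns the regular edition when one exists,
# falling back to the limited and then the digital edition:
#   edition = Edition.objects.primary_edition(release=some_release)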
class TrackQuerySet(QuerySet):
def originals(self):
return self.filter(original_track__isnull=True)
class TrackOrderQuerySet(QuerySet):
def original_only(self):
return self.filter(is_instrumental=False)
| apache-2.0 | 4,398,973,060,987,976,000 | 30.25 | 106 | 0.648 | false |
ndronen/pylearnutils | pylearnutils/datasets/sparse_expander.py | 1 | 7801 | # From https://gist.github.com/ccsevers/10295174
import os.path
import numpy as np
from .utils import take_subset
from pylearn2.datasets.dataset import Dataset
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.utils.iteration import (SequentialSubsetIterator,
FiniteDatasetIterator,
resolve_iterator_class)
import functools
import logging
import numpy
import warnings
from pylearn2.space import CompositeSpace, Conv2DSpace, VectorSpace, IndexSpace
from pylearn2.utils import safe_zip
try:
import scipy.sparse
except ImportError:
warnings.warn("Couldn't import scipy.sparse")
import theano
import gzip
floatX = theano.config.floatX
logger = logging.getLogger(__name__)
class SparseExpanderDataset(Dataset):
"""
SparseExpanderDataset takes a numpy/scipy sparse matrix and calls .todense()
as the batches are passed out of the iterator.
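    Usage sketch (hypothetical paths; X saved with numpy.save as a
    scipy.sparse matrix, y as a numpy array):
        dataset = SparseExpanderDataset(X_path='X.npy', y_path='y.npy')
        it = dataset.iterator(mode='sequential', batch_size=100)
        batch = it.next()  # minibatch densified via .todense()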
"""
def __init__(self, X_path=None, y_path=None, from_scipy_sparse_dataset=None, zipped_npy=False, means_path=None, stds_path=None, start_fraction=None, end_fraction=None, start=None, stop=None):
self.X_path = X_path
self.y_path = y_path
if self.X_path != None:
if zipped_npy == True:
                logger.info('... loading sparse data set from a gzipped npy file')
self.X = scipy.sparse.csr_matrix(
numpy.load(gzip.open(X_path)), dtype=floatX)
else:
logger.info('... loading sparse data set from a npy file')
self.X = scipy.sparse.csr_matrix(
numpy.load(X_path).item(), dtype=floatX)
else:
logger.info('... building from given sparse dataset')
self.X = from_scipy_sparse_dataset.astype(floatX)
if self.y_path != None:
if zipped_npy == True:
                logger.info('... loading sparse data set from a gzipped npy file')
#self.y = scipy.sparse.csr_matrix(
# numpy.load(gzip.open(y_path)), dtype=floatX).todense()
self.y = numpy.load(gzip.open(y_path))
if not isinstance(self.y, np.ndarray):
print("calling y.item")
                    self.y = self.y.item()
else:
logger.info('... loading sparse data set from a npy file')
self.y = numpy.load(y_path)
if not isinstance(self.y, np.ndarray):
print("calling y.item")
self.y = self.y.item()
# We load y as a sparse matrix, but convert it to a dense array,
# because otherwise MLP.mean_of_targets breaks.
orig_shape = self.y.shape
if scipy.sparse.issparse(self.y):
self.y = np.asarray(self.y.todense())
# Only make this a column vector if it's not one-hot.
if 1 in orig_shape or len(orig_shape) == 1:
nrow = np.max(orig_shape)
self.y = self.y.reshape((nrow, 1))
else:
self.y = None
self.y = self.y.astype(floatX)
self.X, self.y = take_subset(self.X, self.y,
start_fraction, end_fraction, start, stop)
self.data_n_rows = self.X.shape[0]
self.num_examples = self.data_n_rows
self.fancy = False
self.stochastic = False
X_space = VectorSpace(dim=self.X.shape[1])
X_source = 'features'
if y_path is None:
space = X_space
source = X_source
else:
if self.y.ndim == 1:
dim = 1
else:
dim = self.y.shape[-1]
y_space = VectorSpace(dim=dim)
y_source = 'targets'
space = CompositeSpace((X_space, y_space))
source = (X_source, y_source)
if means_path is not None:
self.means = np.load(means_path)
if stds_path is not None:
self.stds = np.load(stds_path)
self.data_specs = (space, source)
self.X_space = X_space
self._iter_data_specs = (self.X_space, 'features')
def get_design_matrix(self):
return self.X
def get_batch_design(self, batch_size, include_labels=False):
"""
method inherited from Dataset
"""
        iterator = self.iterator(mode='sequential', batch_size=batch_size)
        return iterator.next()
def get_batch_topo(self, batch_size):
"""
method inherited from Dataset
"""
raise NotImplementedError('Not implemented for sparse dataset')
def get_data_specs(self):
"""
Returns the data_specs specifying how the data is internally stored.
This is the format the data returned by `self.get_data()` will be.
"""
return self.data_specs
def get_data(self):
"""
Returns
-------
data : numpy matrix or 2-tuple of matrices
Returns all the data, as it is internally stored.
The definition and format of these data are described in
`self.get_data_specs()`.
"""
if self.y is None:
return self.X
else:
return (self.X, self.y)
def get_num_examples(self):
return self.X.shape[0]
@functools.wraps(Dataset.iterator)
def iterator(self, mode=None, batch_size=None, num_batches=None,
topo=None, targets=None, rng=None, data_specs=None,
return_tuple=False):
"""
method inherited from Dataset
"""
self.mode = mode
self.batch_size = batch_size
self._targets = targets
self._return_tuple = return_tuple
if data_specs is None:
data_specs = self._iter_data_specs
# If there is a view_converter, we have to use it to convert
# the stored data for "features" into one that the iterator
# can return.
space, source = data_specs
if isinstance(space, CompositeSpace):
sub_spaces = space.components
sub_sources = source
else:
sub_spaces = (space,)
sub_sources = (source,)
convert = []
for sp, src in safe_zip(sub_spaces, sub_sources):
if src == 'features':
conv_fn = lambda x: x.todense()
elif src == 'targets':
conv_fn = lambda x: x
else:
conv_fn = None
convert.append(conv_fn)
if mode is None:
if hasattr(self, '_iter_subset_class'):
mode = self._iter_subset_class
else:
raise ValueError('iteration mode not provided and no default '
'mode set for %s' % str(self))
else:
mode = resolve_iterator_class(mode)
return FiniteDatasetIterator(self,
mode(self.X.shape[0],
batch_size,
num_batches,
rng),
data_specs=data_specs,
return_tuple=return_tuple,
convert=convert)
def __iter__(self):
return self
def next(self):
        indx = self.subset_iterator.next()
        try:
            rval = self.X[indx].todense()
            # center/scale are optional flags; they are never set in
            # __init__, so treat missing attributes as disabled.
            if getattr(self, 'center', False):
                rval = rval - self.means
            if getattr(self, 'scale', False):
                rval = rval / self.stds
        except IndexError:
            # the minibatch index went past the data boundary; end iteration
            raise StopIteration
rval = tuple(rval)
if not self._return_tuple and len(rval) == 1:
rval, = rval
return rval
| bsd-3-clause | -6,055,127,632,347,842,000 | 32.337607 | 195 | 0.542879 | false |
prov-suite/service-tests | prov_service_tests/test_provvalidator.py | 1 | 8645 | """Test class for ProvValidator service.
"""
# Copyright (c) 2015 University of Southampton
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
import json
import os
import requests
import unittest
from nose.tools import istest
from nose_parameterized import parameterized
from prov_service_tests import http
from prov_service_tests import standards
from prov_service_tests.test_service import ServiceTestCase
@istest
class ProvValidatorTestCase(ServiceTestCase):
"""Test class for ProvValidator service. These tests check that
ProvValidator is available and responds to requests directed
against the
`ProvValidator REST API <https://provenance.ecs.soton.ac.uk/validator/view/api.html>`_.
The class expects one environment variable to be set:
- ``PROVVALIDATOR_URL`` - ProvValidator base URL e.g.
``https://provenance.ecs.soton.ac.uk/validator/provapi/documents/``
"""
URL_ENV = "PROVVALIDATOR_URL"
"""str or unicode: environment variable holding ProvValidator URL
"""
CONTENT_TYPES = {
standards.PROVN: "text/provenance-notation",
standards.TTL: "text/turtle",
standards.TRIG: "application/trig",
standards.PROVX: "application/provenance+xml",
standards.JSON: "application/json"
}
"""dict: mapping from :mod:`prov_service_tests.standards` formats to
content types understood by ProvStore
"""
def setUp(self):
super(ProvValidatorTestCase, self).setUp()
self.url = os.environ[ProvValidatorTestCase.URL_ENV]
def tearDown(self):
super(ProvValidatorTestCase, self).tearDown()
def post_translate(self, document, format=standards.JSON):
"""Submit POST /provapi/documents to translate a document. The
request is requested not to allow redirects and a test is done to
check that the response code is 303 SEE OTHER.
:param document: document in given format
:type document: str or unicode
:param format: a :mod:`prov_service_tests.standards` format
:type format: str or unicode
:return: URL of stored document
:rtype: :class:`requests.Response`
"""
headers={http.CONTENT_TYPE:
ProvValidatorTestCase.CONTENT_TYPES[format]}
response = requests.post( \
self.url,
headers=headers,
allow_redirects=False,
data=document)
self.assertEqual(requests.codes.see_other, response.status_code)
return response
@parameterized.expand(list(itertools.product(standards.FORMATS,
standards.FORMATS)))
def test_post_translate(self, format1, format2):
"""Test POST /provapi/documents/ for translation.
"""
headers = {http.CONTENT_TYPE: ProvValidatorTestCase.CONTENT_TYPES[format1],
http.ACCEPT: ProvValidatorTestCase.CONTENT_TYPES[format2]}
response = requests.post(self.url,
headers=headers,
data=self.get_primer(format1))
self.assertEqual(requests.codes.ok, response.status_code)
def test_translate_get_document(self):
"""Test GET /provapi/documents/{docId}.
"""
response = self.post_translate(self.get_primer(standards.JSON),
standards.JSON)
graph_url = response.headers["location"]
response = requests.get(graph_url)
self.assertEqual(requests.codes.ok, response.status_code)
def test_translate_get_document_original(self):
"""Test GET /provapi/documents/{docId}/original.
"""
response = self.post_translate(self.get_primer(standards.JSON),
standards.JSON)
graph_url = response.headers["location"]
response = requests.get(graph_url + "/original")
self.assertEqual(requests.codes.ok, response.status_code)
@parameterized.expand(standards.FORMATS)
def test_translate_get_document_type(self, format):
"""Test GET /provapi/documents/{docId}.{type}.
"""
response = self.post_translate(self.get_primer(standards.JSON),
standards.JSON)
graph_url = response.headers["location"]
response = requests.get(graph_url + "." + format)
self.assertEqual(requests.codes.ok, response.status_code)
@parameterized.expand(standards.FORMATS)
def test_post_validate(self, format):
"""Test POST /provapi/documents for validation.
"""
response = requests.post( \
self.url,
files={"statements": self.get_primer(format)},
data={"validate": "Validate",
"type": format},
allow_redirects=True)
self.assertEqual(requests.codes.ok, response.status_code)
def validate(self):
"""Submit POST /provapi/documents then GET
/provapi/documents/{docId}/validation/report to validate
document.
- Submit POST /provapi/documents request with a JSON document.
- Get the graph URL from the response header ``location`` field.
- Submit GET /provapi/documents/{docId}/validation/report,
to validate the document.
- Test that the response to GET is 200 OK.
Accessing the validation report is a pre-requisite of
validation-related requests including /validation, /metrics,
/normalForm and /matrix.
:return: graph URL
:rtype: str or unicode
"""
response = self.post_translate(self.get_primer(standards.JSON),
standards.JSON)
graph_url = response.headers["location"]
response = requests.get(graph_url + "/validation/report")
self.assertEqual(requests.codes.ok, response.status_code)
return graph_url
def test_get_metrics(self):
"""Test GET /provapi/documents/{docId}/metrics.
"""
graph_url = self.validate()
response = requests.get(graph_url + "/metrics")
self.assertEqual(requests.codes.ok, response.status_code)
@parameterized.expand(["txt", "png"])
def test_get_validation_matrix_format(self, format):
"""Test GET /provapi/documents/{docId}/validation/matrix.txt and png.
"""
graph_url = self.validate()
response = requests.get(graph_url + "/validation/matrix." + format)
self.assertEqual(requests.codes.ok, response.status_code)
def test_get_validation_matrix_diagonal(self):
"""Test GET /provapi/documents/{docId}/validation/matrix/diagonal.
"""
graph_url = self.validate()
response = requests.get(graph_url + "/validation/matrix/diagonal")
self.assertEqual(requests.codes.ok, response.status_code)
def test_get_validation_normal_form(self):
"""Test GET /provapi/documents/{docId}/validation/normalForm.
"""
graph_url = self.validate()
response = requests.get(graph_url + "/validation/normalForm")
self.assertEqual(requests.codes.ok, response.status_code)
@parameterized.expand(standards.FORMATS)
def test_get_validation_normal_form_format(self, format):
"""Test GET /provapi/documents/{docId}/validation/normalForm.{type}.
"""
graph_url = self.validate()
response = requests.get(graph_url + "/validation/normalForm." + format)
self.assertEqual(requests.codes.ok, response.status_code)
def test_get_random_nodes_degree(self):
"""Test GET /provapi/documents/random/{nodes}/{degree}.
"""
response = requests.get(self.url + "random/1/1")
self.assertEqual(requests.codes.ok, response.status_code)
def test_get_random_nodes_degree_seed(self):
"""Test GET /provapi/documents/random/{nodes}/{degree}/{seed}.
"""
response = requests.get(self.url + "random/1/2/3")
self.assertEqual(requests.codes.ok, response.status_code)
| mit | 3,297,192,996,235,916,000 | 36.751092 | 89 | 0.695084 | false |
FedoraScientific/salome-paravis | test/VisuPrs/IsoSurfaces/E8.py | 1 | 1513 | # Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/IsoSurfaces/E8 case
# Create Iso Surface for all data of the given MED file
import sys
from paravistest import datadir, pictureext, get_picture_dir
from presentations import CreatePrsForFile, PrsTypeEnum
import pvserver as paravis
# Create presentations
myParavis = paravis.myParavis
# Directory for saving snapshots
picturedir = get_picture_dir("IsoSurfaces/E8")
file = datadir + "KCOUPLEX1.med"
print " --------------------------------- "
print "file ", file
print " --------------------------------- "
print "CreatePrsForFile..."
CreatePrsForFile(myParavis, file, [PrsTypeEnum.ISOSURFACES], picturedir, pictureext)
| lgpl-2.1 | 3,320,585,536,059,934,000 | 37.794872 | 84 | 0.734964 | false |
watson-developer-cloud/python-primer-companion-code | episode-2/flask/src/translation.py | 1 | 2054 | # -*- coding: utf-8 -*-
# Copyright 2016 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from watson_developer_cloud import LanguageTranslationV2 as LanguageTranslationService
def getTranslationService():
return LanguageTranslationService(username='<your username key for the Watson language translation service>',
password='<your password key for the service>')
def identifyLanguage(app, data):
txt = data.encode("utf-8", "replace")
language_translation = getTranslationService()
langsdetected = language_translation.identify(txt)
app.logger.info(json.dumps(langsdetected, indent=2))
primarylang = langsdetected["languages"][0]
retData = {key: primarylang[key] for key in ('language', 'confidence')}
app.logger.info(json.dumps(retData, indent=2))
return retData
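# Usage sketch (hypothetical Flask app and text): identifyLanguage returns the
# top detected language and its confidence, e.g.
#   result = identifyLanguage(app, u'Hola mundo')
#   # -> {'language': 'es', 'confidence': 0.98}  (illustrative values)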
def checkForTranslation(app, fromlang, tolang):
supportedModels = []
lt = getTranslationService()
models = lt.list_models()
modelList = models.get("models")
supportedModels = [model['model_id'] for model in modelList
if fromlang == model['source']
and tolang == model['target']]
return supportedModels
def performTranslation(app, txt, primarylang, targetlang):
lt = getTranslationService()
translation = lt.translate(txt, source=primarylang, target=targetlang)
theTranslation = None
if translation and ("translations" in translation):
theTranslation = translation['translations'][0]['translation']
return theTranslation
| apache-2.0 | -5,859,510,575,257,219,000 | 36.345455 | 111 | 0.729309 | false |
justanr/pyxl | pyxl.py | 1 | 13205 | '''
This simple module consists of the Pyxl class and a few helper functions.
'''
from os.path import basename, join
from glob import glob
from PIL import Image, ImageDraw, ImageFont
#import flickrapi
#Helper functions.
def buildHex(hexStr):
'''
Accepts a supposed hex color string and ensures it's 6 characters long.
'''
hexStr = hexStr.lower().replace(' ','').replace('#','')
#TODO: Make this prettier.
if len(hexStr) == 1:
return hexStr * 6
elif len(hexStr) == 2:
return hexStr * 3
elif len(hexStr) == 3:
return (hexStr[0] * 2) + (hexStr[1] * 2) + (hexStr[2] * 2)
elif len(hexStr) > 3 and len(hexStr) < 6:
return '{0:0<6}'.format(hexStr)
elif len(hexStr) > 6:
return hexStr[0:6]
else:
return hexStr
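# Illustrative examples of the normalization above:
#   buildHex('f')        -> 'ffffff'  (single char repeated)
#   buildHex('#f0a')     -> 'ff00aa'  (shorthand expanded)
#   buildHex('abcd')     -> 'abcd00'  (right-padded to 6 chars)
#   buildHex('abcdef99') -> 'abcdef'  (truncated to 6 chars)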
def hexToRGB(hexStr):
'''Converts a hexStr color to a RGB tuple'''
# Pretty self explainatory, but as a note this converts
# each hex pair (base16) to a base10 value
# hexToRGB('ff0000') would return (255, 0, 0) or pure red
hexStr = buildHex(hexStr)
return tuple([int(hexStr[i:i+2], 16) for i in range(0, 6, 2)])
def RGBToHex(RGB):
'''Converts a RGB tuple into a hex color'''
    return '{:02x}{:02x}{:02x}'.format(*RGB)
def calcGradDiff(startFill, stopFill, distance):
'''
Calculates the difference between the start and
stop fills over the specified distance.
'''
# account for the last pixel
distance = distance - 1.0
return tuple((stopFill[x] - startFill[x])/distance for x in range(3))
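# Illustrative example: a black-to-red gradient over 256 pixels steps the red
# channel by exactly 1.0 per pixel:
#   calcGradDiff((0, 0, 0), (255, 0, 0), 256)  # -> (1.0, 0.0, 0.0)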
def buildPyxlName(pyxl):
'''
Builds an MD5 hash from Pyxl.getInfo, Pyxl.getSize and Pyxl.getOptions
'''
from hashlib import md5
name = '{}-{}-{}'.format(pyxl.getInfo(), pyxl.getSize(), pyxl.getOptions())
return md5(name).hexdigest() + ".jpg"
def savePyxlImage(pyxl, path='imgs'):
'''
A simple save function for pyxl. Consider replacing with your own.
'''
    from PIL import ImageFile
ImageFile.MAXBLOCK = pyxl.image.size[0] * pyxl.image.size[1]
fullpath = join(path, buildPyxlName(pyxl))
pyxl.image.save(fullpath, 'JPEG', optimize=True,
progressive=True
)
def shiftRGB(old, new, shift):
'''
Shifts an RGB towards a new value.
Shift can be anything that returns an integer or float.
'''
change = lambda x: (x[1]*shift)+(x[0]*(1-shift))
return tuple(map(change, zip(old, new)))
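# Illustrative example: shifting pure red halfway towards pure blue yields the
# channel-wise midpoint:
#   shiftRGB((255, 0, 0), (0, 0, 255), 0.5)  # -> (127.5, 0.0, 127.5)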
class Pyxl(object):
'''
This class builds an image based on a series of inputs.
Either constructing it solely in PIL or pulling one from flickr.
'''
#TODO: Better documentation.
def __init__(self, info, size, options=None, fonts='fonts'):
# Initializing some very key variables.
self.info = {}
self.size = ()
self.options = {}
self.fonts = {}
self.draw = None
self.image = None
self.defaults = {
'font':'liberationsans',
'colors':[hexToRGB('ffffff'), hexToRGB('ff0000')]
}
# Build the fonts dictionary.
self.loadFonts(fonts)
# Load all the arguments passed to Pyxl
self.setInfo(info)
self.setSize(size)
self.setOptions(options)
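    # Usage sketch (illustrative): Pyxl('color:ff0000', '300x200') configures
    # a solid red 300x200 image, and Pyxl('gradient:000,fff', '400x100')
    # configures a vertical black-to-white gradient; the matching draw method
    # is assigned to self.draw and can be called to render the image.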
def setInfo(self, info):
'''
This function sets the information Pyxl needs to start an image.
It accepts one of three string patterns:
tag or a series of tags delimited by a comma
-- In this case, it is a flickr image
OR
color:hex
-- A solid color image
OR
gradient:hex,hex
-- A gradient image, there is an optional h argument at the end
The info variable contains the following bits:
type: This tells Pyxl what sort of image to produce
tags: This key is only set for a flickr image,
it determines what tags to pull an image from.
color: A list of RGB tuples.
'''
# Determine which kind of image we want
# No colon found, we want to contact flickr
if info.find(':') == -1:
self.info['type'] = 'flickr'
self.info['tags'] = info.split(',')
self.draw = self.drawFlickr
# We are building an image with PIL
else:
info = info.split(':')
# We're drawing a gradient.
if info[1].find(',') != -1:
self.draw = self.drawGradient
self.info['type'] = 'gradient'
info[1] = info[1].split(',')
self.info['colors'] = [ hexToRGB(info[1][0]),
hexToRGB(info[1][1])
]
# Specifically, a horizontal gradient
if len(info[1]) == 3:
self.info['type'] = 'hgradient'
# Just a solid image please
else:
self.draw = self.drawColor
self.info['type'] = 'color'
self.info['colors'] = [hexToRGB(info[1])]
def getInfo(self):
'''Returns a string representation of info dictionary.'''
if self.info['type'] == 'flickr':
return ','.join(self.info['tags'])
elif self.info['type'] == 'color':
return 'color:{}'.format(RGBToHex(self.info['colors'][0]))
else:
colors = ','.join([RGBToHex(x) for x in self.info['colors']])
if self.info['type'] == 'hgradient':
colors = colors + ',h'
return 'gradient:{}'.format(colors)
def setSize(self, size):
'''
Sets the total size of the image.
This function accepts a string in the form of widthxheight.
This function will also ensure that the dimensions are between 1
and the maximum (currently 2500)
'''
default = 200
maximum = 2000
# seriously, who needs an image this big
sizes = []
for x in size.split('x'):
try:
# Probably a better way to do this, but no point in letting this
# ruin the script Even though I highly doubt someone will
# pass something like axd as the size argument from the API,
# better safe than sorry.
x = int(x)
except ValueError:
x = default
if x > maximum:
x = maximum
elif x < 1:
x = default
sizes.append(x)
if len(sizes) != 2:
sizes = [sizes[0], sizes[0]]
self.size = tuple(sizes)
def getSize(self):
'''
Returns string representation of the iamge size in
form of widthxheight
'''
return 'x'.join([str(x) for x in self.size])
def setOptions(self, options):
'''
This function accepts a string for the options of Pyxl.
It should be formatted as: option:value,option2:value.
There are just a few current valid options:
seed: This option is to create a new image from the same options.
text: A hex color that is converted to a RGB tuple.
dimensions: This SHOULD be set to hide,
but if it's there, the dimensions are not displayed on the image.
font: This sets the font for the image text,
it uses a defaults if the font isn't listed in Pyxl.fonts
'''
if options is None:
#defaults ahoy!
self.options = {
'text':self.defaults['colors'][0],
'font':self.setFont(self.defaults['font'])
}
else:
valid = ['seed', 'dimensions', 'text', 'font']
for option in options.lower().split(','):
option = option.split(':')
#prevent a bunch of spamming non-recognized options
if option[0] not in valid:
continue
elif option[0] == 'font':
option[1] = self.setFont(option[1])
elif option[0] == 'text':
try:
# again, probably a better way
# but better safe than sorry
option[1] = hexToRGB(option[1])
except ValueError:
option[1] = self.defaults['colors'][0]
elif option[0] == 'dimensions':
option[1] = 'hide'
elif option[0] == 'seed' and self.info['type'] != 'flickr':
# There's no point in a seed for a none flickr image
continue
self.options[option[0]] = option[1]
#double check to make sure at least font and text got set.
if 'font' not in self.options:
self.options['font'] = self.setFont(self.defaults['font'])
if 'text' not in self.options:
self.options['text'] = self.defaults['colors'][0]
def getOptions(self):
'''Returns a string representation of all the options set.'''
options = ''
for key in sorted(self.options.keys()):
if key == 'text':
option = RGBToHex(self.options['text'])
elif key == 'font':
option = basename(self.options['font']).lower().split('.')[0]
else:
option = self.options[key]
options = options + '{}:{},'.format(key, option)
return options.rstrip(',')
def loadFonts(self, location='fonts'):
'''
This function scans the location folder for fonts and stores them in a
dictionary. The keys are the lowercased version of the file name,
split at the first dot.
LiberationSans.ttf becomes
{'liberationsans':'fonts/LiberationSans.ttf'}
Currently, it is only implemented to find TrueType fonts.
'''
fonts = glob(join(location, '*.ttf'))
self.fonts = {
basename(font).lower().split('.')[0]:font for font in fonts
}
def setFont(self, font):
'''
This function sets the font for the text on the image.
If it receives a font that isn't in Pyxl's font library,
it sets it to the default.
'''
if font not in self.fonts.keys():
return self.fonts[self.defaults['font']]
return self.fonts[font]
def drawColor(self):
'''Creates a solid colored image.'''
self.image = Image.new('RGB', self.size, self.info['colors'][0])
if 'dimensions' not in self.options:
self.drawDimensions()
def drawGradient(self):
'''Creates a gradient image.'''
# this'll be much easier to work with
height = self.size[1]
width = self.size[0]
# set the correct distance
if self.info['type'] == 'hgradient':
distance = width
else:
distance = height
# again, easier to work with
start = self.info['colors'][0]
stop = self.info['colors'][1]
# make a new blank image
self.image = Image.new('RGB', self.size, hexToRGB('ffffff'))
draw = ImageDraw.Draw(self.image)
for i in range(distance):
# set the correct draw positions
if self.info['type'] == 'hgradient':
pos = (i, 0, i, height)
else:
pos = (0, i, width, i)
# move the start color closer to the end color
rgb = shiftRGB(start, stop, float(i)/distance)
fill = tuple(map(int, map(round, rgb)))
draw.line(pos, fill=fill)
if 'dimensions' not in self.options:
self.drawDimensions()
def drawFlickr(self):
'''Creates an image based on a flickr image.'''
pass
def getFlickrImage(self):
'''
Retrieves a single flickr image based on Pyxl.info['tags']
'''
pass
def drawDimensions(self):
'''Creates the dimensions image.'''
text = self.getSize()
size = 1
font = ImageFont.truetype(self.options['font'], size)
img_fraction = 0.5
while (font.getsize(text)[0] < int(self.size[0] * img_fraction)) and \
(font.getsize(text)[1] < int(self.size[1]*img_fraction)):
size += 1
font = ImageFont.truetype(self.options['font'], size)
font = ImageFont.truetype(self.options['font'], size)
pos = ( (self.size[0] - font.getsize(text)[0])/2,
(self.size[1] - font.getsize(text)[1])/2
)
draw = ImageDraw.Draw(self.image)
draw.text(pos, text, font=font, fill=self.options['text'])
| mit | 8,869,948,142,380,855,000 | 29.780886 | 79 | 0.526089 | false |
elemel/drillion | drillion/cannon_entity_creator.py | 1 | 1825 | from drillion.animation_component import AnimationComponent
from drillion.collision import CollisionBody
from drillion.collision_component import CollisionComponent
from drillion.entity import Entity
from drillion.maths import Polygon2, Transform2
from drillion.sprite import PolygonSprite
from drillion.sprite_component import SpriteComponent
from drillion.transform_component import TransformComponent
import random
class CannonEntityCreator(object):
def __init__(self, animation_update_phase, draw_phase, batch):
self._animation_update_phase = animation_update_phase
self._draw_phase = draw_phase
self._batch = batch
def create(self, ship_entity, position=(0.0, 0.0), angle=0.0, length=1.0,
width=0.1, color=(255, 255, 255, 255)):
vertices = [(0.0, -0.5), (1.0, -0.5), (1.0, 0.5), (0.0, 0.5)]
polygon = Polygon2(vertices)
parent_transform_component = \
ship_entity.find_component(TransformComponent)
transform = Transform2()
transform.rotate(angle)
transform.scale(length, width)
transform.translate(*position)
transform_component = \
TransformComponent(transform, parent=parent_transform_component)
sprite = PolygonSprite(vertices, color=color, transform=transform)
sprite_component = SpriteComponent(sprite, self._batch)
animation_component = AnimationComponent(transform_component,
sprite_component,
self._animation_update_phase,
self._draw_phase)
components = [transform_component, sprite_component,
animation_component]
return Entity(components, parent=ship_entity)
| mit | -3,641,659,004,998,115,000 | 42.452381 | 78 | 0.647123 | false |
desmo999r/cmssysadmin | cmssysadmin/__init__.py | 1 | 1437 | import os
import socket
import fcntl
import struct
import subprocess
import logging
logger = logging.getLogger(__name__)
class CmdLine(object):
options = {}
class __metaclass__(type):
def __new__(cls, *kargs, **kwargs):
t = type.__new__(cls, *kargs, **kwargs)
with open("/proc/cmdline") as f:
for option in f.readline().strip().split():
fields = option.split("=")
if len(fields) == 1:
t.options[fields[0]] = True
else:
t.options[fields[0]] = fields[1]
logger.info("/proc/cmdline options: " + str(t.options))
return t
def get_bootif():
try:
mac = CmdLine.options['BOOTIF'][3:].replace('-', ':').strip().lower()
except KeyError:
return None
for n in os.listdir("/sys/class/net"):
with open("/sys/class/net/" + n + "/address") as f:
if mac == f.read().strip().lower():
return n, mac
raise Exception("There is a BOOTIF param but no matching interface")
def get_ip_address(ifname):
"""Returns the NIC current IPv4 address"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
logger.info("Current IP is %s", ip)
return ip
# vim: set ts=4 sw=4 tw=0 et :
| gpl-2.0 | -8,696,028,773,016,488,000 | 28.9375 | 77 | 0.551844 | false |
spadev/chatlogsync | chatlogsync.py | 1 | 10218 | #!/usr/bin/env python
# Copyright 2013 Evan Vitero
# This file is part of chatlogsync.
# chatlogsync is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# chatlogsync is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with chatlogsync. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import sys
import signal
import traceback
from os.path import join, dirname, exists, isfile, isdir, realpath
from argparse import ArgumentParser, ArgumentTypeError
from multiprocessing import Process, cpu_count, Value, Manager, Lock
import chatlogsync
from chatlogsync import const, formats, util, timezones
WORKERS = []
class Progress(object):
"""Thread-safe progress updater"""
def __init__(self):
self._nread = Value('i', 0, lock=False)
self._nwrote = Value('i', 0, lock=False)
self._nexisting = Value('i', 0, lock=False)
self._nerror = Value('i', 0, lock=False)
self._lock = Lock()
def print_status(self, msg=None):
dryrun = ' (DRY RUN)' if const.DRYRUN else ''
if msg:
print_v(msg)
print_('\r[read:%i wrote:%i existing:%i error:%i]%s ' %
(self.nread, self.nwrote, self.nexisting, self.nerror, dryrun),
end='', flush=True, file=sys.stderr)
if msg:
print_v('\n')
def _incr(self, var, n=1):
with self._lock:
var.value += n
def read(self, path):
self._incr(self._nread)
def wrote(self, path):
self._incr(self._nwrote)
self.print_status('wrote %s' % path)
def error(self, path):
tb = traceback.format_exc()
self._incr(self._nerror)
print_e('%s\n%s' % (path, tb))
def existing(self, path):
self._incr(self._nexisting)
print_v('existing %s' % path)
@property
def nerror(self):
return self._nerror.value
@property
def nwrote(self):
return self._nwrote.value
@property
def nread(self):
return self._nread.value
@property
def nexisting(self):
return self._nexisting.value
class Parser(Process):
def __init__(self, outformat, force, destination, queue, files,
progress, fslock):
super(Parser, self).__init__()
self.queue = queue
self.progress = progress
self.tempfiles = []
self.destination = destination
self.outformat = outformat
self.force = force
self._files = files
self._fslock = fslock
self._modules = [x() for x in formats.all_formats.values()]
self._modules_map = {x.type: x for x in self._modules}
self._stopped = Value('i', 0)
self._curpath = ''
def stop(self):
self._stopped.value = 1
@property
def stopped(self):
return self._stopped.value == 1
def cleanup(self):
for tempfile in self.tempfiles:
if exists(tempfile):
os.unlink(tempfile)
def _process_path(self, path):
self._curpath = path
for i, rmodule in enumerate(self._modules):
parsed = rmodule.parse_path(path)
if parsed:
# try this module first next time
if i != 0:
self._modules[i] = self._modules[0]
self._modules[0] = rmodule
break
# file is not a chatlog
if not parsed:
return None
self.progress.read(path)
wmodule = self._modules_map[self.outformat] \
if self.outformat else rmodule
for c in parsed:
self._curpath = path
dstpath = wmodule.get_path(c)
real_dstpath = realpath(join(self.destination, dstpath))
with self._fslock:
if real_dstpath in self._files:
f = 1
elif exists(real_dstpath):
f = 2
else:
f = 0
self._files[real_dstpath] = f
if f:
self.progress.existing(dstpath)
self.progress.print_status()
if not self.force:
continue
if const.DRYRUN:
conversation = c
else:
conversation = rmodule.parse_conversation(c)
tmppath = real_dstpath+'.tmp'
self.tempfiles.append(tmppath)
self._curpath = real_dstpath
self._write_outfile(wmodule, real_dstpath, tmppath,
[conversation])
del self.tempfiles[-1]
self.progress.wrote(dstpath)
def _write_outfile(self, module, path, tmppath, conversations):
# return len(conversations)
dstdir = dirname(path)
with self._fslock:
if not exists(dstdir):
os.makedirs(dstdir)
module.write(tmppath, conversations)
os.rename(tmppath, path)
return len(conversations)
def run(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
path = ''
while True:
try:
path = self.queue.get()
if path is None:
break
self._process_path(path)
except IOError as e:
break
except Exception as e:
self.progress.error(self._curpath)
self.cleanup()
def isfileordir(value):
if not isfile(value) and not isdir(value):
raise ArgumentTypeError("'%s' is not a file or directory" % value)
return value
def isnotfile(value):
if isfile(value):
raise ArgumentTypeError("'%s' is not a file" % value)
return value
def parse_args():
parser = \
ArgumentParser(description=const.PROGRAM_DESCRIPTION,
prog=const.PROGRAM_NAME)
parser.add_argument('source', nargs='+', type=isfileordir,
help=_('source log file or directory'))
parser.add_argument('destination', type=isnotfile,
help=_('destination log directory'))
parser.add_argument("-d", "--debug",
help=_("enable debug output"),
action='store_true',
default=False,
)
parser.add_argument("-f", "--format",
choices=[str(x) for x in formats.output_formats],
help=_("format to use for output files"),
default=None,
)
parser.add_argument("-F", "--force",
help=_("force regeneration of existing logs at "
"destination"),
action='store_true',
default=False,
)
parser.add_argument("-n", "--dry-run",
help=_("perform a trial run with no changes made"),
action='store_true',
default=False,
)
parser.add_argument("--no-comments",
help=_("do not write comments to converted logs"),
action='store_true',
default=False,
)
parser.add_argument("-q", "--quiet",
help=_("suppress warnings"),
action='store_true',
default=False,
)
parser.add_argument("-t", "--threads", metavar="NUM_THREADS",
help=_("use NUM_THREADS worker processes for parsing"),
type=int,
default=cpu_count(),
)
parser.add_argument("-v", "--verbose",
help=_("enable verbose output"),
action='store_true',
default=False,
)
options = parser.parse_args()
if options.debug:
const.DEBUG = True
if options.verbose:
const.VERBOSE = True
if options.quiet:
const.QUIET = True
if options.dry_run:
const.DRYRUN = True
if options.no_comments:
const.NO_COMMENTS = True
return options
def convert(paths, options):
global WORKERS
manager = Manager()
fslock = Lock()
progress = Progress()
queue = manager.Queue()
files = manager.dict()
WORKERS = [Parser(options.format, options.force, options.destination,
queue, files, progress, fslock)
for i in range(options.threads)]
for w in WORKERS:
w.start()
for path in paths:
queue.put(path)
for w in WORKERS:
queue.put(None)
for w in WORKERS:
w.join()
return 0
def main(options):
print_('gathering paths...', end='', flush=True, file=sys.stderr)
src_paths = util.get_paths(options.source)
print_('done', file=sys.stderr)
convert(src_paths, options)
return 0
def cleanup(exitcode):
progress = None
for w in WORKERS:
progress = w.progress
w.stop()
for w in WORKERS:
w.join()
if progress:
progress.print_status('done')
exitcode += progress.nerror
if not const.VERBOSE:
print_('')
return exitcode
if __name__ == "__main__":
options = parse_args()
exitcode = 0
try:
timezones.init()
exitcode = main(options)
except KeyboardInterrupt:
exitcode = 1
print_e("***aborted***")
except Exception as e:
exitcode = 1
traceback.print_exc()
finally:
sys.exit(cleanup(exitcode))
| gpl-3.0 | -4,886,149,893,731,973,000 | 29.777108 | 79 | 0.534743 | false |
uyaly/test | testcase/0_add1/Case05_HZ_add.py | 1 | 2884 | # coding:utf-8
import time
import unittest
import ddt
from pageobject.account.Page_Account import Page_Account
from selenium import webdriver
from pageobject.Page_Login import Page_Login
from pageobject.account.Page_Account_HZ_ADD import Page_Account_HZ_ADD
from utils.config import Config
from utils.log1 import Log
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
log = Log()
@ddt.ddt
class addHZ(unittest.TestCase):
'''总监登录,新增会长'''
@classmethod
def setUpClass(self):
self.url = Config().get('URL')
self.driver = webdriver.Firefox()
self.l = Page_Login(self.driver) # login参数是LoginPage的实例
self.A = Page_Account(self.driver)
self.A_HZ_ADD = Page_Account_HZ_ADD(self.driver)
self.l.open(self.url)
def test01_login(self):
'''总监登录'''
self.username = Config().get('CEO_LOGINNAME')
self.psw = Config().get('PASSWORD')
self.l.login(self.username, self.psw)
# 判断是否登录成功
self.assertTrue(self.l.is_text_in_element(self.A.loginout_loc, "退出", "-------总监登录 失败-------"))
log.info("-------总监登录 用例结束-------")
def test02_add(self):
'''新增会长'''
self.username = Config().get('HZ_LOGINNAME')
self.psw = Config().get('PASSWORD')
self.loginid = Config().get('HZ_NAME')
self.phone = Config().get('PHONE')
# 进入模块
self.A.IntoModule("帐号2会长2")
# 点击新增按钮
i = self.driver.find_element_by_id("mainIframe")
self.driver.switch_to.frame(i)
self.A.add()
self.driver.switch_to.default_content()
# 新增界面
time.sleep(2)
self.A_HZ_ADD.input_club(self.username)
time.sleep(3)
# 滚动到底部
self.driver.execute_script("$('#form>div')[0].scrollTop=500")
time.sleep(3)
self.A_HZ_ADD.input_loginid(self.loginid)
time.sleep(2)
self.A_HZ_ADD.input_psw(self.psw)
time.sleep(2)
self.A_HZ_ADD.input_psw1(self.psw)
time.sleep(2)
self.A_HZ_ADD.input_name(self.username)
time.sleep(2)
self.A_HZ_ADD.input_phone(self.phone)
time.sleep(2)
self.A_HZ_ADD.click_save()
time.sleep(2)
# 判断是否新建成功
self.assertTrue(self.l.is_text_in_element(self.A.alert_text, "新增成功", str(self.l.get_text(self.A.alert_text))))
# 确定
self.A_HZ_ADD.click_ok()
log.info('-------新增会长 用例结束-------')
@classmethod
def tearDownClass(self):
# 关闭浏览器
self.driver.quit()
# 执行测试主函数
if __name__ == '__main__':
# 执行main全局方法,将会执行上述所有以test开头的测试方法
unittest.main(verbosity=2) | gpl-2.0 | 6,771,792,635,812,235,000 | 30.321429 | 118 | 0.597719 | false |
aldryn/aldryn-news | setup.py | 1 | 1204 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from aldryn_news import __version__
REQUIREMENTS = [
'django-filer',
'django-hvad',
'django_select2',
# last version that supports django 1.5
'django-taggit<=0.18.1',
'djangocms-text-ckeditor',
'translitcodec',
'Unidecode',
]
CLASSIFIERS = [
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Application Frameworks',
]
setup(
name='aldryn-news',
version=__version__,
description='Publish news in django CMS',
author='Divio AG',
author_email='info@divio.ch',
url='https://github.com/aldryn/aldryn-news',
packages=find_packages(),
license='LICENSE.txt',
platforms=['OS Independent'],
install_requires=REQUIREMENTS,
classifiers=CLASSIFIERS,
include_package_data=True,
zip_safe=False
)
| bsd-3-clause | -8,503,469,181,565,606,000 | 27 | 75 | 0.649502 | false |
KittyHawkIrc/modules | reddit.py | 1 | 3560 | import json, random, urllib2
#Update schema
__url__ = "https://raw.githubusercontent.com/KittyHawkIrc/modules/production/" + __name__ + ".py"
__version__ = 1.0
def declare():
return {"reddit": "privmsg", "guess": "privmsg"}
def callback(self):
channel = self.channel
command = self.command
user = self.user
msg = self.message
type = self.type
isop = self.isop
if command == 'guess':
u = 'FitOrFat'
else:
try:
u = str(msg.split(' ', 1)[1])
except:
return self.msg(channel, "Please specify a subreddit!")
try:
req = urllib2.Request("https://www.reddit.com/r/" + u + "/new.json", headers={ 'User-Agent': 'UNIX:the_kgb:reddit https://github.com/stqism/THE_KGB-apps' })
fd = urllib2.urlopen(req)
reddit_api = json.loads(fd.read())
fd.close()
cringe = []
for i in reddit_api['data']['children']:
url = i['data']['url']
title = i['data']['title']
selfpost = bool(i['data']['is_self'])
post = "https://reddit.com" + i['data']['permalink']
if 'imgur' in url:
if 'http://i.imgur.com' in url: #force https
url = 'https://i.imgur.com/%s' % (url.split('/')[3])
if 'http://' in url and '/a/' not in url: #direct URLs
if 'gallery' in url:
url = 'https://i.imgur.com/%s.jpg' % (url.split('/')[4])
else:
url = 'https://i.imgur.com/%s.jpg' % (url.split('/')[3])
cringe.append([title, url, post])
item = random.choice(cringe)
if command == 'guess':
try:
u = str(msg.split(' ', 1)[1])
return self.msg(channel, u + ": Am I fit or fat? " + item[1])
except:
return self.msg(channel, "Am I fit or fat? " + item[1])
else:
if not selfpost:
via = " (via: " + item[2] + ")"
return self.msg(channel, str(item[0] + " " + item[1] + via))
else:
return self.msg(channel, str(item[0] + " " + item[1]))
except Exception, e:
return self.msg('#the_kgb', str(e))
class api:
def msg(self, channel, text):
return "[%s] %s" % (channel, text)
if __name__ == "__main__":
api = api()
c = "#test"
setattr(api, 'isop', True)
setattr(api, 'type', 'privmsg')
setattr(api, 'command', 'reddit')
setattr(api, 'user', 'joe!username@hostmask')
setattr(api, 'channel', c)
setattr(api, 'message', '^reddit')
if callback(api) != '[%s] Please specify a subreddit!' % (c):
print '[TESTFAIL] no arguments'
exit(1)
setattr(api, 'message', '^reddit fatpeoplehate')
if callback(api) != '[#the_kgb] HTTP Error 404: Not Found':
print '[TESTFAIL] error catcher'
exit(1)
setattr(api, 'message', '^reddit fatlogic')
if not callback(api).startswith('[%s] ' % (c)):
print '[TESTFAIL] Subreddit loader'
exit(1)
setattr(api, 'message', '^guess')
setattr(api, 'command', 'guess')
if not callback(api).startswith('[%s] Am I fit or fat?' % (c)):
print '[TESTFAIL] guess no user'
print '[%s] Am I male or female?' % (c)
exit(1)
n = 'bob'
setattr(api, 'message', '^guess %s' % (n))
if not callback(api).startswith('[%s] %s: Am I fit or fat?' % (c, n)):
print '[TESTFAIL] guess with user'
exit(1)
| apache-2.0 | -6,751,224,005,319,157,000 | 29.689655 | 164 | 0.504494 | false |
evilsephiroth/plugin.video.vvvvid | vvvvid.py | 1 | 1369 | import urllib2
def f(m):
l = list()
o = 0
b = None
while not b and o < len(m):
n = m[o] <<2
o +=1
k = -1
j = -1
if o < len(m):
n += m[o] >> 4
o += 1
if o < len(m):
k = (m[o - 1] << 4) & 255;
k += m[o] >> 2;
o += 1
if o < len(m):
j = (m[o - 1] << 6) & 255;
j += m[o]
o += 1
else:
b = True
else:
b = True
else:
b = True
l.append(n)
if k != -1:
l.append(k)
if j != -1:
l.append(j)
return l
def dec_ei(h):
g = 'MNOPIJKL89+/4567UVWXQRSTEFGHABCDcdefYZabstuvopqr0123wxyzklmnghij'
c = list()
for e in range(0,len(h)):
c.append(g.find(h[e]))
for e in range(len(c)*2-1,-1,-1):
#print 'e=' + str(e)
a = c[e % len(c)] ^ c[(e+1)%len(c)]
#print 'a='+str(a)
c[e%len(c)] = a
#print 'c['+str(e % len(c))+']='+ str(c[e % len(c)])
c = f(c)
d = ''
for e in range(0,len(c)):
d += '%'+ (('0'+ (str(format(c[e],'x'))))[-2:])
return urllib2.unquote(d) | gpl-2.0 | -2,904,059,955,838,083,000 | 22.482143 | 74 | 0.308985 | false |
yola/yolapy | docs/conf.py | 1 | 9271 | # -*- coding: utf-8 -*-
#
# Yolapy documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 27 12:47:53 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
from yolapy import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Yolapy'
copyright = u'2015, Yola'
author = u'Yola'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Yolapydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Yolapy.tex', u'Yolapy Documentation',
u'Yola', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
#man_pages = [
# (master_doc, 'yolapy', u'Yolapy Documentation',
# [author], 1)
#]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
#texinfo_documents = [
# (master_doc, 'Yolapy', u'Yolapy Documentation',
# author, 'Yolapy', 'One line description of project.',
# 'Miscellaneous'),
#]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | 7,168,053,449,254,915,000 | 30.968966 | 79 | 0.70672 | false |
shree-shubham/Unitype | Coupling Passions.py | 1 | 1941 | import math
# Enter your code here. Read input from STDIN. Print output to STDOUT
def distance_between(point1, point2):
EARTH_RADIUS = 6371
point1_lat_in_radians = math.radians( point1['latitude'] )
point2_lat_in_radians = math.radians( point2['latitude'] )
point1_long_in_radians = math.radians( point1['longitude'] )
point2_long_in_radians = math.radians( point2['longitude'] )
return math.acos( math.sin( point1_lat_in_radians ) * math.sin( point2_lat_in_radians ) +
math.cos( point1_lat_in_radians ) * math.cos( point2_lat_in_radians ) *
math.cos( point2_long_in_radians - point1_long_in_radians) ) * EARTH_RADIUS
m = int(raw_input())
people = {}
locations = {}
interests = {}
for p in xrange(m):
s = raw_input().split(' ')
people[p] = s[1:]
for i in s[1:]:
interests[i] = 1
z = int(raw_input())
for l in xrange(z):
s = raw_input().split(' ')
locations[s[0]] = {'latitude': float(s[1]), 'longitude': float(s[2])}
locations[s[0]]['passions'] = set()
for ll in xrange(4, 4 + int(s[3])):
locations[s[0]]['passions'].add(s[ll])
res = []
for l in locations:
interest_set = set()
for i in interests:
if i in locations[l]['passions']:
interest_set.add(i)
res += [[l, interest_set]]
commons = 0
commons_list = []
for i in xrange(len(res)):
for j in xrange(i+1, len(res)):
temp = len(res[i][1] | res[j][1])
if temp >= commons:
commons = temp
if res[i][0] < res[j][0]:
commons_list += [[res[i][0], res[j][0], commons, distance_between(locations[res[i][0]], locations[res[j][0]])]]
else:
commons_list += [[res[j][0], res[i][0], commons, distance_between(locations[res[i][0]], locations[res[j][0]])]]
commons_list = sorted(commons_list, key = lambda x : (-x[2], x[3]))
print commons_list[0][0] + ' ' + commons_list[0][1]
| gpl-3.0 | -4,238,094,887,164,742,700 | 38.612245 | 127 | 0.578568 | false |
lfairchild/PmagPy | dialogs/pmag_er_magic_dialogs.py | 1 | 52576 | """
Dialogs for ErMagicBuilder: interactive frames for checking and editing specimen, sample, site, location, and age data.
"""
# pylint: disable=W0612,C0111,C0103,W0201,C0301
import os
import wx
import wx.grid
import numpy as np
from . import drop_down_menus2 as drop_down_menus
from . import pmag_widgets as pw
from . import magic_grid2 as magic_grid
from . import grid_frame2
from . import grid_frame3
from pmagpy import find_pmag_dir
from pmagpy import contribution_builder as cb
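
# A minimal launch sketch (hypothetical wiring; in the real application these
# check frames are opened from the ErMagicBuilder main frame, whose menubar
# ErMagicCheckFrame3.__init__ re-binds, so a bare wx.Frame will not do):
#
#   import wx
#   from pmagpy import contribution_builder as cb
#
#   app = wx.App()
#   contribution = cb.Contribution(WD)  # reads MagIC tables from directory WD
#   frame = ErMagicCheckFrame3(ermagic_main_frame, 'Check data', WD, contribution)
#   app.MainLoop()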
class ErMagicCheckFrame3(wx.Frame):
def __init__(self, parent, title, WD, contribution):
wx.Frame.__init__(self, parent, -1, title)
self.WD = WD
self.main_frame = self.Parent
self.contribution = contribution
self.temp_data = {}
self.grid = None
self.deleteRowButton = None
self.selected_rows = set()
self.min_size = (1160, 350)
self.contribution.propagate_ages()
# re-do the 'quit' binding so that it only closes the current window
self.main_frame.Bind(wx.EVT_MENU, lambda event: self.main_frame.menubar.on_quit(event, self), self.main_frame.menubar.file_quit)
self.InitSpecCheck()
def InitSpecCheck(self):
"""
make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to
"""
#wait = wx.BusyInfo("Please wait, working...")
#wx.SafeYield()
self.contribution.propagate_lithology_cols()
spec_df = self.contribution.tables['specimens'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'specimens', 'specimens', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, self.InitSampCheck),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.backButton.Disable()
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return
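    # Each Init*Check method in this class follows the same pattern: build a
    # GridFrame for one MagIC table, relabel and rebind its exit button to
    # onContinue (which saves, validates, and opens the next check dialog),
    # and add a Back button that returns to the previous step.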
def InitSampCheck(self):
"""
make an interactive grid in which users can edit sample names
as well as which site a sample belongs to
"""
# propagate any type/lithology/class data from sites to samples table
# will only overwrite if sample values are blank
self.contribution.propagate_lithology_cols()
samp_df = self.contribution.tables['samples'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'samples', 'samples', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
next_dia = self.InitSiteCheck
prev_dia = self.InitSpecCheck
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, next_dia),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, prev_dia),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return
def InitSiteCheck(self):
"""
make an interactive grid in which users can edit site names
as well as which location a site belongs to
"""
# propagate average lat/lon info from samples table if
# available in samples and missing in sites
self.contribution.propagate_average_up(cols=['lat', 'lon', 'height'],
target_df_name='sites',
source_df_name='samples')
# propagate lithology columns
self.contribution.propagate_lithology_cols()
site_df = self.contribution.tables['sites'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'sites', 'sites', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, self.InitLocCheck),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitSampCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return
def InitLocCheck(self):
"""
make an interactive grid in which users can edit locations
"""
# if there is a location without a name, name it 'unknown'
self.contribution.rename_item('locations', 'nan', 'unknown')
# propagate lat/lon values from sites table
self.contribution.get_min_max_lat_lon()
# propagate lithologies & geologic classes from sites table
self.contribution.propagate_cols_up(['lithologies',
'geologic_classes'], 'locations', 'sites')
res = self.contribution.propagate_min_max_up()
if cb.not_null(res):
self.contribution.propagate_cols_up(['age_unit'], 'locations', 'sites')
# set up frame
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'locations', 'locations', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, self.InitAgeCheck),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitSiteCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, min_size=self.min_size)
# center
self.grid_frame.Centre()
return
def InitAgeCheck(self):
"""make an interactive grid in which users can edit ages"""
age_df = self.contribution.tables['ages'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'ages', 'ages', self.panel,
main_frame=self.main_frame)
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON, lambda event: self.onContinue(event, grid, None),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitLocCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return
def on_close_grid_frame(self, event=None):
# required placeholder
pass
def onContinue(self, event, grid, next_dia=None):#, age_data_type='site'):
"""
        Save grid data in the data object, validate it, and (if the data pass
        or the user overrides any warnings) move on to the next dialog.
"""
# deselect column, including remove 'EDIT ALL' label
if self.grid_frame.drop_down_menu:
self.grid_frame.drop_down_menu.clean_up()
# remove '**' and '^^' from col names
#self.remove_starred_labels(grid)
grid.remove_starred_labels()
grid.SaveEditControlValue() # locks in value in cell currently edited
grid_name = str(grid.GetName())
# save all changes to data object and write to file
self.grid_frame.grid_builder.save_grid_data()
# check that all required data are present
validation_errors = self.validate(grid)
if validation_errors:
warn_string = ""
for error_name, error_cols in list(validation_errors.items()):
if error_cols:
warn_string += "You have {}: {}.\n\n".format(error_name, ", ".join(error_cols))
warn_string += "Are you sure you want to continue?"
result = pw.warning_with_override(warn_string)
if result == wx.ID_YES:
pass
else:
return False
else:
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
self.panel.Destroy()
if next_dia:
next_dia()
else:
# propagate any type/lithology/class data from sites to samples table
# will only overwrite if sample values are blank or "Not Specified"
self.contribution.propagate_lithology_cols()
wx.MessageBox('Done!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
def onbackButton(self, event=None, prev_dia=None):
if prev_dia:
            alert = bool(self.grid_frame.grid.changes)
self.grid_frame.onSave(event=None, alert=alert, destroy=True)
#if self.grid_frame.grid.name == 'samples':
# self.sample_window -= 2
self.panel.Destroy()
prev_dia()
def validate(self, grid):
"""
Using the MagIC data model, generate validation errors on a MagicGrid.
Parameters
----------
grid : dialogs.magic_grid3.MagicGrid
The MagicGrid to be validated
Returns
        -------
        warnings : dict
Empty dict if no warnings, otherwise a dict with format {name of problem: [problem_columns]}
"""
grid_name = str(grid.GetName())
dmodel = self.contribution.dmodel
reqd_headers = dmodel.get_reqd_headers(grid_name)
df = self.contribution.tables[grid_name].df
df = df.replace('', np.nan) # python does not view empty strings as null
if df.empty:
return {}
col_names = set(df.columns)
missing_headers = set(reqd_headers) - col_names
present_headers = set(reqd_headers) - set(missing_headers)
non_null_headers = df.dropna(how='all', axis='columns').columns
null_reqd_headers = present_headers - set(non_null_headers)
        if any(missing_headers) or any(null_reqd_headers):
warnings = {'missing required column(s)': sorted(missing_headers),
'no data in required column(s)': sorted(null_reqd_headers)}
else:
warnings = {}
return warnings
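    # Illustrative shape of the dict returned by validate() (column names here
    # are made-up examples; an empty dict means the grid passed validation):
    #   {'missing required column(s)': ['lat', 'lon'],
    #    'no data in required column(s)': ['geologic_classes']}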
def on_saveButton(self, event, grid):
"""saves any editing of the grid but does not continue to the next window"""
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
if self.grid_frame.drop_down_menu: # unhighlight selected columns, etc.
self.grid_frame.drop_down_menu.clean_up()
# remove '**' and '^^' from col labels
starred_cols, hatted_cols = grid.remove_starred_labels()
grid.SaveEditControlValue() # locks in value in cell currently edited
grid.HideCellEditControl() # removes focus from cell that was being edited
if grid.changes:
self.onSave(grid)
for col in starred_cols:
label = grid.GetColLabelValue(col)
grid.SetColLabelValue(col, label + '**')
for col in hatted_cols:
label = grid.GetColLabelValue(col)
grid.SetColLabelValue(col, label + '^^')
del wait
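    # NB: on_backButton below appears to be a legacy variant; the dialogs in
    # this class bind onbackButton (above) instead, and self.grid is never
    # re-assigned by the Init*Check methods here (it stays None).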
def on_backButton(self, event, previous_dia, current_dia=None):
# save first?
if self.grid.changes:
result = pw.warning_with_override("You have unsaved data which will be lost. Are you sure you want to go back?")
if result == wx.ID_NO:
return
# go back to previous grid
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
if current_dia == self.InitLocCheck:
pass
#elif previous_dia == self.InitSpecCheck or previous_dia == self.InitSampCheck:
# self.sample_window = 0
self.panel.Destroy()
previous_dia()
del wait
### Manage data methods ###
def onSave(self, grid):#, age_data_type='site'):
"""
Save grid data in the data object
"""
# deselect column, including remove 'EDIT ALL' label
if self.grid_frame.drop_down_menu:
self.grid_frame.drop_down_menu.clean_up()
# save all changes to data object and write to file
        self.grid_frame.grid_builder.save_grid_data()  # grid_builder lives on the grid frame in this class
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
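
# Note: ErMagicCheckFrame below is the older counterpart of ErMagicCheckFrame3.
# It steps through a similar specimen/sample/site/location checking wizard, but
# edits a legacy er_magic data object via grid_frame2 and drop_down_menus2
# instead of a MagIC 3 Contribution via grid_frame3.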
class ErMagicCheckFrame(wx.Frame):
def __init__(self, parent, title, WD, magic_data): # magic_data was ErMagic
wx.Frame.__init__(self, parent, -1, title)
self.WD = WD
self.main_frame = self.Parent
self.er_magic_data = magic_data
self.er_magic_data.no_pmag_data = set(['specimen', 'sample', 'site', 'location'])
self.temp_data = {}
self.drop_down_menu = None
# sample window must be displayed (differently) twice, so it is useful to keep track
self.sample_window = 0
self.grid = None
self.deleteRowButton = None
self.selected_rows = set()
self.InitSpecCheck()
def InitSpecCheck(self):
"""make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
#import wx.lib.scrolledpanel as libpanel # does not work well
#self.panel = libpanel.ScrolledPanel(self, style=wx.SIMPLE_BORDER)
text = """Step 1:
Check that all specimens belong to the correct sample
(if sample name is simply wrong, that will be fixed in step 2)"""
label = wx.StaticText(self.panel, label=text)
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'specimen',
self.er_magic_data.headers, self.panel,
'sample')
self.spec_grid = self.grid_builder.make_grid(incl_pmag=False)
self.grid = self.spec_grid
self.spec_grid.InitUI()
self.grid_builder.add_data_to_grid(self.spec_grid, 'specimen', incl_pmag=False)
samples = self.er_magic_data.make_name_list(self.er_magic_data.samples)
self.drop_down_menu = drop_down_menus.Menus("specimen", self, self.spec_grid, samples)
#### Create Buttons ####
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.addSampleButton = wx.Button(self.panel, label="Add a new sample")
self.samples = [name for name in self.er_magic_data.samples]
self.Bind(wx.EVT_BUTTON, self.on_addSampleButton, self.addSampleButton)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicSpecimenHelp.html"), self.helpButton)
hbox_one.Add(self.addSampleButton, flag=wx.ALIGN_LEFT|wx.RIGHT, border=10)
hbox_one.Add(self.helpButton)
#
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.spec_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.spec_grid, next_dia=self.InitSampCheck), self.continueButton)
hboxok.Add(self.saveButton, flag=wx.ALIGN_LEFT|wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.ALIGN_LEFT|wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.ALIGN_LEFT)
#
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'specimen', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Create Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.AddSpacer(10)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=10)
vbox.Add(hbox_one, flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(self.spec_grid, flag=wx.ALL, border=10)#|wx.EXPAND, border=30)
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
self.Hide()
self.Show()
def InitSampCheck(self):
"""make an interactive grid in which users can edit sample names
as well as which site a sample belongs to"""
self.sample_window += 1
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
if self.sample_window == 1:
text = """Step 2:
Check that all samples are correctly named,
and that they belong to the correct site
(if site name is simply wrong, that will be fixed in step 3)"""
step_label = wx.StaticText(self.panel, label=text)#, size=(900, 100))
else:
text = """Step 4:
Some of the data from the er_sites table has propagated into er_samples.
Check that these data are correct, and fill in missing cells using controlled vocabularies.
The columns for class, lithology, and type can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(see Help button for more details)\n\n** Denotes controlled vocabulary"""
step_label = wx.StaticText(self.panel, label=text)#, size=(900, 100))
if self.sample_window == 1:
# provide no extra headers
headers = {'sample': {'er': [[], [], []],
'pmag': [[], [], []]}}
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'sample',
headers, self.panel,
'site')
if self.sample_window > 1:
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'sample',
self.er_magic_data.headers, self.panel,
'site')
self.samp_grid = self.grid_builder.make_grid(incl_pmag=False)
self.samp_grid.InitUI()
self.grid_builder.add_data_to_grid(self.samp_grid, 'sample', incl_pmag=False)
self.grid = self.samp_grid
sites = sorted(self.er_magic_data.make_name_list(self.er_magic_data.sites))
self.drop_down_menu = drop_down_menus.Menus("sample", self, self.samp_grid, sites) # initialize all needed drop-down menus
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.addSiteButton = wx.Button(self.panel, label="Add a new site")
self.Bind(wx.EVT_BUTTON, self.on_addSiteButton, self.addSiteButton)
hbox_one.Add(self.addSiteButton, flag=wx.RIGHT, border=10)
if self.sample_window == 1:
html_help = "ErMagicSampleHelp1.html"
if self.sample_window > 1:
html_help = "ErMagicSampleHelp.html"
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, html_help), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.samp_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
next_dia = self.InitSiteCheck if self.sample_window < 2 else self.InitLocCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.samp_grid, next_dia=next_dia), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitSpecCheck if self.sample_window < 2 else self.InitSiteCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia=previous_dia), self.backButton)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'sample', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(step_label, flag=wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(self.samp_grid, flag=wx.ALL, border=10) # using wx.EXPAND or not does not affect re-size problem
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
        ## this combination may prevent a display error that (without the fix) only resolves when the window is manually resized
self.panel.Refresh()
self.samp_grid.ForceRefresh()
self.panel.Refresh()
self.Refresh()
# this prevents display errors
self.Hide()
self.Show()
#self.Fit() # this make it worse!
#self.Layout() # doesn't fix display resize error
#self.panel.Layout() # doesn't fix display resize error
#self.main_frame.Layout()# doesn't fix display resize error
def InitSiteCheck(self):
"""make an interactive grid in which users can edit site names
as well as which location a site belongs to"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text = """Step 3:
Check that all sites are correctly named, and that they belong to the correct location.
Fill in the additional columns with controlled vocabularies.
The columns for class, lithology, and type can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(see the help button for more details)
note: Changes to site_class, site_lithology, or site_type will overwrite er_samples.txt
However, you will be able to edit sample_class, sample_lithology, and sample_type in step 4
**Denotes controlled vocabulary"""
label = wx.StaticText(self.panel, label=text)
#self.Data_hierarchy = self.ErMagic.Data_hierarchy
self.sites = sorted(self.er_magic_data.make_name_list(self.er_magic_data.sites))
#for val in ['er_citation_names', 'er_location_name', 'er_site_name', 'site_class', 'site_lithology', 'site_type', 'site_definition', 'site_lat', 'site_lon']: #
# try:
# self.er_magic_data.headers['site']['er'][0].remove(val)
# except ValueError:
# pass
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'site',
self.er_magic_data.headers, self.panel,
'location')
self.site_grid = self.grid_builder.make_grid(incl_pmag=False)
self.site_grid.InitUI()
self.grid_builder.add_data_to_grid(self.site_grid, 'site', incl_pmag=False)
self.grid = self.site_grid
# populate site_definition as 's' by default if no value is provided (indicates that site is single, not composite)
rows = self.site_grid.GetNumberRows()
col = 6
for row in range(rows):
cell = self.site_grid.GetCellValue(row, col)
if not cell:
self.site_grid.SetCellValue(row, col, 's')
# initialize all needed drop-down menus
locations = sorted(self.er_magic_data.make_name_list(self.er_magic_data.locations))
self.drop_down_menu = drop_down_menus.Menus("site", self, self.site_grid, locations)
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.addLocButton = wx.Button(self.panel, label="Add a new location")
self.Bind(wx.EVT_BUTTON, self.on_addLocButton, self.addLocButton)
hbox_one.Add(self.addLocButton, flag=wx.RIGHT, border=10)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicSiteHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.site_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.site_grid, next_dia=self.InitSampCheck), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitSampCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia=previous_dia), self.backButton)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
#
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'site', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.BOTTOM|wx.TOP, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(self.site_grid, flag=wx.ALL|wx.EXPAND, border=10) # EXPAND ??
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
# this combination prevents a display error that (without the fix) only resolves on manually resizing the window
self.site_grid.ForceRefresh()
self.panel.Refresh()
self.Hide()
self.Show()
def InitLocCheck(self):
"""make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text = """Step 5:
Check that locations are correctly named.
Fill in any blank cells using controlled vocabularies.
(See Help button for details)
** Denotes controlled vocabulary"""
label = wx.StaticText(self.panel, label=text)
#self.Data_hierarchy = self.ErMagic.Data_hierarchy
self.locations = self.er_magic_data.locations
#
if not self.er_magic_data.locations:
msg = "You have no data in er_locations, so we are skipping step 5.\n Note that location names must be entered at the measurements level,so you may need to re-import your data, or you can add a location in step 3"
dlg = wx.MessageDialog(None, caption="Message:", message=msg, style=wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
self.panel.Destroy()
self.InitAgeCheck()
return
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'location',
self.er_magic_data.headers, self.panel)
self.loc_grid = self.grid_builder.make_grid(incl_pmag=False)
self.loc_grid.InitUI()
self.grid_builder.add_data_to_grid(self.loc_grid, 'location', incl_pmag=False)
self.grid = self.loc_grid
# initialize all needed drop-down menus
self.drop_down_menu = drop_down_menus.Menus("location", self,
self.loc_grid, None)
# need to find max/min lat/lon here IF they were added in the previous grid
sites = self.er_magic_data.sites
location_lat_lon = self.er_magic_data.get_min_max_lat_lon(self.er_magic_data.locations)
col_names = ('location_begin_lat', 'location_end_lat', 'location_begin_lon', 'location_end_lon')
col_inds = [self.grid.col_labels.index(name) for name in col_names]
col_info = list(zip(col_names, col_inds))
for loc in self.er_magic_data.locations:
row_ind = self.grid.row_labels.index(loc.name)
for col_name, col_ind in col_info:
info = location_lat_lon[loc.name][col_name]
self.grid.SetCellValue(row_ind, col_ind, str(info))
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicLocationHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.loc_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.loc_grid, next_dia=self.InitAgeCheck), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitSampCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia, current_dia=self.InitLocCheck), self.backButton)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
#
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'location', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM|wx.ALIGN_LEFT, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.ALIGN_LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.ALIGN_LEFT, border=10)
vbox.Add(self.loc_grid, flag=wx.TOP|wx.BOTTOM, border=10)
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
self.Hide()
self.Show()
def InitAgeCheck(self):
"""make an interactive grid in which users can edit ages"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text = """Step 6:
Fill in or correct any cells with information about ages.
The column for magic_method_codes can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(See Help button for details)
**Denotes controlled vocabulary """
label = wx.StaticText(self.panel, label=text)
self.items = self.er_magic_data.data_lists[self.er_magic_data.age_type][0]
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'age',
self.er_magic_data.headers, self.panel, 'location')
self.age_grid = self.grid_builder.make_grid(incl_pmag=False)
self.age_grid.InitUI()
self.grid_builder.add_data_to_grid(self.age_grid, 'age', incl_pmag=False)
self.grid_builder.add_age_data_to_grid()
self.grid = self.age_grid
#
# make it impossible to edit the 1st and 3rd columns
for row in range(self.age_grid.GetNumberRows()):
for col in (0, 2):
self.age_grid.SetReadOnly(row, col, True)
# initialize all needed drop-down menus
self.drop_down_menu = drop_down_menus.Menus("age", self, self.age_grid, None)
# re-set first column name
self.age_grid.SetColLabelValue(0, 'er_site_name')
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicAgeHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.age_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.age_grid, next_dia=None), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitLocCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia), self.backButton)
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=20)#, flag=wx.ALIGN_LEFT|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM, border=10)
vbox.Add(self.age_grid, flag=wx.TOP|wx.BOTTOM, border=10) # EXPAND ??
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
self.Hide()
self.Show()
### Grid methods ###
def make_simple_table(self, column_labels, data_dict, grid_name):
row_labels = sorted(data_dict.keys())
        if len(row_labels) in range(1, 5):  # fixed heights are defined for 1-4 rows below
num_rows = len(row_labels)
height = {1: 70, 2: 90, 3: 110, 4: 130}
grid = magic_grid.MagicGrid(self.panel, grid_name, row_labels, column_labels, (-1, height[num_rows])) # autosizes width, but enforces fixed pxl height to prevent display problems
else:
grid = magic_grid.MagicGrid(self.panel, grid_name, row_labels, column_labels)
data = grid.InitUI()
if grid_name == 'ages':
temp_data_key = 'ages'
else:
temp_data_key = column_labels[0]
self.temp_data[temp_data_key] = data
grid.add_data(data_dict)
grid.size_grid()
grid.do_event_bindings()
return grid
def onMouseOver(self, event, grid):
"""
Displays a tooltip over any cell in a certain column
"""
x, y = grid.CalcUnscrolledPosition(event.GetX(), event.GetY())
coords = grid.XYToCell(x, y)
col = coords[1]
row = coords[0]
# creates tooltip message for cells with long values
# note: this works with EPD for windows, and modern wxPython, but not with Canopy Python
msg = grid.GetCellValue(row, col)
if len(msg) > 15:
event.GetEventObject().SetToolTipString(msg)
else:
event.GetEventObject().SetToolTipString('')
def validate(self, grid):
validations = ['er_specimen_name', 'er_sample_name', 'er_site_name', 'er_location_name', 'site_class', 'site_lithology', 'site_type', 'site_definition', 'site_lon', 'site_lat', 'sample_class', 'sample_lithology', 'sample_type', 'sample_lat', 'sample_lon', 'location_type', 'age_unit', 'age']#, 'magic_method_codes']
cols = list(range(grid.GetNumberCols()))
rows = list(range(grid.GetNumberRows()))
data_missing = []
for col in cols:
col_label = str(grid.GetColLabelValue(col))
if col_label in validations:
for row in rows:
value = grid.GetCellValue(row, col)
if not value:
data_missing.append(col_label)
break
return data_missing
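    # Illustrative example (hypothetical grid contents): if any cell in the
    # 'er_site_name' or 'site_lat' columns is empty, validate() returns
    # ['er_site_name', 'site_lat']; an empty list means no required data are missing.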
### Button methods ###
def on_addSampleButton(self, event):
def add_sample(sample, site):
add_sample_data(sample, site)
sites = self.er_magic_data.make_name_list(self.er_magic_data.sites)
pw.AddItem(self, 'Sample', add_sample, owner_items=sites, belongs_to='site') # makes window for adding new data
def add_sample_data(sample, site):
# add sample
self.er_magic_data.add_sample(sample, site)
# re-Bind so that the updated samples list shows up on a left click
samples = sorted(self.er_magic_data.make_name_list(self.er_magic_data.samples))
choices = self.drop_down_menu.choices
choices[1] = (samples, False)
self.drop_down_menu.update_drop_down_menu(self.spec_grid, choices)
def on_addSiteButton(self, event):
def add_site(site, location):
add_site_data(site, location)
locations = self.er_magic_data.make_name_list(self.er_magic_data.locations)
pw.AddItem(self, 'Site', add_site, locations, 'location')
def add_site_data(site, location):
# add site
self.er_magic_data.add_site(site, location)
# re-Bind so that the updated sites list shows up on a left click
sites = sorted(self.er_magic_data.make_name_list(self.er_magic_data.sites))
self.drop_down_menu.update_drop_down_menu(self.samp_grid, {1: (sites, False)})
def on_addLocButton(self, event):
def add_loc(loc, parent=None):
add_loc_data(loc)
#def __init__(self, parent, title, data_items, data_method):
if not self.er_magic_data.locations:
pass
pw.AddItem(self, 'Location', add_loc, owner_items=None, belongs_to=None) # makes window for adding new data
def add_loc_data(loc):
# add location
self.er_magic_data.add_location(loc)
# re-Bind so that the updated locations list shows up on a left click
locations = self.er_magic_data.make_name_list(self.er_magic_data.locations)
choices = self.drop_down_menu.choices
choices[1] = (locations, False)
self.drop_down_menu.update_drop_down_menu(self.site_grid, choices)
def on_helpButton(self, event, page=None):
"""shows html help page"""
# for use on the command line:
path = find_pmag_dir.get_pmag_dir()
# for use with pyinstaller
#path = self.main_frame.resource_dir
help_page = os.path.join(path, 'dialogs', 'help_files', page)
# if using with py2app, the directory structure is flat,
# so check to see where the resource actually is
if not os.path.exists(help_page):
help_page = os.path.join(path, 'help_files', page)
html_frame = pw.HtmlFrame(self, page=help_page)
html_frame.Show()
def on_continueButton(self, event, grid, next_dia=None):
"""
pulls up next dialog, if there is one.
gets any updated information from the current grid and runs ErMagicBuilder
"""
#wait = wx.BusyInfo("Please wait, working...")
# unhighlight selected columns, etc.
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# remove '**' from col names
#self.remove_starred_labels(grid)
grid.remove_starred_labels()
grid.SaveEditControlValue() # locks in value in cell currently edited
grid_name = str(grid.GetName())
# check that all required data are present
validation_errors = self.validate(grid)
if validation_errors:
result = pw.warning_with_override("You are missing required data in these columns: {}\nAre you sure you want to continue without these data?".format(', '.join(validation_errors)))
if result == wx.ID_YES:
pass
else:
return False
if grid.changes:
self.onSave(grid)
self.deleteRowButton = None
#self.panel.Destroy() # calling Destroy here breaks with Anaconda Python (segfault)
# make sure that specimens get propagated with
# any default sample info
if next_dia == self.InitLocCheck:
if self.er_magic_data.specimens:
for spec in self.er_magic_data.specimens:
spec.propagate_data()
if next_dia:
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
wx.CallAfter(self.panel.Destroy) # no segfault here!
next_dia()
# need to wait to process the resize:
event = wx.PyCommandEvent(wx.EVT_SIZE.typeId, self.GetId())
wx.CallAfter(self.GetEventHandler().ProcessEvent, event)
del wait
else:
wait = wx.BusyInfo("Please wait, writing data to files...")
wx.SafeYield()
# actually write data:
self.er_magic_data.write_files()
self.Destroy()
del wait
def on_saveButton(self, event, grid):
"""saves any editing of the grid but does not continue to the next window"""
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
if self.drop_down_menu: # unhighlight selected columns, etc.
self.drop_down_menu.clean_up()
# remove '**' from col labels
starred_cols = grid.remove_starred_labels()
grid.SaveEditControlValue() # locks in value in cell currently edited
grid.HideCellEditControl() # removes focus from cell that was being edited
if grid.changes:
self.onSave(grid)
for col in starred_cols:
label = grid.GetColLabelValue(col)
grid.SetColLabelValue(col, label + '**')
del wait
def on_cancelButton(self, event):
dlg = pw.YesNoCancelDialog(self, "Your changes so far have not been written to file.\nSave changes?", "Not so fast")
res = dlg.ShowModal()
dlg.Destroy()
if res == wx.ID_YES:
self.onSave(self.grid)
self.er_magic_data.write_files()
self.Destroy()
if res == wx.ID_NO:
self.Destroy()
if res == wx.ID_CANCEL:
pass
def on_backButton(self, event, previous_dia, current_dia=None):
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
if current_dia == self.InitLocCheck:
pass
elif previous_dia == self.InitSpecCheck or previous_dia == self.InitSampCheck:
self.sample_window = 0
self.panel.Destroy()
previous_dia()
del wait
def onDeleteRow(self, event, data_type):
"""
On button click, remove relevant object from both the data model and the grid.
"""
ancestry = self.er_magic_data.ancestry
child_type = ancestry[ancestry.index(data_type) - 1]
names = [self.grid.GetCellValue(row, 0) for row in self.selected_rows]
if data_type == 'site':
how_to_fix = 'Make sure to select a new site for each orphaned sample in the next step'
else:
how_to_fix = 'Go back a step and select a new {} for each orphaned {}'.format(data_type, child_type)
orphans = []
for name in names:
row = self.grid.row_labels.index(name)
orphan = self.er_magic_data.delete_methods[data_type](name)
if orphan:
orphans.extend(orphan)
self.grid.remove_row(row)
if orphans:
orphan_names = self.er_magic_data.make_name_list(orphans)
pw.simple_warning('You have deleted:\n\n {}\n\nthe parent(s) of {}(s):\n\n {}\n\n{}'.format(', '.join(names), child_type, ', '.join(orphan_names), how_to_fix))
self.selected_rows = set()
# update grid and data model
self.update_grid(self.grid)#, grids[grid_name])
self.grid.Refresh()
def onLeftClickLabel(self, event):
"""
When user clicks on a grid label, determine if it is a row label or a col label.
Pass along the event to the appropriate function.
(It will either highlight a column for editing all values, or highlight a row for deletion).
"""
if event.Col == -1 and event.Row == -1:
pass
elif event.Col < 0:
self.onSelectRow(event)
elif event.Row < 0:
self.drop_down_menu.on_label_click(event)
def onSelectRow(self, event):
"""
Highlight or unhighlight a row for possible deletion.
"""
grid = self.grid
row = event.Row
default = (255, 255, 255, 255)
highlight = (191, 216, 216, 255)
cell_color = grid.GetCellBackgroundColour(row, 0)
attr = wx.grid.GridCellAttr()
if cell_color == default:
attr.SetBackgroundColour(highlight)
self.selected_rows.add(row)
else:
attr.SetBackgroundColour(default)
try:
self.selected_rows.remove(row)
except KeyError:
pass
        if self.selected_rows and self.deleteRowButton:
            self.deleteRowButton.Enable()
        elif self.deleteRowButton:
            # guard: deleteRowButton may have been set to None in on_continueButton
            self.deleteRowButton.Disable()
grid.SetRowAttr(row, attr)
grid.Refresh()
### Manage data methods ###
def update_grid(self, grid):
"""
        take in a wxPython grid and update the ErMagic data object with its changes
"""
data_methods = {'specimen': self.er_magic_data.change_specimen,
'sample': self.er_magic_data.change_sample,
'site': self.er_magic_data.change_site,
'location': self.er_magic_data.change_location,
'age': self.er_magic_data.change_age}
grid_name = str(grid.GetName())
cols = list(range(grid.GetNumberCols()))
col_labels = []
for col in cols:
col_labels.append(grid.GetColLabelValue(col))
for row in grid.changes: # go through changes and update data structures
if row == -1:
continue
else:
data_dict = {}
for num, label in enumerate(col_labels):
if label:
data_dict[str(label)] = str(grid.GetCellValue(row, num))
new_name = str(grid.GetCellValue(row, 0))
old_name = self.temp_data[grid_name][row]
data_methods[grid_name](new_name, old_name, data_dict)
grid.changes = False
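    # Illustrative example (hypothetical data): renaming site 'site1' to
    # 'site1a' in row 0 of the 'site' grid ends up calling
    # self.er_magic_data.change_site('site1a', 'site1', {'er_site_name': 'site1a', ...}).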
def onSave(self, grid):#, age_data_type='site'):
"""
Save grid data in the data object
"""
# deselect column, including remove 'EDIT ALL' label
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# save all changes to er_magic data object
self.grid_builder.save_grid_data()
# don't actually write data in this step (time-consuming)
# instead, write to files when user is done editing
#self.er_magic_data.write_files()
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
| bsd-3-clause | -1,870,059,521,684,590,800 | 42.059787 | 323 | 0.604953 | false |
kevinarpe/kevinarpe-rambutan3 | tests/check_args/other/test_RRangeSizeStr.py | 1 | 1887 | import pytest
from rambutan3.check_args.RCheckArgsError import RCheckArgsError
from rambutan3.check_args.other.RRangeSizeStr import RRangeSizeStr
from tests.check_args.collection import test_RRangeSizeMatcher
def test_ctor():
test_RRangeSizeMatcher.core_test_ctor(RRangeSizeStr)
def test_check_arg():
with pytest.raises(RCheckArgsError):
__check_arg([123], min_size=1)
with pytest.raises(RCheckArgsError):
__check_arg([123], max_size=1)
with pytest.raises(RCheckArgsError):
__check_arg([123], min_size=1, max_size=2)
with pytest.raises(RCheckArgsError):
__check_arg(None, min_size=1)
with pytest.raises(RCheckArgsError):
__check_arg(123, min_size=1)
__check_arg('abc', min_size=1)
with pytest.raises(RCheckArgsError):
__check_arg('abc', min_size=4)
__check_arg('abc', max_size=3)
with pytest.raises(RCheckArgsError):
__check_arg('abc', max_size=2)
with pytest.raises(RCheckArgsError):
__check_arg('', min_size=1, max_size=3)
__check_arg('a', min_size=1, max_size=3)
__check_arg('ab', min_size=1, max_size=3)
__check_arg('abc', min_size=1, max_size=3)
with pytest.raises(RCheckArgsError):
__check_arg('abcd', min_size=1, max_size=3)
def __check_arg(value, *, min_size: int=-1, max_size: int=-1):
m = RRangeSizeStr(min_size=min_size, max_size=max_size)
assert value is m.check_arg(value, 'dummy_arg_name')
def test__eq__and__ne__():
test_RRangeSizeMatcher.core_test__eq__and__ne__(RRangeSizeStr)
def test__hash__():
test_RRangeSizeMatcher.core_test__hash__(RRangeSizeStr)
def test__str__():
assert str(RRangeSizeStr(min_size=1)) == 'str where size >= 1'
assert str(RRangeSizeStr(max_size=1)) == 'str where size <= 1'
assert str(RRangeSizeStr(min_size=1, max_size=2)) == 'str where size >= 1 and size <= 2'
| gpl-3.0 | -8,604,642,178,105,160,000 | 28.484375 | 92 | 0.656598 | false |
nohona/cron-crm | usr/local/certbot/certbot/tests/errors_test.py | 4 | 1328 | """Tests for certbot.errors."""
import unittest
import mock
from acme import messages
from certbot import achallenges
from certbot.tests import acme_util
class FailedChallengesTest(unittest.TestCase):
"""Tests for certbot.errors.FailedChallenges."""
def setUp(self):
from certbot.errors import FailedChallenges
self.error = FailedChallenges(set([achallenges.DNS(
domain="example.com", challb=messages.ChallengeBody(
chall=acme_util.DNS01, uri=None,
error=messages.Error(typ="tls", detail="detail")))]))
def test_str(self):
self.assertTrue(str(self.error).startswith(
"Failed authorization procedure. example.com (dns-01): tls"))
class StandaloneBindErrorTest(unittest.TestCase):
"""Tests for certbot.errors.StandaloneBindError."""
def setUp(self):
from certbot.errors import StandaloneBindError
self.error = StandaloneBindError(mock.sentinel.error, 1234)
def test_instance_args(self):
self.assertEqual(mock.sentinel.error, self.error.socket_error)
self.assertEqual(1234, self.error.port)
def test_str(self):
self.assertTrue(str(self.error).startswith(
"Problem binding to port 1234: "))
if __name__ == "__main__":
unittest.main() # pragma: no cover
| gpl-3.0 | -5,918,238,343,923,517,000 | 29.181818 | 73 | 0.676958 | false |
project-hypr/hypr2 | tests/providers/crud/test_crud_crud.py | 1 | 12472 | # Copyright 2014-2016 Morgan Delahaye-Prat. All Rights Reserved.
#
# Licensed under the Simplified BSD License (the "License");
# you may not use this file except in compliance with the License.
"""Test basic CRUD operations of the CRUDProvider."""
import json
import pytest
from hypr.providers import CRUDProvider
def deserialize(data, model):
"""Deserialize JSON data."""
data = json.loads(data)
if 'content' in data and 'count' in data:
return data['count'], [model.load(r) for r in data['content']]
return model.load(data)
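# Usage sketch (assuming a model class exposing load()/one() as in these tests):
#   count, items = deserialize(rv.text, model)   # collection payload
#   item = deserialize(rv.text, model)           # single-resource payload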
@pytest.fixture
def app(app, model):
"""All the tests are conducted with application/json as default mime."""
provider = type('IntestProvider', (CRUDProvider,), {'__model__': model})
app.add_provider(provider, '/test', '/test/<int:id>')
return app
class TestModelCreate:
"""Test create."""
models = 'SQLiteModel',
def test_create(self, app, model):
"""Create one resource."""
payload = json.dumps({'value': 'foo'})
with app.test_client() as client:
rv = client.post('/test', data=payload)
assert rv.status == 201
data = deserialize(rv.text, model)
assert data == model.one(data.id)
def test_bulk_create(self, app, model):
"""Create multiple resources at once."""
payload = json.dumps([
{'value': 'foo'},
{'value': 'bar'}
])
with app.test_client() as client:
rv = client.post('/test?_bulk=1', data=payload)
assert rv.status == 201
count, resources = deserialize(rv.text, model)
for resource in resources:
assert resource == model.one(resource.id)
@pytest.mark.populate(5)
class TestProviderRead:
"""Test read."""
models = 'SQLiteModel',
def test_get_collection(self, app, model):
"""Test."""
with app.test_client() as client:
rv = client.get('/test')
assert rv.status == 200
count, resources = deserialize(rv.text, model)
assert count == model.count() == 5
assert sorted(resources) == sorted(model.get())
def test_get_one(self, app, model):
"""Test."""
with app.test_client() as client:
rv = client.get('/test/1')
assert rv.status == 200
resource = deserialize(rv.text, model)
assert resource == model.one(1)
@pytest.mark.populate(5)
class TestModelUpdate:
"""Test update."""
models = 'SQLiteModel',
def test_update(self, app, model):
"""Update an instance with PATCH."""
ref = model.one(1)
payload = json.dumps({'value': 'test_ok'})
with app.test_client() as client:
rv = client.patch('/test/1', data=payload)
assert rv.status == 200
resource = deserialize(rv.text, model)
assert resource != ref
assert resource == model.one(1)
def test_update_alt(self, app, model):
"""Update an instance with PUT."""
ref = model.one(2)
payload = json.dumps({'value': 'test_ok'})
with app.test_client() as client:
rv = client.put('/test/2', data=payload)
assert rv.status == 200
resource = deserialize(rv.text, model)
assert resource != ref
assert resource == model.one(2)
def test_bulk_update(self, app, model):
"""Update multiple resources at once."""
ref = [model.one(3), model.one(4)]
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'id': 4, 'value': 'test_ok1'}
])
with app.test_client() as client:
rv = client.put('/test?_bulk=1', data=payload)
assert rv.status == 200
count, data = deserialize(rv.text, model)
for instance in ref:
assert instance != model.one(instance.id)
for resource in data:
assert resource == model.one(resource.id)
@pytest.mark.populate(5)
class TestModelDelete:
"""Test delete."""
models = 'SQLiteModel',
def test_delete(self, app, model):
"""Delete a resource."""
with app.test_client() as client:
rv = client.delete('/test/1')
assert rv.status == 204
assert model.one(1) is None
def test_bulk_delete(self, app, model):
"""Delete multiple resources at once."""
ref = [model.one(3), model.one(4)]
payload = json.dumps([
{'id': 3},
{'id': 4}
])
with app.test_client() as client:
rv = client.delete('/test?_bulk=1', data=payload)
assert rv.status == 204
for instance in ref:
assert model.one(instance.id) is None
@pytest.mark.populate(5)
class TestMissingPayloadException:
"""Test requests with missing payload."""
models = 'SQLiteModel',
def test_create(self, app, model):
"""Create one resource."""
with app.test_client() as client:
rv = client.post('/test')
assert rv.status == 400
def test_bulk_create(self, app, model):
"""Create multiple resources at once."""
with app.test_client() as client:
rv = client.post('/test?_bulk=1')
assert rv.status == 400
def test_update(self, app, model):
"""Update an instance."""
with app.test_client() as client:
rv = client.patch('/test/1')
assert rv.status == 400
def test_bulk_update(self, app, model):
"""Update multiple resources at once."""
with app.test_client() as client:
rv = client.put('/test?_bulk=1')
assert rv.status == 400
def test_bulk_delete(self, app, model):
"""Delete multiple resources at once."""
with app.test_client() as client:
rv = client.delete('/test?_bulk=1')
assert rv.status == 400
@pytest.mark.populate(5)
class TestInvalidPayloadException:
"""Test requests with invalid payload."""
models = 'SQLiteModel',
def test_create(self, app):
"""Create one resource."""
payload = json.dumps({'invalid': 'property'})
with app.test_client() as client:
rv = client.post('/test', data=payload)
assert rv.status == 400
def test_update(self, app, model):
"""Update one resource."""
ref = model.one(1)
payload = json.dumps({'invalid': 'property'})
with app.test_client() as client:
rv = client.patch('/test/1', data=payload)
assert rv.status == 400
assert ref == model.one(1)
@pytest.mark.populate(5)
class TestInvalidBulkRequest:
"""Test invalid bulk requests."""
models = 'SQLiteModel',
def test_bulk_create_missing_flag(self, app, model):
"""A missing bulk flag returns an error 400."""
payload = json.dumps([
{'value': 'foo'},
{'value': 'bar'}
])
with app.test_client() as client:
rv = client.post('/test', data=payload)
assert rv.status == 400
assert model.count() == 5
def test_bulk_update_missing_flag(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'id': 4, 'value': 'test_ok1'}
])
with app.test_client() as client:
rv = client.put('/test', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_delete_missing_flag(self, app, model):
"""Delete multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3},
{'id': 4}
])
with app.test_client() as client:
rv = client.delete('/test', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_update_on_single_resource(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'id': 4, 'value': 'test_ok1'}
])
with app.test_client() as client:
rv = client.put('/test/1?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_delete_on_single_resource(self, app, model):
"""Delete multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3},
{'id': 4}
])
with app.test_client() as client:
rv = client.delete('/test/1?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_update_unknown_resource(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
            {'id': 100, 'value': 'test_ok1'}  # unknown resource
])
with app.test_client() as client:
rv = client.put('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_delete_unknown_resource(self, app, model):
"""Delete multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3},
            {'id': 100}  # unknown resource
])
with app.test_client() as client:
rv = client.delete('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_create_invalid_property(self, app, model):
"""Create multiple resources at once."""
payload = json.dumps([
{'value': 'foo'},
{'invalid': 'property'}
])
with app.test_client() as client:
rv = client.post('/test?_bulk=1', data=payload)
assert rv.status == 400
assert model.count() == 5
def test_bulk_update_invalid_property(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'id': 4, 'invalid': 'property'}
])
with app.test_client() as client:
rv = client.put('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_update_missing_id(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'value': 'test_ok1'} # missing id
])
with app.test_client() as client:
rv = client.put('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_delete_missing_id(self, app, model):
"""Delete multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3},
{} # missing id
])
with app.test_client() as client:
rv = client.delete('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
class TestEmptySet:
"""Crud operations (except create) on an empty database."""
models = 'SQLiteModel',
def test_get_collection(self, app, model):
"""Get an empty set."""
with app.test_client() as client:
rv = client.get('/test')
assert rv.status == 200
count, resources = deserialize(rv.text, model)
assert count == 0
assert resources == []
def test_get_one(self, app, model):
"""Get an unknown resource."""
with app.test_client() as client:
rv = client.get('/test/1')
assert rv.status == 404
def test_update(self, app, model):
"""Update an unknown resource."""
payload = json.dumps({'value': 'test_ok'})
with app.test_client() as client:
rv = client.patch('/test/1', data=payload)
assert rv.status == 404
def test_delete(self, app, model):
"""Delete an unknown resource."""
with app.test_client() as client:
rv = client.delete('/test/1')
assert rv.status == 404
| bsd-2-clause | 4,588,177,312,490,519,600 | 28.980769 | 76 | 0.552357 | false |
sergei-maertens/django-systemjs | docs/_ext/djangodocs.py | 1 | 2159 | """
Taken from djangoproject/django docs.
Sphinx plugins for Django documentation.
"""
import re
from sphinx import addnodes
from sphinx.util.compat import Directive
from sphinx.writers.html import SmartyPantsHTMLTranslator
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.set_translator('djangohtml', DjangoHTMLTranslator)
return {'parallel_read_safe': True}
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
if len(self.arguments) > 1:
msg = """Only one argument accepted for directive '{directive_name}::'.
Comments should be provided as content,
not as an extra argument.""".format(directive_name=self.name)
raise self.error(msg)
env = self.state.document.settings.env
ret = []
node = addnodes.versionmodified()
ret.append(node)
node['version'] = self.arguments[0]
node['type'] = self.name
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
version_text = {
'versionchanged': 'Changed in %s',
'versionadded': 'Added in %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
version_text = self.version_text.get(node['type'])
if version_text:
title = "%s%s" % (
version_text % node['version'],
":" if len(node) else "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
| mit | 5,868,693,548,885,009,000 | 29.842857 | 83 | 0.6151 | false |
dmach/dnf | dnf/cli/commands/shell.py | 1 | 9173 | # shell.py
# Shell CLI command.
#
# Copyright (C) 2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from dnf.cli import commands
from dnf.i18n import _
import cmd
import copy
import dnf
import logging
import shlex
import sys
logger = logging.getLogger('dnf')
# only demands we'd like to override
class ShellDemandSheet(object):
available_repos = True
resolving = True
root_user = True
sack_activation = True
class ShellCommand(commands.Command, cmd.Cmd):
aliases = ('shell', 'sh')
summary = _('run an interactive DNF shell')
MAPPING = {'repo': 'repo',
'repository': 'repo',
'exit': 'quit',
'quit': 'quit',
'run': 'ts_run',
'ts': 'transaction',
'transaction': 'transaction',
'config': 'config',
'resolvedep': 'resolve',
'help': 'help'
}
def __init__(self, cli):
commands.Command.__init__(self, cli)
cmd.Cmd.__init__(self)
self.prompt = '> '
@staticmethod
def set_argparser(parser):
parser.add_argument('script', nargs='?', metavar=_('SCRIPT'),
help=_('Script to run in DNF shell'))
def configure(self):
# append to ShellDemandSheet missing demands from
# dnf.cli.demand.DemandSheet with their default values.
default_demands = self.cli.demands
self.cli.demands = ShellDemandSheet()
for attr in dir(default_demands):
if attr.startswith('__'):
continue
try:
getattr(self.cli.demands, attr)
except AttributeError:
setattr(self.cli.demands, attr, getattr(default_demands, attr))
def run(self):
if self.opts.script:
self._run_script(self.opts.script)
else:
self.cmdloop()
def _clean(self):
self.base._finalize_base()
self.base._transaction = None
self.base.fill_sack()
def onecmd(self, line):
if not line or line == '\n':
return
if line == 'EOF':
line = 'quit'
try:
s_line = shlex.split(line)
        except ValueError:  # e.g. unbalanced quotes in the input line
self._help()
return
opts = self.cli.optparser.parse_main_args(s_line)
# Disable shell recursion.
if opts.command == 'shell':
return
if opts.command in self.MAPPING:
getattr(self, '_' + self.MAPPING[opts.command])(s_line[1::])
else:
cmd_cls = self.cli.cli_commands.get(opts.command)
if cmd_cls is not None:
cmd = cmd_cls(self.cli)
try:
opts = self.cli.optparser.parse_command_args(cmd, s_line)
cmd.cli.demands = copy.deepcopy(self.cli.demands)
cmd.configure()
cmd.run()
except dnf.exceptions.Error as e:
logger.error(_("Error:") + " " + e.value)
except:
return
else:
self._help()
def _config(self, args=None):
def print_or_set(key, val, conf):
if val:
setattr(conf, key, val)
else:
try:
print('{}: {}'.format(key, getattr(conf, str(key))))
except:
logger.warning(_('Unsupported key value.'))
if not args or len(args) > 2:
self._help('config')
return
key = args[0]
val = args[1] if len(args) == 2 else None
period = key.find('.')
if period != -1:
repo_name = key[:period]
key = key[period+1:]
repos = self.base.repos.get_matching(repo_name)
for repo in repos:
print_or_set(key, val, repo)
if not repos:
logger.warning(_('Could not find repository: %s'),
repo_name)
else:
print_or_set(key, val, self.base.conf)
def _help(self, args=None):
"""Output help information.
:param args: the command to output help information about. If
*args* is an empty, general help will be output.
"""
arg = args[0] if isinstance(args, list) and len(args) > 0 else args
msg = None
if arg:
if arg == 'config':
msg = _("""{} arg [value]
arg: debuglevel, errorlevel, obsoletes, gpgcheck, assumeyes, exclude,
repo_id.gpgcheck, repo_id.exclude
If no value is given it prints the current value.
If value is given it sets that value.""").format(arg)
elif arg == 'help':
msg = _("""{} [command]
print help""").format(arg)
elif arg in ['repo', 'repository']:
msg = _("""{} arg [option]
list: lists repositories and their status. option = [all | id | glob]
enable: enable repositories. option = repository id
disable: disable repositories. option = repository id""").format(arg)
elif arg == 'resolvedep':
msg = _("""{}
resolve the transaction set""").format(arg)
elif arg in ['transaction', 'ts']:
msg = _("""{} arg
list: lists the contents of the transaction
reset: reset (zero-out) the transaction
run: run the transaction""").format(arg)
elif arg == 'run':
msg = _("""{}
run the transaction""").format(arg)
elif arg in ['exit', 'quit']:
msg = _("""{}
exit the shell""").format(arg)
if not msg:
self.cli.optparser.print_help()
msg = _("""Shell specific arguments:
config set config options
help print help
repository (or repo) enable, disable or list repositories
resolvedep resolve the transaction set
transaction (or ts) list, reset or run the transaction set
run resolve and run the transaction set
exit (or quit) exit the shell""")
print('\n' + msg)
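    # Illustrative shell session ('install nano' is a hypothetical command
    # dispatched to the regular dnf command classes via cli_commands):
    #   > repo list
    #   > config assumeyes
    #   > install nano
    #   > ts run
    #   > exit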
def _repo(self, args=None):
cmd = args[0] if args else None
if cmd in ['list', None]:
self.onecmd('repolist ' + ' '.join(args[1:]))
elif cmd in ['enable', 'disable']:
repos = self.cli.base.repos
fill_sack = False
for repo in args[1::]:
r = repos.get_matching(repo)
if r:
getattr(r, cmd)()
fill_sack = True
else:
logger.critical(_("Error:") + " " + _("Unknown repo: '%s'"),
self.base.output.term.bold(repo))
if fill_sack:
self.base.fill_sack()
else:
self._help('repo')
def _resolve(self, args=None):
if self.cli.base.transaction is None:
try:
self.cli.base.resolve(self.cli.demands.allow_erasing)
except dnf.exceptions.DepsolveError as e:
print(e)
def _run_script(self, file):
try:
with open(file, 'r') as fd:
lines = fd.readlines()
for line in lines:
if not line.startswith('#'):
self.onecmd(line)
except IOError:
logger.info(_('Error: Cannot open %s for reading'), self.base.output.term.bold(file))
sys.exit(1)
def _transaction(self, args=None):
cmd = args[0] if args else None
if cmd == 'reset':
self._clean()
return
self._resolve()
if cmd in ['list', None]:
if self.base._transaction:
out = self.base.output.list_transaction(self.base._transaction)
logger.info(out)
elif cmd == 'run':
try:
self.base.do_transaction()
except:
pass
self._clean()
else:
self._help('transaction')
def _ts_run(self, args=None):
self._transaction(['run'])
def _quit(self, args=None):
logger.info(_('Leaving Shell'))
sys.exit(0)
| gpl-2.0 | -4,418,039,998,933,164,500 | 31.299296 | 97 | 0.528181 | false |
matthew-brett/dmg-wheel-installer | make_installer.py | 1 | 6064 | #!/usr/bin/env python
""" Make dmg installer for Python.org Python from Python wheels """
from __future__ import division, print_function
DESCRIP = "Make dmg installer for Python.org Python from Python wheels"
EPILOG = \
"""Make DMG installer from wheels
* Collect source packages for pip, setuptools
* Collect needed wheels using "pip wheel" command
* Write directory to DMG containing source and wheel packages
* Write "postinstall" script to install setuptools, pip, then install wheels
* Write "postinstall" script in ".pkg" double click installer
* Package result into DMG file.
"""
import os
from os.path import exists, join as pjoin
import shutil
from subprocess import check_call
from argparse import ArgumentParser, RawDescriptionHelpFormatter
try:
from urllib2 import urlopen, URLError # Python 2
except ImportError:
from urllib.request import urlopen, URLError # Python 3
# Defaults
PYTHON_VERSION='2.7'
# Constants
# Installed location of Python.org Python
PY_ORG_BASE='/Library/Frameworks/Python.framework/Versions/'
# Path for directory that will become the dmg contents
DMG_DIR='dmg_root'
# Subdirectory containing wheels and source packages
PKG_DIR = 'packages'
# Package directory within dmg_directory
DMG_PKG_DIR = DMG_DIR + '/' + PKG_DIR
# get-pip.py URL
GET_PIP_URL = 'https://bootstrap.pypa.io/get-pip.py'
def rm_mk_dir(dirname):
if exists(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
def mkdirs():
[rm_mk_dir(pth) for pth in (
DMG_PKG_DIR,
'scripts',
'pkg_template')]
def get_pip_params(args):
    params = ['--no-index'] if args.no_index else []
for link in args.find_links:
params.append('--find-links=' + link)
return params
def get_pippers(pip_params, get_pip_path=None):
pip_cmd = ['pip', 'install',
'--download', DMG_PKG_DIR,
'pip', 'setuptools'] + pip_params
check_call(pip_cmd)
if not get_pip_path is None:
shutil.copy2(get_pip_path, DMG_PKG_DIR)
return
url_obj = urlopen(GET_PIP_URL)
    # write bytes so the download works under both Python 2 and 3
    with open(DMG_PKG_DIR + '/get-pip.py', 'wb') as fobj:
        fobj.write(url_obj.read())
def get_wheels(version, requirements, pip_params):
    pip_exe = '{0}/{1}/bin/pip{1}'.format(PY_ORG_BASE, version)
if not exists(pip_exe):
raise RuntimeError('Need to install pip for python at ' +
'{0}/bin/python{1}'.format(PY_ORG_BASE, version))
# Install wheel locally just in case
check_call([pip_exe, 'install'] + pip_params + ['wheel'])
check_call([pip_exe, 'wheel', '-w', DMG_PKG_DIR] + pip_params +
list(requirements))
def write_post(py_version, requirements):
to_install = ', '.join(['"{0}"'.format(r) for r in requirements])
with open('scripts/postinstall', 'wt') as fobj:
fobj.write(
r"""#!/usr/bin/env python
# Install into Python.org python
import sys
import os
from os.path import exists, dirname
from subprocess import check_call
# Find disk image files
package_path = os.environ.get('PACKAGE_PATH')
if package_path is None:
sys.exit(10)
package_dir = dirname(package_path)
wheelhouse = package_dir + '/{pkg_dir}'
# Find Python.org Python
python_bin = '{py_org_base}/{py_version}/bin'
python_path = python_bin + '/python{py_version}'
if not exists(python_path):
sys.exit(20)
# Install pip
check_call([python_path, wheelhouse + '/get-pip.py', '-f', wheelhouse,
'--no-setuptools'])
# Find pip
expected_pip = python_bin + '/pip{py_version}'
if not exists(expected_pip):
sys.exit(30)
pip_cmd = [expected_pip, 'install', '--no-index', '--upgrade',
'--find-links', wheelhouse]
check_call(pip_cmd + ['setuptools'])
check_call(pip_cmd + [{to_install}])
""".format(py_org_base = PY_ORG_BASE,
py_version = py_version,
to_install = to_install,
pkg_dir = PKG_DIR,
))
check_call(['chmod', 'a+x', 'scripts/postinstall'])
def write_pkg(identifier, version):
pkg_fname = pjoin(DMG_DIR, '{0}-{1}.pkg'.format(identifier, version))
check_call(['pkgbuild', '--root', 'pkg_template', '--nopayload', '--scripts',
'scripts', '--identifier', identifier, '--version', version,
pkg_fname])
def write_dmg(identifier, py_version, pkg_version):
dmg_name = '{0}-py{1}-{2}'.format(
identifier,
py_version.replace('.', ''),
pkg_version)
check_call(['hdiutil', 'create', '-srcfolder', DMG_DIR,
'-volname', dmg_name,
dmg_name + '.dmg'])
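# e.g. write_dmg('mypkg', '2.7', '1.0') produces mypkg-py27-1.0.dmg
# (names are illustrative).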
def main():
parser = ArgumentParser(description=DESCRIP,
epilog=EPILOG,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('pkg_name', type=str, help='root name of installer')
parser.add_argument('pkg_version', type=str, help='version of installer')
parser.add_argument('requirements', type=str, nargs='+',
help='pip requirement strings')
parser.add_argument('--python-version', type=str, default=PYTHON_VERSION,
help='Python version in major.minor format, e.g "3.4"')
parser.add_argument('--no-index', action='store_true',
help='disable search of pip indices when fetching '
'packages to make installer')
parser.add_argument('--find-links', '-f', type=str, nargs='*', default=[],
help='locations to find packages to make installer')
parser.add_argument('--get-pip-path', type=str,
help='local path to "get-pip.py"')
# parse the command line
args = parser.parse_args()
pip_params = get_pip_params(args)
mkdirs()
get_pippers(pip_params, args.get_pip_path)
get_wheels(args.python_version, args.requirements, pip_params)
write_post(args.python_version, args.requirements)
write_pkg(args.pkg_name, args.pkg_version)
write_dmg(args.pkg_name, args.python_version, args.pkg_version)
if __name__ == '__main__':
main()
| bsd-2-clause | 407,317,834,380,431,300 | 33.850575 | 81 | 0.636873 | false |
dmccloskey/SBaaS_COBRA | SBaaS_COBRA/stage02_physiology_pairWiseTest_query.py | 1 | 9833 | #SBaaS
from .stage02_physiology_pairWiseTest_postgresql_models import *
from SBaaS_base.sbaas_base import sbaas_base
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
#system
from math import log
class stage02_physiology_pairWiseTest_query(sbaas_template_query):
def initialize_supportedTables(self):
'''Set the supported tables dict for stage02_physiology_pairWiseTest
'''
tables_supported = {'data_stage02_physiology_pairWiseTest':data_stage02_physiology_pairWiseTest,
'data_stage02_physiology_pairWiseTestMetabolites':data_stage02_physiology_pairWiseTestMetabolites,
'data_stage02_physiology_pairWiseTestSubsystems':data_stage02_physiology_pairWiseTestSubsystems,
};
self.set_supportedTables(tables_supported);
## Query from data_stage02_physiology_pairWiseTest# Query data from data_stage02_physiology_pairWiseTest
def get_RDataList_simulationIDs_dataStage02PhysiologyPairWiseTest(self,simulation_id_1_I,simulation_id_2_I):
"""get data from simulation_ids 1 and 2"""
#Tested
try:
data = self.session.query(
data_stage02_physiology_pairWiseTest.simulation_id_1,
data_stage02_physiology_pairWiseTest.simulation_id_2,
data_stage02_physiology_pairWiseTest.rxn_id,
data_stage02_physiology_pairWiseTest.test_stat,
data_stage02_physiology_pairWiseTest.test_description,
data_stage02_physiology_pairWiseTest.pvalue,
data_stage02_physiology_pairWiseTest.pvalue_corrected,
data_stage02_physiology_pairWiseTest.pvalue_corrected_description,
data_stage02_physiology_pairWiseTest.mean,
data_stage02_physiology_pairWiseTest.ci_lb,
data_stage02_physiology_pairWiseTest.ci_ub,
data_stage02_physiology_pairWiseTest.ci_level,
data_stage02_physiology_pairWiseTest.fold_change).filter(
data_stage02_physiology_pairWiseTest.simulation_id_1.like(simulation_id_1_I),
data_stage02_physiology_pairWiseTest.simulation_id_2.like(simulation_id_2_I),
data_stage02_physiology_pairWiseTest.used_.is_(True)).group_by(
data_stage02_physiology_pairWiseTest.simulation_id_1,
data_stage02_physiology_pairWiseTest.simulation_id_2,
data_stage02_physiology_pairWiseTest.rxn_id,
data_stage02_physiology_pairWiseTest.test_stat,
data_stage02_physiology_pairWiseTest.test_description,
data_stage02_physiology_pairWiseTest.pvalue,
data_stage02_physiology_pairWiseTest.pvalue_corrected,
data_stage02_physiology_pairWiseTest.pvalue_corrected_description,
data_stage02_physiology_pairWiseTest.mean,
data_stage02_physiology_pairWiseTest.ci_lb,
data_stage02_physiology_pairWiseTest.ci_ub,
data_stage02_physiology_pairWiseTest.ci_level,
data_stage02_physiology_pairWiseTest.fold_change).order_by(
data_stage02_physiology_pairWiseTest.simulation_id_2.asc(),
data_stage02_physiology_pairWiseTest.rxn_id.asc()).all();
data_O = [];
for d in data:
data_1 = {};
data_1['simulation_id_1'] = d.simulation_id_1;
data_1['simulation_id_2'] = d.simulation_id_2;
data_1['rxn_id'] = d.rxn_id;
data_1['test_stat'] = d.test_stat;
data_1['test_description'] = d.test_description;
                data_1['pvalue_negLog10'] = None;
                data_1['pvalue_corrected_negLog10'] = None;
                data_1['pvalue_corrected_description'] = None
if d.pvalue_corrected:
data_1['pvalue_corrected_negLog10'] = -log(d.pvalue_corrected,10);
if d.pvalue:
data_1['pvalue_negLog10'] = -log(d.pvalue,10);
data_1['pvalue_corrected_description'] = d.pvalue_corrected_description;
data_1['mean'] = d.mean;
data_1['ci_lb'] = d.ci_lb;
data_1['ci_ub'] = d.ci_ub;
data_1['ci_level'] = d.ci_level;
data_1['fold_change'] = d.fold_change;
data_O.append(data_1);
return data_O;
except SQLAlchemyError as e:
print(e);
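    # Note (illustrative): pvalue_negLog10 is -log10(p), so p = 0.01 maps to
    # 2.0; larger values therefore indicate more significant tests.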
def get_rows_analysisID_dataStage02PhysiologyPairWiseTest(self,analysis_id_I):
"""get data from simulation_ids 1 and 2"""
#Tested
try:
data = self.session.query(data_stage02_physiology_pairWiseTest).filter(
data_stage02_physiology_pairWiseTest.analysis_id.like(analysis_id_I),
data_stage02_physiology_pairWiseTest.used_.is_(True)).order_by(
data_stage02_physiology_pairWiseTest.simulation_id_1.asc(),
data_stage02_physiology_pairWiseTest.simulation_id_2.asc(),
data_stage02_physiology_pairWiseTest.rxn_id.asc()).all();
data_O = [d.__repr__dict__() for d in data];
for d in data_O:
                d['pvalue_negLog10'] = None;
                d['pvalue_corrected_negLog10'] = None;
                d['pvalue_corrected_description'] = None
if d['pvalue_corrected']:
d['pvalue_corrected_negLog10'] = -log(d['pvalue_corrected'],10);
if d['pvalue']:
d['pvalue_negLog10'] = -log(d['pvalue'],10);
return data_O;
except SQLAlchemyError as e:
print(e);
def get_rows_analysisID_dataStage02PhysiologyPairWiseTestMetabolites(self,analysis_id_I):
"""get data from simulation_ids 1 and 2"""
#Tested
try:
data = self.session.query(data_stage02_physiology_pairWiseTestMetabolites).filter(
data_stage02_physiology_pairWiseTestMetabolites.analysis_id.like(analysis_id_I),
data_stage02_physiology_pairWiseTestMetabolites.used_.is_(True)).order_by(
data_stage02_physiology_pairWiseTestMetabolites.simulation_id_1.asc(),
data_stage02_physiology_pairWiseTestMetabolites.simulation_id_2.asc(),
data_stage02_physiology_pairWiseTestMetabolites.met_id.asc()).all();
data_O = [d.__repr__dict__() for d in data];
for d in data_O:
                d['pvalue_negLog10'] = None;
                d['pvalue_corrected_negLog10'] = None;
                d['pvalue_corrected_description'] = None
if d['pvalue_corrected']:
d['pvalue_corrected_negLog10'] = -log(d['pvalue_corrected'],10);
if d['pvalue']:
d['pvalue_negLog10'] = -log(d['pvalue'],10);
return data_O;
except SQLAlchemyError as e:
print(e);
def get_rows_analysisID_dataStage02PhysiologyPairWiseTestSubsystems(self,analysis_id_I):
"""get data from simulation_ids 1 and 2"""
#Tested
try:
data = self.session.query(data_stage02_physiology_pairWiseTestSubsystems).filter(
data_stage02_physiology_pairWiseTestSubsystems.analysis_id.like(analysis_id_I),
data_stage02_physiology_pairWiseTestSubsystems.used_.is_(True)).order_by(
data_stage02_physiology_pairWiseTestSubsystems.simulation_id_1.asc(),
data_stage02_physiology_pairWiseTestSubsystems.simulation_id_2.asc(),
data_stage02_physiology_pairWiseTestSubsystems.subsystem_id.asc()).all();
data_O = [d.__repr__dict__() for d in data];
for d in data_O:
                d['pvalue_negLog10'] = None;
                d['pvalue_corrected_negLog10'] = None;
                d['pvalue_corrected_description'] = None
if d['pvalue_corrected']:
d['pvalue_corrected_negLog10'] = -log(d['pvalue_corrected'],10);
if d['pvalue']:
d['pvalue_negLog10'] = -log(d['pvalue'],10);
return data_O;
except SQLAlchemyError as e:
print(e);
def reset_dataStage02_physiology_pairWiseTest(self,
tables_I = [],
analysis_id_I = None,
warn_I=True):
try:
if not tables_I:
tables_I = list(self.get_supportedTables().keys());
querydelete = sbaas_base_query_delete(session_I=self.session,engine_I=self.engine,settings_I=self.settings,data_I=self.data);
for table in tables_I:
query = {};
query['delete_from'] = [{'table_name':table}];
query['where'] = [{
'table_name':table,
'column_name':'analysis_id',
'value':analysis_id_I,
'operator':'LIKE',
'connector':'AND'
}
];
table_model = self.convert_tableStringList2SqlalchemyModelDict([table]);
query = querydelete.make_queryFromString(table_model,query);
querydelete.reset_table_sqlalchemyModel(query_I=query,warn_I=warn_I);
except Exception as e:
print(e);
| mit | -7,389,374,455,009,166,000 | 55.511494 | 137 | 0.58924 | false |
Arcbot-Org/Arcbot | bolt/discord/models/channel.py | 1 | 1176 | from bolt.discord.models.base import Snowflake, Model, Field, ListField, Enum, Timestamp
from bolt.discord.models.user import User
from bolt.discord.permissions import Permission
class ChannelType(Enum):
GUILD_TEXT = 0
DM = 1
GUILD_VOICE = 2
GROUP_DM = 3
GUILD_CATEGORY = 4
class PermissionOverwrite(Model):
__repr_keys__ = ['id', 'type']
id = Field(Snowflake)
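    # overwrite target type; the Discord API sends "role" or "member" here (assumption, not verified against this codebase)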
type = Field(str)
deny = Field(Permission)
allow = Field(Permission)
class Channel(Model):
__repr_keys__ = ['id', 'name', 'type']
id = Field(Snowflake, required=True)
type = Field(ChannelType, required=True)
guild_id = Field(Snowflake)
position = Field(int)
permission_overwrites = ListField(PermissionOverwrite)
name = Field(str, max_length=100)
topic = Field(str, max_length=1024)
nsfw = Field(bool)
last_message_id = Field(Snowflake)
bitrate = Field(int)
user_limit = Field(int)
rate_limit_per_user = Field(int)
recipients = ListField(User)
icon = Field(str)
owner_id = Field(Snowflake)
application_id = Field(Snowflake)
parent_id = Field(Snowflake)
last_pin_timestamp = Field(Timestamp)
| gpl-3.0 | 4,696,457,051,982,362,000 | 26.348837 | 88 | 0.668367 | false |
gnowledge/OTM2 | opentreemap/treemap/util.py | 1 | 7219 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import datetime
from collections import OrderedDict
from urlparse import urlparse
from django.shortcuts import get_object_or_404, resolve_url
from django.http import HttpResponse
from django.utils.encoding import force_str, force_text
from django.utils.functional import Promise
from django.core.serializers.json import DjangoJSONEncoder
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.conf import settings
from django.core.exceptions import ValidationError, MultipleObjectsReturned
from django.utils.translation import ugettext_lazy as trans
from django.db.models.fields.files import ImageFieldFile
from django.contrib.gis.geos import Point
from opentreemap.util import dict_pop
from treemap.instance import Instance
def safe_get_model_class(model_string):
"""
In a couple of cases we want to be able to convert a string
into a valid django model class. For instance, if we have
'Plot' we want to get the actual class for 'treemap.models.Plot'
in a safe way.
This function returns the class represented by the given model
if it exists in 'treemap.models'
"""
from treemap.models import MapFeature
# All of our models live in 'treemap.models', so
# we can start with that namespace
models_module = __import__('treemap.models')
if hasattr(models_module.models, model_string):
return getattr(models_module.models, model_string)
elif MapFeature.has_subclass(model_string):
return MapFeature.get_subclass(model_string)
else:
raise ValidationError(
trans('invalid model type: "%s"') % model_string)
def add_visited_instance(request, instance):
if not (hasattr(request, 'session') and request.session):
return
# get the visited instances as a list of tuples, read into
# OrderedDict. OrderedDict has nice convenience methods for this
# purpose, but doesn't serialize well, so we pass it through.
visited_instances = request.session.get('visited_instances', [])
visited_instances = OrderedDict(visited_instances)
# delete the existing entry for this instance so it can be
# reinserted as the most recent entry.
if instance.pk in visited_instances:
del visited_instances[instance.pk]
stamp = datetime.datetime.now().isoformat()
visited_instances[instance.pk] = stamp
# turn back into a list of tuples
request.session['visited_instances'] = visited_instances.items()
request.session.modified = True
def get_last_visited_instance(request):
if not hasattr(request, 'session'):
instance = None
else:
visited_instances = request.session.get('visited_instances', [])
if not visited_instances:
instance = None
else:
# get the first tuple member of the last entry
# visited_instances have entries '(<pk>, <timestamp>)'
instance_id = visited_instances[-1][0]
try:
instance = Instance.objects.get(pk=instance_id)
except (Instance.DoesNotExist, MultipleObjectsReturned):
instance = None
return instance
def login_redirect(request):
# Reference: django/contrib/auth/decorators.py
path = request.build_absolute_uri()
# urlparse chokes on lazy objects in Python 3, force to str
resolved_login_url = force_str(
resolve_url(settings.LOGIN_URL))
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if (not login_scheme or login_scheme == current_scheme)\
and (not login_netloc or login_netloc == current_netloc): # NOQA
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, REDIRECT_FIELD_NAME)
def get_instance_or_404(**kwargs):
url_name, found = dict_pop(kwargs, 'url_name')
if found:
kwargs['url_name__iexact'] = url_name
return get_object_or_404(Instance, **kwargs)
def package_field_errors(model_name, validation_error):
"""
validation_error contains a dictionary of error messages of the form
{fieldname1: [messages], fieldname2: [messages]}.
Return a version keyed by "objectname.fieldname" instead of "fieldname".
"""
    return {'%s.%s' % (to_object_name(model_name), field): msgs
            for (field, msgs) in validation_error.message_dict.iteritems()}
# https://docs.djangoproject.com/en/dev/topics/serialization/#id2
class LazyEncoder(DjangoJSONEncoder):
def default(self, obj):
if isinstance(obj, Promise):
return force_text(obj)
elif hasattr(obj, 'dict'):
return obj.dict()
elif isinstance(obj, set):
return list(obj)
elif hasattr(obj, 'as_dict'):
return obj.as_dict()
elif isinstance(obj, Point):
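            # always serialize points as WGS84 lat/lng, whatever the source projection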
srid = 4326
obj.transform(srid)
return {'x': obj.x, 'y': obj.y, 'srid': srid}
# TODO: Handle S3
elif isinstance(obj, ImageFieldFile):
if obj:
return obj.url
else:
return None
else:
return super(LazyEncoder, self).default(obj)
def all_subclasses(cls):
"""Return all subclasses of given class"""
subclasses = set(cls.__subclasses__())
return subclasses | {clz for s in subclasses for clz in all_subclasses(s)}
def leaf_subclasses(cls):
"""Return all leaf subclasses of given class"""
all = all_subclasses(cls)
leaves = {s for s in all if not s.__subclasses__()}
return leaves
def to_object_name(model_name):
"""BenefitCurrencyConversion -> benefitCurrencyConversion"""
return model_name[0].lower() + model_name[1:]
def to_model_name(object_name):
"""benefitCurrencyConversion -> BenefitCurrencyConversion"""
return object_name[0].upper() + object_name[1:]
def get_filterable_audit_models():
from treemap.models import MapFeature
map_features = [c.__name__ for c in leaf_subclasses(MapFeature)]
models = map_features + ['Tree']
return {model.lower(): model for model in models}
def get_csv_response(filename):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s;' % filename
response['Cache-Control'] = 'no-cache'
# add BOM to support CSVs in MS Excel
# http://en.wikipedia.org/wiki/Byte_order_mark
response.write(u'\ufeff'.encode('utf8'))
return response
def get_json_response(filename):
response = HttpResponse(content_type='application/json')
response['Content-Disposition'] = 'attachment; filename=%s;' % filename
response['Cache-Control'] = 'no-cache'
return response
def can_read_as_super_admin(request):
if not hasattr(request.user, 'is_super_admin'):
return False
else:
return request.user.is_super_admin() and request.method == 'GET'
| gpl-3.0 | -7,807,075,686,139,737,000 | 33.706731 | 78 | 0.677102 | false |
Ra93POL/VKAPI | __init__.py | 1 | 4277 | # -*- coding: utf-8 -*-
import VKAPI, dataMngt, time
vk = None
one_account = {'vk.com': True, 'ok.ru': True, 'disk.yandex.ru': True}
number_account = dataMngt.get_number_account()
def check_app_data(one_account, res_auth, site):
if res_auth == 'frozen':
print 'Account of "'+vk.user_data[site][1]+'" is frozen'
if one_account[site] == False: reauthorize(site, account='next')
elif not vk.app_data[site].has_key('access_token'):
print 'Access token for "'+vk.user_data[site][1]+'" wasn\'t given!'
if one_account[site] == False: reauthorize(site, account='next')
def reauthorize(site, account='next'):
global vk, number_account
time.sleep(10)
if account == 'same': number_account[site] -= 1
dataMngt.reload_user_data(vk.user_data, number_account, site)
res_auth = vk.do_authorize(site)
check_app_data(one_account, res_auth, site)
def authorize(*sites):
global vk, one_account, number_account
user_data = dataMngt.load_user_data(one_account, number_account)
vk = VKAPI.VK(user_data)
for site in sites:
res_auth = vk.do_authorize(site)
check_app_data(one_account, res_auth, site)
return vk
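# Example usage (sketch; assumes valid account credentials in the data files read
# by dataMngt -- the names below are this module's own functions):
#   authorize('vk.com')
#   print vk_usersGet('1', 'first_name')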
################# ------ OK.RU ----- ################
def ok_usersSetStatus(status):
return vk.api('ok.ru', 'users.setStatus', {'status': status})[1]
def ok_usersGetInfo(uid, fields, emptyPictures='false'):
params = {
'uid': uid,
'fields': fields,
'emptyPictures': emptyPictures}
return vk.api('ok.ru', 'users.getInfo', params)[1]
def ok_photosEditPhoto(photo_id, description):
params = {
'photo_id': photo_id,
'description': description}
return vk.api('ok.ru', 'photos.editPhoto', params)[1]
def ok_photosGetPhotos(uid, fid='', aid=''):
params = {
'uid': uid,
'fid': fid,
'aid': aid}
return vk.api('ok.ru', 'photos.getPhotos', params)[1]
################# ------ VK.COM ----- ################
def proccessing_error(cond, res):
global one_account
if cond == 'success': return res
elif cond == 'error':
code = res['code']
msg = res['msg']
oa = one_account['vk.com']
print code, msg
if code == 5:
reauthorize('vk.com', 'next')
print '\n Connected to', vk.user_data['vk.com'][1], '\n'
return 'reauthed'
elif code == 15: pass
        elif code == 220: # spam protection
if oa == False:
reauthorize('vk.com', 'next')
print '\n Connected to', vk.user_data['vk.com'][1], '\n'
return 'reauthed'
def vk_usersGet(user_ids, fields, name_case='nom'):
params = {
'user_ids': user_ids,
'fields': fields,
'name_case': name_case}
cond, res = vk.api('vk.com', 'users.get', params)
return proccessing_error(cond, res)
def vk_wallPost(owner_id, message, attachments='', from_group=0):
params = {
'owner_id': owner_id,
'message': message,
'attachments': attachments,
'from_group': from_group}
cond, res = vk.api('vk.com', 'wall.post', params)
return proccessing_error(cond, res)
def vk_newsfeedSearch(q, count, start_from='', end_time='', extended=0):
params = {
'q': q,
'count': count,
'start_from': start_from,
'end_time': end_time,
'extended': extended}
cond, res = vk.api('vk.com', 'newsfeed.search', params)
return proccessing_error(cond, res)
def vk_groupsSearch(q, count, offset=0, city_id=''):
parametrs = {
'q': q, 'offset': offset, 'count': count,
'sort': 2, 'city_id': city_id}
cond, res = vk.api('vk.com', 'groups.search', parametrs)
return proccessing_error(cond, res)
def vk_groupsGetById(group_id, fields=''):
parametrs = {'group_id': group_id, 'fields': fields}
cond, res = vk.api('vk.com', 'groups.getById', parametrs)
return proccessing_error(cond, res)
def vk_groupsGetMembers(group_id, count, offset=0, fields=''):
parametrs = {
'group_id': group_id,
'fields': fields,
'offset': offset,
'count': count}
cond, res = vk.api('vk.com', 'groups.getMembers', parametrs)
return proccessing_error(cond, res)
| gpl-3.0 | -2,256,723,707,737,997,300 | 33.112 | 75 | 0.579737 | false |
chrislit/abydos | abydos/distance/_lcprefix.py | 1 | 4129 | # Copyright 2018-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._lcprefix.
Longest common prefix
"""
from os.path import commonprefix
from typing import List, cast
from ._distance import _Distance
__all__ = ['LCPrefix']
class LCPrefix(_Distance):
"""Longest common prefix.
.. versionadded:: 0.4.0
"""
def lcprefix(self, strings: List[str]) -> str:
"""Return the longest common prefix of a list of strings.
Longest common prefix (LCPrefix).
Parameters
----------
strings : list of strings
Strings for comparison
Returns
-------
str
The longest common prefix
Examples
--------
>>> pfx = LCPrefix()
>>> pfx.lcprefix(['cat', 'hat'])
''
>>> pfx.lcprefix(['Niall', 'Neil'])
'N'
>>> pfx.lcprefix(['aluminum', 'Catalan'])
''
>>> pfx.lcprefix(['ATCG', 'TAGC'])
''
.. versionadded:: 0.4.0
"""
return cast(str, commonprefix(strings))
def dist_abs(self, src: str, tar: str, *args: str) -> int:
"""Return the length of the longest common prefix of the strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
*args : strs
Additional strings for comparison
Raises
------
        TypeError
All arguments must be of type str
Returns
-------
int
The length of the longest common prefix
Examples
--------
>>> pfx = LCPrefix()
>>> pfx.dist_abs('cat', 'hat')
0
>>> pfx.dist_abs('Niall', 'Neil')
1
>>> pfx.dist_abs('aluminum', 'Catalan')
0
>>> pfx.dist_abs('ATCG', 'TAGC')
0
.. versionadded:: 0.4.0
"""
strings = [src, tar]
for arg in args:
if isinstance(arg, str):
strings.append(arg)
else:
raise TypeError('All arguments must be of type str')
return len(self.lcprefix(strings))
def sim(self, src: str, tar: str, *args: str) -> float:
r"""Return the longest common prefix similarity of two or more strings.
Longest common prefix similarity (:math:`sim_{LCPrefix}`).
This employs the LCPrefix function to derive a similarity metric:
:math:`sim_{LCPrefix}(s,t) = \frac{|LCPrefix(s,t)|}{max(|s|, |t|)}`
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
*args : strs
Additional strings for comparison
Returns
-------
float
LCPrefix similarity
Examples
--------
>>> pfx = LCPrefix()
>>> pfx.sim('cat', 'hat')
0.0
>>> pfx.sim('Niall', 'Neil')
0.2
>>> pfx.sim('aluminum', 'Catalan')
0.0
>>> pfx.sim('ATCG', 'TAGC')
0.0
.. versionadded:: 0.4.0
"""
if src == tar:
return 1.0
elif not src or not tar:
return 0.0
dist = self.dist_abs(src, tar, *args)
maxlen = max(len(src), len(tar), *[len(arg) for arg in args])
return dist / maxlen
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 | 6,304,868,237,515,560,000 | 23.873494 | 79 | 0.534996 | false |
renalreg/radar | tests/api/serializers/test_salt_wasting_clinical_features_serializer.py | 1 | 7742 | from datetime import date
from cornflake.exceptions import ValidationError
import pytest
from radar.api.serializers.salt_wasting import SaltWastingClinicalFeaturesSerializer
from radar.models.patient_demographics import PatientDemographics
from radar.models.patients import Patient
from radar.models.users import User
@pytest.fixture
def patient():
patient = Patient()
patient_demographics = PatientDemographics()
patient_demographics.date_of_birth = date(2000, 1, 1)
patient.patient_demographics.append(patient_demographics)
return patient
@pytest.fixture
def clinical_features(patient):
return {
'patient': patient,
'normal_pregnancy': False,
'abnormal_pregnancy_text': 'Foo',
'neurological_problems': True,
'seizures': True,
'abnormal_gait': True,
'deafness': True,
'other_neurological_problem': True,
'other_neurological_problem_text': 'Bar',
'joint_problems': True,
'joint_problems_age': 21,
'x_ray_abnormalities': True,
'chondrocalcinosis': True,
'other_x_ray_abnormality': True,
'other_x_ray_abnormality_text': 'Baz'
}
def test_valid(clinical_features):
obj = valid(clinical_features)
assert obj.normal_pregnancy is False
assert obj.abnormal_pregnancy_text == 'Foo'
assert obj.neurological_problems is True
assert obj.seizures is True
assert obj.abnormal_gait is True
assert obj.deafness is True
assert obj.other_neurological_problem is True
assert obj.other_neurological_problem_text == 'Bar'
assert obj.joint_problems is True
assert obj.joint_problems_age == 21
assert obj.x_ray_abnormalities is True
assert obj.chondrocalcinosis is True
assert obj.other_x_ray_abnormality is True
assert obj.other_x_ray_abnormality_text == 'Baz'
def test_normal_pregnancy_true(clinical_features):
clinical_features['normal_pregnancy'] = True
obj = valid(clinical_features)
assert obj.abnormal_pregnancy_text is None
def test_normal_pregnancy_true_none(clinical_features):
clinical_features['normal_pregnancy'] = None
valid(clinical_features)
def test_normal_pregnancy_true_text_none(clinical_features):
clinical_features['normal_pregnancy'] = True
clinical_features['abnormal_pregnancy_text'] = None
obj = valid(clinical_features)
assert obj.abnormal_pregnancy_text is None
def test_normal_pregnancy_true_text_blank(clinical_features):
clinical_features['normal_pregnancy'] = True
clinical_features['abnormal_pregnancy_text'] = ''
obj = valid(clinical_features)
assert obj.abnormal_pregnancy_text is None
def test_normal_pregnancy_false_text_none(clinical_features):
clinical_features['abnormal_pregnancy_text'] = None
invalid(clinical_features)
def test_normal_pregnancy_false_text_blank(clinical_features):
clinical_features['abnormal_pregnancy_text'] = ''
invalid(clinical_features)
def test_neurological_problems_false(clinical_features):
    clinical_features['neurological_problems'] = False
    obj = valid(clinical_features)
    assert obj.seizures is None
    assert obj.abnormal_gait is None
    assert obj.deafness is None
    assert obj.other_neurological_problem is None
    assert obj.other_neurological_problem_text is None
def test_neurological_problems_none(clinical_features):
clinical_features['neurological_problems'] = None
valid(clinical_features)
def test_neurological_problems_true_seizures_none(clinical_features):
clinical_features['seizures'] = None
invalid(clinical_features)
def test_neurological_problems_false_seizures_none(clinical_features):
clinical_features['neurological_problems'] = False
clinical_features['seizures'] = None
valid(clinical_features)
def test_neurological_problems_true_abnormal_gait_none(clinical_features):
clinical_features['abnormal_gait'] = None
invalid(clinical_features)
def test_neurological_problems_false_abnormal_gait_none(clinical_features):
clinical_features['neurological_problems'] = False
clinical_features['abnormal_gait'] = None
valid(clinical_features)
def test_neurological_problems_true_deafness_none(clinical_features):
clinical_features['deafness'] = None
invalid(clinical_features)
def test_neurological_problems_false_deafness_none(clinical_features):
clinical_features['neurological_problems'] = False
clinical_features['deafness'] = None
valid(clinical_features)
def test_neurological_problems_true_other_neurological_problem_none(clinical_features):
clinical_features['other_neurological_problem'] = None
invalid(clinical_features)
def test_other_neurological_problem_false_text_none(clinical_features):
clinical_features['other_neurological_problem'] = False
clinical_features['other_neurological_problem_text'] = None
valid(clinical_features)
def test_other_neurological_problem_true_text_blank(clinical_features):
clinical_features['other_neurological_problem_text'] = ''
invalid(clinical_features)
def test_other_neurological_problem_true_text_none(clinical_features):
clinical_features['other_neurological_problem_text'] = None
invalid(clinical_features)
def test_joint_problems_false(clinical_features):
clinical_features['joint_problems'] = False
obj = valid(clinical_features)
assert obj.joint_problems_age is None
assert obj.x_ray_abnormalities is None
assert obj.chondrocalcinosis is None
assert obj.other_x_ray_abnormality is None
assert obj.other_x_ray_abnormality_text is None
def test_joint_problems_none(clinical_features):
    clinical_features['joint_problems'] = None
valid(clinical_features)
def test_joint_problems_true_joint_problems_age_none(clinical_features):
clinical_features['joint_problems_age'] = None
invalid(clinical_features)
def test_joint_problems_false_joint_problems_age_none(clinical_features):
clinical_features['joint_problems'] = False
clinical_features['joint_problems_age'] = None
valid(clinical_features)
def test_joint_problems_true_joint_problems_age_too_young(clinical_features):
clinical_features['joint_problems_age'] = -1
invalid(clinical_features)
def test_joint_problems_true_joint_problems_age_too_old(clinical_features):
    clinical_features['joint_problems_age'] = 121
invalid(clinical_features)
def test_joint_problems_true_x_ray_abnormalities_none(clinical_features):
clinical_features['x_ray_abnormalities'] = None
invalid(clinical_features)
def test_joint_problems_false_x_ray_abnormalities_none(clinical_features):
clinical_features['joint_problems'] = False
clinical_features['x_ray_abnormalities'] = None
valid(clinical_features)
def test_joint_problems_true_chondrocalcinosis_none(clinical_features):
clinical_features['chondrocalcinosis'] = None
invalid(clinical_features)
def test_joint_problems_false_chondrocalcinosis_none(clinical_features):
clinical_features['joint_problems'] = False
clinical_features['chondrocalcinosis'] = None
valid(clinical_features)
def test_joint_problems_true_other_x_ray_abnormality_none(clinical_features):
clinical_features['other_x_ray_abnormality'] = None
invalid(clinical_features)
def test_joint_problems_false_other_x_ray_abnormality_none(clinical_features):
clinical_features['joint_problems'] = False
clinical_features['other_x_ray_abnormality'] = None
valid(clinical_features)
def invalid(data):
with pytest.raises(ValidationError) as e:
valid(data)
return e
def valid(data):
serializer = SaltWastingClinicalFeaturesSerializer(data=data, context={'user': User(is_admin=True)})
serializer.is_valid(raise_exception=True)
return serializer.save()
| agpl-3.0 | -9,060,480,136,143,703,000 | 30.991736 | 104 | 0.740765 | false |
AlexeyKruglov/Skeinforge-fabmetheus | skeinforge_application/skeinforge_plugins/craft_plugins/inset.py | 1 | 21880 | #! /usr/bin/env python
"""
This page is in the table of contents.
Inset will inset the outside outlines by half the edge width, and outset the inside outlines by the same amount.
The inset manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Inset
==Settings==
===Add Custom Code for Temperature Reading===
Default is on.
When selected, the M105 custom code for temperature reading will be added at the beginning of the file.
===Infill in Direction of Bridge===
Default is on.
When selected, the infill will be in the direction of any bridge across a gap, so that the fill will be able to span a bridge more easily.
===Loop Order Choice===
Default loop order choice is 'Ascending Area'.
When overlap is to be removed, for each loop, the overlap is checked against the list of loops already extruded. If the latest loop overlaps an already extruded loop, the overlap is removed from the latest loop. The loops are ordered according to their areas.
====Ascending Area====
When selected, the loops will be ordered in ascending area. With thin walled parts, if overlap is being removed the outside of the container will not be extruded. Holes will be the correct size.
====Descending Area====
When selected, the loops will be ordered in descending area. With thin walled parts, if overlap is being removed the inside of the container will not be extruded. Holes will be missing the interior wall so they will be slightly wider than model size.
===Overlap Removal Width over Perimeter Width===
Default is 0.6.
Defines the ratio of the overlap removal width over the edge width. Any part of the extrusion that comes within the overlap removal width of another is removed. This is to prevent the extruder from depositing two extrusions right beside each other. If the 'Overlap Removal Width over Perimeter Width' is less than 0.2, the overlap will not be removed.
===Turn Extruder Heater Off at Shut Down===
Default is on.
When selected, the M104 S0 gcode line will be added to the end of the file to turn the extruder heater off by setting the extruder heater temperature to 0.
===Volume Fraction===
Default: 0.93
The 'Volume Fraction' is the estimated volume of the thread compared to the box defined by the layer height and infill width. This is used in dwindle, splodge, and statistic. It is in inset because inset is a required extrusion tool, earlier in the chain than dwindle and splodge. In dwindle and splodge it is used to determine the filament volume, in statistic it is used to determine the extrusion diameter.
==Examples==
The following examples inset the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and inset.py.
> python inset.py
This brings up the inset dialog.
> python inset.py Screw Holder Bottom.stl
The inset tool is parsing the file:
Screw Holder Bottom.stl
..
The inset tool has created the file:
.. Screw Holder Bottom_inset.gcode
"""
from __future__ import absolute_import
try:
import psyco
psyco.full()
except:
pass
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.geometry.solids import triangle_mesh
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import cmath
import math
import os
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def addAlreadyFilledArounds( alreadyFilledArounds, loop, radius ):
"Add already filled loops around loop to alreadyFilledArounds."
radius = abs(radius)
alreadyFilledLoop = []
slightlyGreaterThanRadius = intercircle.globalIntercircleMultiplier * radius
muchGreaterThanRadius = 2.5 * radius
centers = intercircle.getCentersFromLoop( loop, slightlyGreaterThanRadius )
for center in centers:
alreadyFilledInset = intercircle.getSimplifiedInsetFromClockwiseLoop( center, radius )
if intercircle.isLargeSameDirection( alreadyFilledInset, center, radius ):
alreadyFilledLoop.append( alreadyFilledInset )
if len( alreadyFilledLoop ) > 0:
alreadyFilledArounds.append( alreadyFilledLoop )
def addSegmentOutline( isThick, outlines, pointBegin, pointEnd, width ):
"Add a diamond or hexagonal outline for a line segment."
width = abs( width )
exclusionWidth = 0.6 * width
slope = 0.2
if isThick:
slope = 3.0
exclusionWidth = 0.8 * width
segment = pointEnd - pointBegin
segmentLength = abs(segment)
if segmentLength == 0.0:
return
normalizedSegment = segment / segmentLength
outline = []
segmentYMirror = complex(normalizedSegment.real, -normalizedSegment.imag)
pointBeginRotated = segmentYMirror * pointBegin
pointEndRotated = segmentYMirror * pointEnd
along = 0.05
alongLength = along * segmentLength
if alongLength > 0.1 * exclusionWidth:
along *= 0.1 * exclusionWidth / alongLength
alongEnd = 1.0 - along
remainingToHalf = 0.5 - along
alongToWidth = exclusionWidth / slope / segmentLength
pointBeginIntermediate = euclidean.getIntermediateLocation( along, pointBeginRotated, pointEndRotated )
pointEndIntermediate = euclidean.getIntermediateLocation( alongEnd, pointBeginRotated, pointEndRotated )
outline.append( pointBeginIntermediate )
verticalWidth = complex( 0.0, exclusionWidth )
if alongToWidth > 0.9 * remainingToHalf:
verticalWidth = complex( 0.0, slope * remainingToHalf * segmentLength )
middle = ( pointBeginIntermediate + pointEndIntermediate ) * 0.5
middleDown = middle - verticalWidth
middleUp = middle + verticalWidth
outline.append( middleUp )
outline.append( pointEndIntermediate )
outline.append( middleDown )
else:
alongOutsideBegin = along + alongToWidth
alongOutsideEnd = alongEnd - alongToWidth
outsideBeginCenter = euclidean.getIntermediateLocation( alongOutsideBegin, pointBeginRotated, pointEndRotated )
outsideBeginCenterDown = outsideBeginCenter - verticalWidth
outsideBeginCenterUp = outsideBeginCenter + verticalWidth
outsideEndCenter = euclidean.getIntermediateLocation( alongOutsideEnd, pointBeginRotated, pointEndRotated )
outsideEndCenterDown = outsideEndCenter - verticalWidth
outsideEndCenterUp = outsideEndCenter + verticalWidth
outline.append( outsideBeginCenterUp )
outline.append( outsideEndCenterUp )
outline.append( pointEndIntermediate )
outline.append( outsideEndCenterDown )
outline.append( outsideBeginCenterDown )
outlines.append( euclidean.getRotatedComplexes( normalizedSegment, outline ) )
def getBridgeDirection(belowLoops, layerLoops, radius):
'Get span direction for the majority of the overhanging extrusion edge, if any.'
if len(belowLoops) < 1:
return None
belowOutsetLoops = intercircle.getInsetLoopsFromLoops(belowLoops, -radius)
bridgeRotation = complex()
for loop in layerLoops:
for pointIndex, point in enumerate(loop):
previousIndex = (pointIndex + len(loop) - 1) % len(loop)
bridgeRotation += getOverhangDirection(belowOutsetLoops, loop[previousIndex], point)
if abs(bridgeRotation) < 0.75 * radius:
return None
else:
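		# the complex square root halves the doubled angle, recovering the dominant span direction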
return cmath.sqrt(bridgeRotation / abs(bridgeRotation))
def getCraftedText( fileName, text='', repository=None):
"Inset the preface file or text."
return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository)
def getCraftedTextFromText(gcodeText, repository=None):
"Inset the preface gcode text."
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'inset'):
return gcodeText
if repository == None:
repository = settings.getReadRepository( InsetRepository() )
return InsetSkein().getCraftedGcode(gcodeText, repository)
def getDoubledRoundZ( overhangingSegment, segmentRoundZ ):
'Get doubled plane angle around z of the overhanging segment.'
endpoint = overhangingSegment[0]
roundZ = endpoint.point - endpoint.otherEndpoint.point
roundZ *= segmentRoundZ
if abs( roundZ ) == 0.0:
return complex()
if roundZ.real < 0.0:
roundZ *= - 1.0
roundZLength = abs( roundZ )
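	# squaring the unit direction doubles its angle, so opposite overhang directions reinforce instead of canceling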
return roundZ * roundZ / roundZLength
def getInteriorSegments(loops, segments):
'Get segments inside the loops.'
interiorSegments = []
for segment in segments:
center = 0.5 * (segment[0].point + segment[1].point)
if euclidean.getIsInFilledRegion(loops, center):
interiorSegments.append(segment)
return interiorSegments
def getIsIntersectingWithinList(loop, loopList):
"Determine if the loop is intersecting or is within the loop list."
leftPoint = euclidean.getLeftPoint(loop)
for otherLoop in loopList:
if euclidean.getNumberOfIntersectionsToLeft(otherLoop, leftPoint) % 2 == 1:
return True
return euclidean.isLoopIntersectingLoops(loop, loopList)
def getNewRepository():
'Get new repository.'
return InsetRepository()
def getOverhangDirection( belowOutsetLoops, segmentBegin, segmentEnd ):
	'Get the span direction contribution from the endpoint segments which overhang the layer below.'
segment = segmentEnd - segmentBegin
normalizedSegment = euclidean.getNormalized( complex( segment.real, segment.imag ) )
segmentYMirror = complex(normalizedSegment.real, -normalizedSegment.imag)
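	# multiplying by the conjugate of the segment direction rotates the segment onto a horizontal line, reducing overhang tests to X-axis intersections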
segmentBegin = segmentYMirror * segmentBegin
segmentEnd = segmentYMirror * segmentEnd
solidXIntersectionList = []
y = segmentBegin.imag
solidXIntersectionList.append( euclidean.XIntersectionIndex( - 1.0, segmentBegin.real ) )
solidXIntersectionList.append( euclidean.XIntersectionIndex( - 1.0, segmentEnd.real ) )
for belowLoopIndex in xrange( len( belowOutsetLoops ) ):
belowLoop = belowOutsetLoops[ belowLoopIndex ]
rotatedOutset = euclidean.getRotatedComplexes( segmentYMirror, belowLoop )
euclidean.addXIntersectionIndexesFromLoopY( rotatedOutset, belowLoopIndex, solidXIntersectionList, y )
overhangingSegments = euclidean.getSegmentsFromXIntersectionIndexes( solidXIntersectionList, y )
overhangDirection = complex()
for overhangingSegment in overhangingSegments:
overhangDirection += getDoubledRoundZ( overhangingSegment, normalizedSegment )
return overhangDirection
def getSegmentsFromLoopListsPoints( loopLists, pointBegin, pointEnd ):
"Get endpoint segments from the beginning and end of a line segment."
normalizedSegment = pointEnd - pointBegin
normalizedSegmentLength = abs( normalizedSegment )
if normalizedSegmentLength == 0.0:
return []
normalizedSegment /= normalizedSegmentLength
segmentYMirror = complex(normalizedSegment.real, -normalizedSegment.imag)
pointBeginRotated = segmentYMirror * pointBegin
pointEndRotated = segmentYMirror * pointEnd
rotatedLoopLists = []
for loopList in loopLists:
rotatedLoopLists.append(euclidean.getRotatedComplexLists(segmentYMirror, loopList))
xIntersectionIndexList = []
xIntersectionIndexList.append( euclidean.XIntersectionIndex( - 1, pointBeginRotated.real ) )
xIntersectionIndexList.append( euclidean.XIntersectionIndex( - 1, pointEndRotated.real ) )
euclidean.addXIntersectionIndexesFromLoopListsY( rotatedLoopLists, xIntersectionIndexList, pointBeginRotated.imag )
segments = euclidean.getSegmentsFromXIntersectionIndexes( xIntersectionIndexList, pointBeginRotated.imag )
for segment in segments:
for endpoint in segment:
endpoint.point *= normalizedSegment
return segments
def isCloseToLast( paths, point, radius ):
"Determine if the point is close to the last point of the last path."
if len(paths) < 1:
return False
lastPath = paths[-1]
return abs( lastPath[-1] - point ) < radius
def isIntersectingItself( loop, width ):
"Determine if the loop is intersecting itself."
outlines = []
for pointIndex in xrange(len(loop)):
pointBegin = loop[pointIndex]
pointEnd = loop[(pointIndex + 1) % len(loop)]
if euclidean.isLineIntersectingLoops( outlines, pointBegin, pointEnd ):
return True
addSegmentOutline( False, outlines, pointBegin, pointEnd, width )
return False
def isIntersectingWithinLists( loop, loopLists ):
"Determine if the loop is intersecting or is within the loop lists."
for loopList in loopLists:
if getIsIntersectingWithinList( loop, loopList ):
return True
return False
def writeOutput(fileName, shouldAnalyze=True):
"Inset the carving of a gcode file."
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'inset', shouldAnalyze)
class InsetRepository:
"A class to handle the inset settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.inset.html', self)
self.baseNameSynonymDictionary = {
'Infill in Direction of Bridge' : 'carve.csv',
'Infill Width over Thickness (ratio):' : 'fill.csv'}
self.fileNameInput = settings.FileNameInput().getFromFileName(fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Inset', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Inset')
self.addCustomCodeForTemperatureReading = settings.BooleanSetting().getFromValue('Add Custom Code for Temperature Reading', self, True)
self.infillInDirectionOfBridge = settings.BooleanSetting().getFromValue('Infill in Direction of Bridge', self, True)
self.infillWidthOverThickness = settings.FloatSpin().getFromValue(1.3, 'Infill Width over Thickness (ratio):', self, 1.7, 1.5)
self.loopOrderChoice = settings.MenuButtonDisplay().getFromName('Loop Order Choice:', self )
self.loopOrderAscendingArea = settings.MenuRadio().getFromMenuButtonDisplay(self.loopOrderChoice, 'Ascending Area', self, True)
self.loopOrderDescendingArea = settings.MenuRadio().getFromMenuButtonDisplay(self.loopOrderChoice, 'Descending Area', self, False)
self.overlapRemovalWidthOverEdgeWidth = settings.FloatSpin().getFromValue(0.3, 'Overlap Removal Width over Perimeter Width (ratio):', self, 0.9, 0.6)
self.turnExtruderHeaterOffAtShutDown = settings.BooleanSetting().getFromValue('Turn Extruder Heater Off at Shut Down', self, True)
self.volumeFraction = settings.FloatSpin().getFromValue(0.7, 'Volume Fraction (ratio):', self, 1.0, 0.93)
self.executeTitle = 'Inset'
def execute(self):
"Inset button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class InsetSkein:
"A class to inset a skein of extrusions."
def __init__(self):
'Initialize.'
self.belowLoops = []
self.boundary = None
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.layerCount = settings.LayerCount()
self.lineIndex = 0
self.loopLayer = None
def addGcodeFromPerimeterPaths(self, isIntersectingSelf, loop, loopLayer, loopLists, radius):
"Add the edge paths to the output."
segments = []
outlines = []
thickOutlines = []
allLoopLists = loopLists[:] + [thickOutlines]
aroundLists = loopLists
for pointIndex in xrange(len(loop)):
pointBegin = loop[pointIndex]
pointEnd = loop[(pointIndex + 1) % len(loop)]
if isIntersectingSelf:
if euclidean.isLineIntersectingLoops(outlines, pointBegin, pointEnd):
segments += getSegmentsFromLoopListsPoints(allLoopLists, pointBegin, pointEnd)
else:
segments += getSegmentsFromLoopListsPoints(loopLists, pointBegin, pointEnd)
addSegmentOutline(False, outlines, pointBegin, pointEnd, self.overlapRemovalWidth)
addSegmentOutline(True, thickOutlines, pointBegin, pointEnd, self.overlapRemovalWidth)
else:
segments += getSegmentsFromLoopListsPoints(loopLists, pointBegin, pointEnd)
edgePaths = []
path = []
muchSmallerThanRadius = 0.1 * radius
segments = getInteriorSegments(loopLayer.loops, segments)
for segment in segments:
pointBegin = segment[0].point
if not isCloseToLast(edgePaths, pointBegin, muchSmallerThanRadius):
path = [pointBegin]
edgePaths.append(path)
path.append(segment[1].point)
if len(edgePaths) > 1:
firstPath = edgePaths[0]
lastPath = edgePaths[-1]
if abs(lastPath[-1] - firstPath[0]) < 0.1 * muchSmallerThanRadius:
connectedBeginning = lastPath[: -1] + firstPath
edgePaths[0] = connectedBeginning
edgePaths.remove(lastPath)
muchGreaterThanRadius = 6.0 * radius
for edgePath in edgePaths:
if euclidean.getPathLength(edgePath) > muchGreaterThanRadius:
self.distanceFeedRate.addGcodeFromThreadZ(edgePath, loopLayer.z)
def addGcodeFromRemainingLoop(self, loop, loopLayer, loopLists, radius):
"Add the remainder of the loop which does not overlap the alreadyFilledArounds loops."
centerOutset = intercircle.getLargestCenterOutsetLoopFromLoopRegardless(loop, radius)
euclidean.addNestedRingBeginning(self.distanceFeedRate, centerOutset.outset, loopLayer.z)
self.addGcodePerimeterBlockFromRemainingLoop(centerOutset.center, loopLayer, loopLists, radius)
self.distanceFeedRate.addLine('(</boundaryPerimeter>)')
self.distanceFeedRate.addLine('(</nestedRing>)')
def addGcodePerimeterBlockFromRemainingLoop(self, loop, loopLayer, loopLists, radius):
"Add the perimter block remainder of the loop which does not overlap the alreadyFilledArounds loops."
if self.repository.overlapRemovalWidthOverEdgeWidth.value < 0.2:
self.distanceFeedRate.addPerimeterBlock(loop, loopLayer.z)
return
isIntersectingSelf = isIntersectingItself(loop, self.overlapRemovalWidth)
if isIntersectingWithinLists(loop, loopLists) or isIntersectingSelf:
self.addGcodeFromPerimeterPaths(isIntersectingSelf, loop, loopLayer, loopLists, radius)
else:
self.distanceFeedRate.addPerimeterBlock(loop, loopLayer.z)
addAlreadyFilledArounds(loopLists, loop, self.overlapRemovalWidth)
def addInitializationToOutput(self):
"Add initialization gcode to the output."
if self.repository.addCustomCodeForTemperatureReading.value:
self.distanceFeedRate.addLine('M105') # Custom code for temperature reading.
def addInset(self, loopLayer):
"Add inset to the layer."
alreadyFilledArounds = []
extrudateLoops = intercircle.getInsetLoopsFromLoops(loopLayer.loops, self.halfEdgeWidth)
if self.repository.infillInDirectionOfBridge.value:
bridgeRotation = getBridgeDirection(self.belowLoops, extrudateLoops, self.halfEdgeWidth)
if bridgeRotation != None:
self.distanceFeedRate.addTagBracketedLine('bridgeRotation', bridgeRotation)
self.belowLoops = loopLayer.loops
triangle_mesh.sortLoopsInOrderOfArea(not self.repository.loopOrderAscendingArea.value, extrudateLoops)
for extrudateLoop in extrudateLoops:
self.addGcodeFromRemainingLoop(extrudateLoop, loopLayer, alreadyFilledArounds, self.halfEdgeWidth)
def getCraftedGcode(self, gcodeText, repository):
"Parse gcode text and store the bevel gcode."
self.repository = repository
self.lines = archive.getTextLines(gcodeText)
self.parseInitialization()
for line in self.lines[self.lineIndex :]:
self.parseLine(line)
return self.distanceFeedRate.output.getvalue()
def parseInitialization(self):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(<decimalPlacesCarried>':
self.addInitializationToOutput()
elif firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('inset')
return
elif firstWord == '(<layerHeight>':
layerHeight = float(splitLine[1])
self.infillWidth = self.repository.infillWidthOverThickness.value * layerHeight
self.distanceFeedRate.addTagRoundedLine('infillWidth', self.infillWidth)
self.distanceFeedRate.addTagRoundedLine('volumeFraction', self.repository.volumeFraction.value)
elif firstWord == '(<edgeWidth>':
self.edgeWidth = float(splitLine[1])
self.halfEdgeWidth = 0.5 * self.edgeWidth
self.overlapRemovalWidth = self.edgeWidth * self.repository.overlapRemovalWidthOverEdgeWidth.value
self.distanceFeedRate.addLine(line)
def parseLine(self, line):
"Parse a gcode line and add it to the inset skein."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == '(<boundaryPoint>':
location = gcodec.getLocationFromSplitLine(None, splitLine)
self.boundary.append(location.dropAxis())
elif firstWord == '(</crafting>)':
self.distanceFeedRate.addLine(line)
if self.repository.turnExtruderHeaterOffAtShutDown.value:
self.distanceFeedRate.addLine('M104 S0') # Turn extruder heater off.
return
elif firstWord == '(<layer>':
self.layerCount.printProgressIncrement('inset')
self.loopLayer = euclidean.LoopLayer(float(splitLine[1]))
self.distanceFeedRate.addLine(line)
elif firstWord == '(</layer>)':
self.addInset(self.loopLayer)
self.loopLayer = None
elif firstWord == '(<nestedRing>)':
self.boundary = []
self.loopLayer.loops.append(self.boundary)
if self.loopLayer == None:
self.distanceFeedRate.addLine(line)
def main():
"Display the inset dialog."
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
| agpl-3.0 | 3,616,575,776,640,460,000 | 44.774059 | 409 | 0.782038 | false |
wking/swc-amy | workshops/migrations/0054_self_organized_host.py | 1 | 1799 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import django
from django.db import models, migrations
from django.db.models import Q
def add_self_organized_host(apps, schema_editor):
"""Make new host: self-organized."""
Host = apps.get_model('workshops', 'Host')
Host.objects.create(domain='self-organized', fullname='self-organized',
country='W3')
def update_administrator_to_self_organized(apps, schema_editor):
"""Find all events that were self-organized and set administrator for them
to be "self-organized"."""
Host = apps.get_model('workshops', 'Host')
self_org = Host.objects.get(fullname='self-organized')
Event = apps.get_model('workshops', 'Event')
Event.objects.filter(administrator__isnull=True) \
.filter(
Q(invoice_status='na-self-org') |
Q(notes__contains='self-organized') |
Q(notes__contains='self organized')
) \
.update(administrator=self_org)
class Migration(migrations.Migration):
dependencies = [
('workshops', '0053_merge'),
]
operations = [
# some missing migration, totally healthy (changes only validators for the field)
migrations.AlterField(
model_name='event',
name='url',
field=models.CharField(validators=[django.core.validators.RegexValidator(re.compile('https?://github\\.com/(?P<name>[^/]+)/(?P<repo>[^/]+)/?', 32), inverse_match=True)], unique=True, max_length=100, help_text='Setting this and startdate "publishes" the event.<br />Use link to the event\'s website.', blank=True, null=True),
),
migrations.RunPython(add_self_organized_host),
migrations.RunPython(update_administrator_to_self_organized),
]
| mit | 1,508,810,104,354,600,000 | 35.714286 | 336 | 0.645359 | false |
jackuess/listmodel | listmodel/models.py | 1 | 6658 | import re
try:
import ujson as json
except ImportError:
import json
try:
import jsonpath_rw
except ImportError:
jsonpath_rw = None
try:
import lxml.etree
except ImportError:
lxml = None
try:
import yaml
except ImportError:
yaml = None
class QueryAttr(object):
def __init__(self, query, factory=None):
self.query = query
self.factory = factory
def __get__(self, obj, cls):
        if obj is not None:
return self.create(obj, obj.__document__.execute_query(self.query))
else:
return self
def __call__(self, func):
self.create = func
return self
def create(self, obj, value):
if self.factory:
return self.factory(value)
else:
return value
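# Example (sketch): declare QueryAttr fields on a document subclass, e.g.
#   class Book(XMLDoc):
#       title = QueryAttr('//title/text()')
#   Book.fromstring('<book><title>Dune</title></book>').title  # -> 'Dune'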
class CsvRow(object):
class DocumentProxy(object):
def __init__(self, row, header_map):
self.row = row
self.header_map = header_map
def execute_query(self, column):
if isinstance(column, int):
return self.row[column]
else:
assert self.header_map
return self.row[self.header_map[column]]
def __init__(self, docproxy):
self.__document__ = docproxy
@classmethod
def fromfile(cls, file, separator=",", read_header=False):
if read_header:
row = next(file)
cols = row.strip().split(separator)
header_map = {col: pos for pos, col in enumerate(cols)}
else:
header_map = None
for row in file:
yield cls(cls.DocumentProxy(row.rstrip().split(separator),
header_map))
class XMLDoc(object):
class DocumentProxy(object):
@classmethod
def create_parser(cls):
return lxml.etree.XMLParser()
def __init__(self, doc):
self.doc = doc
@classmethod
def fromfile(cls, file):
cls.assert_lxml()
return cls(lxml.etree.parse(file, cls.create_parser()))
@classmethod
def fromstring(cls, str):
cls.assert_lxml()
return cls(lxml.etree.fromstring(str, cls.create_parser()))
@classmethod
def assert_lxml(cls):
assert lxml, "'lxml' module required"
def execute_query(self, xpath):
# if xpath.startswith("//"):
# xpath = ".{}".format(xpath)
nodes = self.doc.xpath(xpath)
if nodes:
if len(nodes) == 1:
return nodes[0]
else:
return nodes
def set_iterables(self, query):
self.iterables = iter(self.doc.xpath(query))
def get_next_iterable(self):
return next(self.iterables)
def __init__(self, docproxy):
self.__document__ = docproxy
@classmethod
def fromfile(cls, file):
return cls(docproxy=cls.DocumentProxy.fromfile(file))
@classmethod
def fromstring(cls, str):
return cls(docproxy=cls.DocumentProxy.fromstring(str))
def __iter__(self):
self.__document__.set_iterables(self.Iterable.__query__)
return self
def __next__(self):
iterable = self.__document__.get_next_iterable()
return self.Iterable(self.DocumentProxy(iterable))
next = __next__ # Python 2 compatibility
def __repr__(self):
cls = self.__class__
query_attributes = ["{}={!r}".format(attr, getattr(self, attr))
for attr in dir(cls)
if isinstance(getattr(cls, attr), QueryAttr)]
return "<{class_name} ({query_attributes})>".format(
class_name=cls.__name__,
query_attributes=", ".join(query_attributes)
)
class HTMLDoc(XMLDoc):
class DocumentProxy(XMLDoc.DocumentProxy):
@classmethod
def create_parser(cls):
return lxml.etree.HTMLParser()
class JSONDoc(XMLDoc):
class DocumentProxy(object):
def __init__(self, doc):
self.doc = doc
@classmethod
def fromfile(cls, file):
return cls(json.load(file))
@classmethod
def fromstring(cls, str):
return cls(json.loads(str))
def execute_query(self, json_path):
assert jsonpath_rw, "'jsonpath_rw' module required"
path_expr = jsonpath_rw.parse(json_path)
values = [match.value for match in path_expr.find(self.doc)]
if values:
if len(values) > 1:
return values
else:
return values[0]
def set_iterables(self, query):
self.iterables = iter(self.execute_query(query))
def get_next_iterable(self):
return next(self.iterables)
class YAMLDoc(JSONDoc):
class DocumentProxy(JSONDoc.DocumentProxy):
@classmethod
def fromfile(cls, file):
assert yaml, "'yaml' module required"
return cls(yaml.load(file))
@classmethod
def fromstring(cls, string):
return cls.fromfile(string)
class TextDoc(XMLDoc):
class DocumentProxy(object):
def __init__(self, doc):
self.doc = doc
@classmethod
def fromfile(cls, doc):
return cls(doc.read())
@classmethod
def fromstring(cls, doc):
return cls(doc)
def execute_query(self, regexp):
def groupdict_or_groups(match):
groupdict = match.groupdict()
if groupdict:
return match.groupdict()
return match.groups()
matches = list(re.finditer(regexp, self.doc, re.DOTALL))
if matches:
if len(matches) == 1:
return first_or_all(groupdict_or_groups(matches[0]))
else:
return map(first_or_all, [groupdict_or_groups(match)
for match in matches])
def set_iterables(self, regexp):
self.iterables = re.finditer(regexp, self.doc, re.DOTALL)
def get_next_iterable(self):
next_match = next(self.iterables)
try:
return next_match.group(1)
except IndexError:
return next_match.group(0)
def first_or_all(subject):
if len(subject) == 1:
return subject[0]
return subject
def set_name(name):
def decorator(decorated):
decorated.__name__ = name
return decorated
return decorator
| lgpl-3.0 | -9,084,589,181,227,222,000 | 26.399177 | 79 | 0.542055 | false |
johnwilmes/py-data-structures | py_data_structures/trie.py | 1 | 8045 | """A simple trie, or prefix tree, data structure."""
import itertools
import collections.abc
class Trie(collections.abc.MutableSet):
"""A simple prefix tree data structure.
A Trie is data structure for storing sequences of "names," which can be
aribtrary hashable objects. In the prototypical trie, names are characters
from an alphabet, and the trie is used to store words (see the subclass
StringTrie). The Trie is implemented internally as a tree, each node of
which is a Trie.Node object.
Args:
contents (optional): a collection of sequences of names to initially
populate the Trie
"""
class Node(object):
"""A node of a Trie object.
An instance represents a single node of a trie, corresponding a
specific prefix sequence of names, which may or may not be a complete
sequence. All attributes must be maintained by the user (Trie).
Attributes:
children (dict): mapping from names to child Nodes
terminal (bool): True if a complete sequence ends here,
False otherwise
size (int): the number of complete sequences for which this is a
prefix
"""
def __init__(self):
self.children = dict()
self.terminal = False
self.size = 0
def __len__(self):
return self.size
def __iter__(self):
"""Iterate over complete suffixes from `self`."""
if self.terminal:
yield iter(())
for name, child in self.children.items():
for suffix in child:
yield itertools.chain((name,), suffix)
def __contains__(self, seq):
"""Check if `seq` is a complete suffix from `self`
Returns:
True if `seq` is a valid suffix of `self, False otherwise.
"""
node = self
for name in seq:
if name not in node.children:
return False
node = node.children[name]
return node.terminal
class View(collections.abc.Set):
"""A view of a sub-trie of a Trie object.
This class allows accessing (but not modifying) the sequences in the
Trie completing a given prefix.
Args:
trie_root: the root node of the original Trie object of which this
is a sub-trie
prefix: the sequence of names prefixing everything in this
sub-trie, corresponding to the path from the root of the
original Trie to this sub-trie
"""
def __init__(self, trie_root, prefix):
self.prefix = prefix
self._trie_root = trie_root
# The root node of this sub-trie, corresponding to prefix. It will
# be found when needed
self._prefix_root = None
def _validate_root(self):
"""Ensure that `self._prefix_root` is valid for `self._trie_root`
and `self.prefix`.
If the entire sub-Trie at `self._prefix_root` is removed, then
`self._prefix_root` will no longer be a descendant of
`self._trie_root`. If a sequence with prefix `self.prefix` is
added back into the Trie, it will use a new Trie.Node in place of
self._prefix_root. We need to find that node and use it in place of
self._prefix_root.
"""
root = self._prefix_root
# check if root is still okay
if root is not None and (root.children or root.terminal):
return # everything is still okay
# self._root is invalid; check for a replacement node
self._prefix_root = None
node = self._trie_root
for name in self.prefix:
if name not in node.children:
return
node = node.children[name]
self._prefix_root = node
def __iter__(self):
self._validate_root()
if self._prefix_root is None:
return
for suffix in self._prefix_root:
yield itertools.chain(self.prefix, suffix)
def __len__(self):
self._validate_root()
if self._prefix_root is not None:
return self._prefix_root.size
return 0
def __contains__(self, seq):
self._validate_root()
if self._prefix_root is None:
return False
            seq = iter(seq)
            missing = object()  # sentinel: `seq` may be shorter than the prefix
            for name in self.prefix:
                if name != next(seq, missing):
                    return False
return seq in self._prefix_root
def __init__(self, contents=None):
self._root = self.Node() # root node corresponding to empty prefix
if contents is not None:
for seq in contents:
self.add(seq)
def __len__(self):
return self._root.size
def __iter__(self):
"""Iterate over complete suffixes from `self`."""
return iter(self._root)
def __contains__(self, seq):
"""Check if `seq` is a complete sequence in the Trie.
Returns:
True if `seq` is a valid suffix of `self, False otherwise.
"""
return seq in self._root
def add(self, seq):
"""Insert a sequence into the Trie.
After insertion, `seq` will be a valid suffix of `self`.
Args:
seq: an iterable of names to be inserted"""
parent_stack = list()
node = self._root
for name in seq:
parent_stack.append(node)
if name not in node.children:
node.children[name] = self.Node()
node = node.children[name]
if node.terminal:
return
node.terminal = True
node.size += 1
while parent_stack:
parent_stack.pop().size += 1
def discard(self, seq):
"""Remove `seq` from the Trie.
Prunes the trie to remove all prefixes for which `seq` is the only
valid completion
Args:
seq: an iterable of names to be removed
"""
parent_stack = list()
node = self._root
# Traverse to node representing `seq`
for name in seq:
parent_stack.append((node, name))
if name not in node.children:
return
node = node.children[name]
if not node.terminal:
return
        node.terminal = False
        node.size -= 1  # keep subtree counts in sync with add()
        for parent, _ in parent_stack:
            parent.size -= 1
        descendents = node.children
        while parent_stack and not descendents and not node.terminal:
            node, child_name = parent_stack.pop()
            del node.children[child_name]
            descendents = node.children
def __getitem__(self, prefix):
"""Get a view of the Trie corresponding to `prefix`.
`prefix` does not necessarily need to currently be in Trie. This view
will be dynamically updated as sequences are added or removed from
`self`.
Args:
prefix: a container (not a single-use iterator) with the sequence
of names identifying the sub-Trie to be viewed.
"""
if prefix is iter(prefix):
raise ValueError('prefix must be a container, not an iterator')
return self.View(self._root, prefix)
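# Example usage (sketch):
#   t = Trie([('a', 'b'), ('a',)])
#   len(t)                  # 2
#   t.discard(('a', 'b'))   # prunes the 'b' node but keeps the shorter ('a',)
#   ('a',) in t             # True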
class StringTrie(Trie):
"""A Trie class specialized for storing strings, rather than arbitrary
sequences of objects."""
class View(Trie.View):
"""A view of a sub-trie of a StringTrie object.
This class specializes the Trie.View class to yield strings as
appropriate, rather than generic iterators.
"""
def __iter__(self):
for word in super().__iter__():
yield ''.join(word)
def __iter__(self):
"""Override the default iterator to yield strings instead of
iterators"""
for word in super().__iter__():
yield ''.join(word)
| mit | 6,371,751,390,622,885,000 | 33.978261 | 79 | 0.558484 | false |
alex/changes | changes/jobs/create_job.py | 1 | 1548 | from flask import current_app
from changes.backends.base import UnrecoverableException
from changes.config import db
from changes.constants import Status, Result
from changes.jobs.sync_job import sync_job
from changes.models import Job, JobPlan
from changes.queue.task import tracked_task
def abort_create(task):
job = Job.query.get(task.kwargs['job_id'])
job.status = Status.finished
job.result = Result.aborted
db.session.add(job)
db.session.commit()
current_app.logger.exception('Unrecoverable exception creating job %s', job.id)
@tracked_task(on_abort=abort_create, max_retries=10)
def create_job(job_id):
job = Job.query.get(job_id)
if not job:
return
# we might already be marked as finished for various reasons
# (such as aborting the task)
if job.status == Status.finished:
return
jobplan, implementation = JobPlan.get_build_step_for_job(job_id=job.id)
if implementation is None:
# TODO(dcramer): record a FailureReason?
job.status = Status.finished
job.result = Result.failed
current_app.logger.exception('No build plan set %s', job_id)
return
try:
implementation.execute(job=job)
except UnrecoverableException:
job.status = Status.finished
job.result = Result.aborted
current_app.logger.exception('Unrecoverable exception creating %s', job_id)
return
sync_job.delay(
job_id=job.id.hex,
task_id=job.id.hex,
parent_task_id=job.build_id.hex,
)
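# Illustrative usage sketch (an assumption, not part of the original module):
# callers queue this tracked task the same way sync_job is queued above, e.g.
#
#     create_job.delay(job_id=job.id.hex, task_id=job.id.hex,
#                      parent_task_id=job.build_id.hex)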
| apache-2.0 | -5,937,665,670,571,321,000 | 29.352941 | 83 | 0.684755 | false |
adamatan/polycircles | docs/conf.py | 1 | 8304 | # -*- coding: utf-8 -*-
#
# Polycircles documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 21 13:22:59 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Polycircles'
copyright = u'2014, Adam Matan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Polycirclesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Polycircles.tex', u'Polycircles Documentation',
u'Adam Matan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'polycircles', u'Polycircles Documentation',
[u'Adam Matan'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Polycircles', u'Polycircles Documentation',
u'Adam Matan', 'Polycircles', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | -1,545,247,187,615,187,200 | 30.454545 | 79 | 0.706888 | false |
willprice/python-omxplayer-wrapper | omxplayer/player.py | 1 | 27179 | import subprocess
import time
import os
import signal
import logging
import threading
import atexit
import sys
try: # python 3
from pathlib import Path
except ImportError: # python2
from pathlib2 import Path
from decorator import decorator
from dbus import DBusException, Int64, String, ObjectPath
import dbus.types
from omxplayer.bus_finder import BusFinder
from omxplayer.dbus_connection import DBusConnection, \
DBusConnectionError
from evento import Event
# CONSTANTS
RETRY_DELAY = 0.05
# FILE GLOBAL OBJECTS
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def _check_player_is_active(fn):
# wraps is a decorator that improves debugging wrapped methods
def wrapped(fn, self, *args, **kwargs):
logger.debug('Checking if process is still alive')
# poll determines whether the process has terminated,
# if it hasn't it returns None.
if self._process.poll() is None:
logger.debug('OMXPlayer is running, so execute %s' %
fn.__name__)
return fn(self, *args, **kwargs)
else:
raise OMXPlayerDeadError('Process is no longer alive, can\'t run command')
return decorator(wrapped, fn)
def _from_dbus_type(fn):
def from_dbus_type(dbusVal):
def from_dbus_dict(dbusDict):
d = dict()
for dbusKey, dbusVal in dbusDict.items():
d[from_dbus_type(dbusKey)] = from_dbus_type(dbusVal)
return d
typeUnwrapper = {
dbus.types.Dictionary: from_dbus_dict,
dbus.types.Array: lambda x: list(map(from_dbus_type, x)),
dbus.types.Double: float,
dbus.types.Boolean: bool,
dbus.types.Byte: int,
dbus.types.Int16: int,
dbus.types.Int32: int,
dbus.types.Int64: int,
dbus.types.UInt32: int,
dbus.types.UInt64: int,
dbus.types.ByteArray: str,
dbus.types.ObjectPath: str,
dbus.types.Signature: str,
dbus.types.String: str
}
try:
return typeUnwrapper[type(dbusVal)](dbusVal)
except KeyError:
return dbusVal
def wrapped(fn, self, *args, **kwargs):
return from_dbus_type(fn(self, *args, **kwargs))
return decorator(wrapped, fn)
# CLASSES
class FileNotFoundError(Exception):
pass
class OMXPlayerDeadError(Exception):
pass
class OMXPlayer(object):
"""
OMXPlayer controller
This works by speaking to OMXPlayer over DBus sending messages.
Args:
source (str): Path to the file (as ~/Videos/my-video.mp4) or URL you wish to play
args (list/str): used to pass option parameters to omxplayer. see: https://github.com/popcornmix/omxplayer#synopsis
Multiple argument example:
>>> OMXPlayer('path.mp4', args=['--no-osd', '--no-keys', '-b'])
>>> OMXPlayer('path.mp4', args='--no-osd --no-keys -b')
>>> OMXPlayer('path.mp4', dbus_name='org.mpris.MediaPlayer2.omxplayer2')
"""
def __init__(self, source,
args=None,
bus_address_finder=None,
Connection=None,
dbus_name=None,
pause=False):
logger.debug('Instantiating OMXPlayer')
if args is None:
self.args = []
elif isinstance(args, str):
import shlex
self.args = shlex.split(args)
else:
self.args = list(map(str, args))
self._is_playing = True
self._source = Path(source)
self._dbus_name = dbus_name
self._Connection = Connection if Connection else DBusConnection
self._bus_address_finder = bus_address_finder if bus_address_finder else BusFinder()
#: Event called on pause ``callback(player)``
self.pauseEvent = Event()
#: Event called on play ``callback(player)``
self.playEvent = Event()
#: Event called on stop ``callback(player)``
self.stopEvent = Event()
#: Event called on exit ``callback(player, exit_status)``
self.exitEvent = Event()
#: Event called on seek ``callback(player, relative_position)``
self.seekEvent = Event()
#: Event called on setting position ``callback(player, absolute_position)``
self.positionEvent = Event()
self._process = None
self._connection = None
self.load(source, pause=pause)
def _load_source(self, source):
if self._process:
self.quit()
self._process = self._setup_omxplayer_process(source)
self._rate = 1.0
self._is_muted = False
self._connection = self._setup_dbus_connection(self._Connection, self._bus_address_finder)
def _run_omxplayer(self, source, devnull):
def on_exit(self, exit_status):
logger.info("OMXPlayer process is dead, all DBus calls from here "
"will fail")
self.exitEvent(self, exit_status)
def monitor(self, process, on_exit):
process.wait()
on_exit(self, process.returncode)
try:
source = str(source.resolve())
except AttributeError:
pass
command = ['omxplayer'] + self.args + [source]
if self._dbus_name:
command += ['--dbus_name', self._dbus_name]
logger.debug("Opening omxplayer with the command: %s" % command)
# By running os.setsid in the fork-ed process we create a process group
# which is used to kill the subprocesses the `omxplayer` script
# (it is a bash script itself that calls omxplayer.bin) creates. Without
# doing this we end up in a scenario where we kill the shell script, but not
# the forked children of the shell script.
# See https://pymotw.com/2/subprocess/#process-groups-sessions for examples on this
process = subprocess.Popen(command,
stdin=devnull,
stdout=devnull,
preexec_fn=os.setsid)
try:
self._process_monitor = threading.Thread(target=monitor,
args=(self, process, on_exit))
self._process_monitor.start()
return process
except:
# Make sure to not leave any dangling process on failure
self._terminate_process(process)
raise
def _setup_omxplayer_process(self, source):
logger.debug('Setting up OMXPlayer process')
with open(os.devnull, 'w') as devnull:
process = self._run_omxplayer(source, devnull)
logger.debug('Process opened with PID %s' % process.pid)
atexit.register(self.quit)
return process
def _terminate_process(self, process):
try:
process_group_id = os.getpgid(process.pid)
os.killpg(process_group_id, signal.SIGTERM)
logger.debug('SIGTERM Sent to pid: %s' % process_group_id)
except OSError:
logger.error('Could not find the process to kill')
def _setup_dbus_connection(self, Connection, bus_address_finder):
logger.debug('Trying to connect to OMXPlayer via DBus')
tries = 0
while tries < 50:
logger.debug('DBus connect attempt: {}'.format(tries))
try:
connection = Connection(bus_address_finder.get_address(), self._dbus_name)
logger.debug(
'Connected to OMXPlayer at DBus address: %s' % connection)
return connection
except (DBusConnectionError, IOError):
logger.debug('Failed to connect to OMXPlayer DBus address')
tries += 1
time.sleep(RETRY_DELAY)
raise SystemError('DBus cannot connect to the OMXPlayer process')
""" Utilities """
def load(self, source, pause=False):
"""
Loads a new source (as a file) from ``source`` (a file path or URL)
by killing the current ``omxplayer`` process and forking a new one.
Args:
source (string): Path to the file to play or URL
"""
self._source = source
try:
self._load_source(source)
if pause:
time.sleep(0.5) # Wait for the DBus interface to be initialised
self.pause()
except:
# Make sure we do not leave any dangling process
if self._process:
self._terminate_process(self._process)
self._process = None
raise
""" ROOT INTERFACE PROPERTIES """
@_check_player_is_active
@_from_dbus_type
def can_quit(self):
"""
Returns:
bool: whether the player can quit or not """
return self._root_interface_property('CanQuit')
@_check_player_is_active
@_from_dbus_type
def fullscreen(self):
"""
Returns:
bool: whether the player is fullscreen or not """
return self._root_interface_property('Fullscreen')
@_check_player_is_active
@_from_dbus_type
def can_set_fullscreen(self):
"""
Returns:
bool: whether the player can go fullscreen """
return self._root_interface_property('CanSetFullscreen')
@_check_player_is_active
@_from_dbus_type
def can_raise(self):
"""
Returns:
bool: whether the player can raise the display window atop of all other windows"""
return self._root_interface_property('CanRaise')
@_check_player_is_active
@_from_dbus_type
def has_track_list(self):
"""
Returns:
bool: whether the player has a track list or not"""
return self._root_interface_property('HasTrackList')
@_check_player_is_active
@_from_dbus_type
def identity(self):
"""
Returns:
str: Returns `omxplayer`, the name of the player
"""
return self._root_interface_property('Identity')
@_check_player_is_active
@_from_dbus_type
def supported_uri_schemes(self):
"""
Returns:
str: list of supported URI schemes
Examples:
>>> player.supported_uri_schemes()
["file", "http", "rtsp", "rtmp"]
"""
return self._root_interface_property('SupportedUriSchemes')
""" ROOT INTERFACE METHODS """
""" PLAYER INTERFACE PROPERTIES """
@_check_player_is_active
@_from_dbus_type
def can_go_next(self):
"""
Returns:
bool: whether the player can move to the next item in the playlist
"""
return self._player_interface_property('CanGoNext')
@_check_player_is_active
@_from_dbus_type
def can_go_previous(self):
"""
Returns:
bool: whether the player can move to the previous item in the
playlist
"""
return self._player_interface_property('CanGoPrevious')
@_check_player_is_active
@_from_dbus_type
def can_seek(self):
"""
Returns:
bool: whether the player can seek """
return self._player_interface_property('CanSeek')
@_check_player_is_active
@_from_dbus_type
def can_control(self):
"""
Returns:
bool: whether the player can be controlled"""
return self._player_interface_property('CanControl')
@_check_player_is_active
@_from_dbus_type
def can_play(self):
"""
Returns:
bool: whether the player can play"""
return self._player_interface_property('CanPlay')
@_check_player_is_active
@_from_dbus_type
def can_pause(self):
"""
Returns:
bool: whether the player can pause"""
return self._player_interface_property('CanPause')
@_check_player_is_active
@_from_dbus_type
def playback_status(self):
"""
Returns:
str: one of ("Playing" | "Paused" | "Stopped")
"""
return self._player_interface_property('PlaybackStatus')
@_check_player_is_active
@_from_dbus_type
def volume(self):
"""
Returns:
float: current player volume
"""
if self._is_muted:
return 0
return self._player_interface_property('Volume')
@_check_player_is_active
@_from_dbus_type
def set_volume(self, volume):
"""
Args:
            volume (float): volume in the interval [0, 10]
"""
# 0 isn't handled correctly so we have to set it to a very small value to achieve the same purpose
if volume == 0:
volume = 1e-10
return self._player_interface_property('Volume', dbus.Double(volume))
@_check_player_is_active
@_from_dbus_type
def _position_us(self):
"""
Returns:
int: position in microseconds
"""
return self._player_interface_property('Position')
def position(self):
"""
Returns:
int: position in seconds
"""
return self._position_us() / (1000.0 * 1000.0)
@_check_player_is_active
@_from_dbus_type
def minimum_rate(self):
"""
Returns:
float: minimum playback rate (as proportion of normal rate)
"""
return self._player_interface_property('MinimumRate')
@_check_player_is_active
@_from_dbus_type
def maximum_rate(self):
"""
Returns:
float: maximum playback rate (as proportion of normal rate)
"""
return self._player_interface_property('MaximumRate')
@_check_player_is_active
@_from_dbus_type
def rate(self):
"""
Returns:
float: playback rate, 1 is the normal rate, 2 would be double speed.
"""
return self._rate
@_check_player_is_active
@_from_dbus_type
def set_rate(self, rate):
"""
Set the playback rate of the video as a multiple of the default playback speed
Examples:
>>> player.set_rate(2)
# Will play twice as fast as normal speed
>>> player.set_rate(0.5)
# Will play half speed
"""
self._rate = self._player_interface_property('Rate', dbus.Double(rate))
return self._rate
@_check_player_is_active
@_from_dbus_type
def metadata(self):
"""
Returns:
dict: containing track information ('URI', 'length')
Examples:
>>> player.metadata()
{
'mpris:length': 19691000,
'xesam:url': 'file:///home/name/path/to/media/file.mp4'
}
"""
return self._player_interface_property('Metadata')
""" PLAYER INTERFACE NON-STANDARD PROPERTIES """
@_check_player_is_active
@_from_dbus_type
def aspect_ratio(self):
"""
Returns:
float: aspect ratio
"""
return self._player_interface_property('Aspect')
@_check_player_is_active
@_from_dbus_type
def video_stream_count(self):
"""
Returns:
int: number of video streams
"""
return self._player_interface_property('VideoStreamCount')
@_check_player_is_active
@_from_dbus_type
def width(self):
"""
Returns:
int: video width in px
"""
return self._player_interface_property('ResWidth')
@_check_player_is_active
@_from_dbus_type
def height(self):
"""
Returns:
int: video height in px
"""
return self._player_interface_property('ResHeight')
@_check_player_is_active
@_from_dbus_type
def _duration_us(self):
"""
Returns:
int: total length in microseconds
"""
return self._player_interface_property('Duration')
@_check_player_is_active
def duration(self):
"""
Returns:
float: duration in seconds
"""
return self._duration_us() / (1000.0 * 1000.0)
""" PLAYER INTERFACE METHODS """
@_check_player_is_active
def pause(self):
"""
Pause playback
"""
self._player_interface.Pause()
self._is_playing = False
self.pauseEvent(self)
@_check_player_is_active
def play_pause(self):
"""
Pause playback if currently playing, otherwise start playing if currently paused.
"""
self._player_interface.PlayPause()
self._is_playing = not self._is_playing
if self._is_playing:
self.playEvent(self)
else:
self.pauseEvent(self)
@_check_player_is_active
@_from_dbus_type
def stop(self):
"""
Stop the player, causing it to quit
"""
self._player_interface.Stop()
self.stopEvent(self)
@_check_player_is_active
@_from_dbus_type
def seek(self, relative_position):
"""
Seek the video by `relative_position` seconds
Args:
relative_position (float): The position in seconds to seek to.
"""
self._player_interface.Seek(Int64(1000.0 * 1000 * relative_position))
self.seekEvent(self, relative_position)
@_check_player_is_active
@_from_dbus_type
def set_position(self, position):
"""
Set the video to playback position to `position` seconds from the start of the video
Args:
position (float): The position in seconds.
"""
self._player_interface.SetPosition(ObjectPath("/not/used"), Int64(position * 1000.0 * 1000))
self.positionEvent(self, position)
@_check_player_is_active
@_from_dbus_type
def set_layer(self, layer):
"""
Set the layer of the Video (default 0). Higher layers are above lower layers
Args:
layer (int): The Layer to switch to.
"""
self._player_interface.SetLayer(Int64(layer))
@_check_player_is_active
@_from_dbus_type
def set_alpha(self, alpha):
"""
Set the transparency of the video overlay
Args:
alpha (float): The transparency (0..255)
"""
self._player_interface.SetAlpha(ObjectPath('/not/used'), Int64(alpha))
@_check_player_is_active
def mute(self):
"""
Mute audio. If already muted, then this does not do anything
"""
self._is_muted = True
self._player_interface.Mute()
@_check_player_is_active
def unmute(self):
"""
Unmutes the video. If already unmuted, then this does not do anything
"""
self._is_muted = False
self._player_interface.Unmute()
@_check_player_is_active
@_from_dbus_type
def set_aspect_mode(self, mode):
"""
Set the aspect mode of the video
Args:
mode (str): One of ("letterbox" | "fill" | "stretch")
"""
self._player_interface.SetAspectMode(ObjectPath('/not/used'), String(mode))
@_check_player_is_active
@_from_dbus_type
def set_video_pos(self, x1, y1, x2, y2):
"""
Set the video position on the screen
Args:
x1 (int): Top left x coordinate (px)
y1 (int): Top left y coordinate (px)
x2 (int): Bottom right x coordinate (px)
y2 (int): Bottom right y coordinate (px)
"""
        position = "%s %s %s %s" % (x1, y1, x2, y2)
self._player_interface.VideoPos(ObjectPath('/not/used'), String(position))
@_check_player_is_active
def video_pos(self):
"""
Returns:
(int, int, int, int): Video spatial position (x1, y1, x2, y2) where (x1, y1) is top left,
and (x2, y2) is bottom right. All values in px.
"""
position_string = self._player_interface.VideoPos(ObjectPath('/not/used'))
return list(map(int, position_string.split(" ")))
@_check_player_is_active
@_from_dbus_type
def set_video_crop(self, x1, y1, x2, y2):
"""
Args:
x1 (int): Top left x coordinate (px)
y1 (int): Top left y coordinate (px)
x2 (int): Bottom right x coordinate (px)
y2 (int): Bottom right y coordinate (px)
"""
        crop = "%s %s %s %s" % (x1, y1, x2, y2)
self._player_interface.SetVideoCropPos(ObjectPath('/not/used'), String(crop))
@_check_player_is_active
def hide_video(self):
"""
Hides the video overlays
"""
self._player_interface.HideVideo()
@_check_player_is_active
def show_video(self):
"""
Shows the video (to undo a `hide_video`)
"""
self._player_interface.UnHideVideo()
@_check_player_is_active
@_from_dbus_type
def list_audio(self):
"""
Returns:
[str]: A list of all known audio streams, each item is in the
format: ``<index>:<language>:<name>:<codec>:<active>``
"""
return self._player_interface.ListAudio()
@_check_player_is_active
@_from_dbus_type
def list_video(self):
"""
Returns:
[str]: A list of all known video streams, each item is in the
format: ``<index>:<language>:<name>:<codec>:<active>``
"""
return self._player_interface.ListVideo()
@_check_player_is_active
@_from_dbus_type
def list_subtitles(self):
"""
Returns:
[str]: A list of all known subtitles, each item is in the
format: ``<index>:<language>:<name>:<codec>:<active>``
"""
return self._player_interface.ListSubtitles()
@_check_player_is_active
def select_subtitle(self, index):
"""
Enable a subtitle specified by the index it is listed in :class:`list_subtitles`
Args:
index (int): index of subtitle listing returned by :class:`list_subtitles`
"""
return self._player_interface.SelectSubtitle(dbus.Int32(index))
@_check_player_is_active
def select_audio(self, index):
"""
Select audio stream specified by the index of the stream in :class:`list_audio`
Args:
index (int): index of audio stream returned by :class:`list_audio`
"""
return self._player_interface.SelectAudio(dbus.Int32(index))
@_check_player_is_active
def show_subtitles(self):
"""
Shows subtitles after :class:`hide_subtitles`
"""
return self._player_interface.ShowSubtitles()
@_check_player_is_active
def hide_subtitles(self):
"""
Hide subtitles
"""
return self._player_interface.HideSubtitles()
@_check_player_is_active
@_from_dbus_type
def action(self, code):
"""
Executes a keyboard command via a code
Args:
code (int): The key code you wish to emulate
refer to ``keys.py`` for the possible keys
"""
self._player_interface.Action(code)
@_check_player_is_active
@_from_dbus_type
def is_playing(self):
"""
Returns:
bool: Whether the player is playing
"""
self._is_playing = (self.playback_status() == "Playing")
logger.info("Playing?: %s" % self._is_playing)
return self._is_playing
@_check_player_is_active
@_from_dbus_type
def play_sync(self):
"""
Play the video and block whilst the video is playing
"""
self.play()
logger.info("Playing synchronously")
try:
time.sleep(0.05)
logger.debug("Wait for playing to start")
while self.is_playing():
time.sleep(0.05)
except DBusException:
logger.error(
"Cannot play synchronously any longer as DBus calls timed out."
)
@_check_player_is_active
@_from_dbus_type
def play(self):
"""
Play the video asynchronously returning control immediately to the calling code
"""
if not self.is_playing():
self.play_pause()
self._is_playing = True
self.playEvent(self)
@_check_player_is_active
@_from_dbus_type
def next(self):
"""
Skip to the next chapter
Returns:
bool: Whether the player skipped to the next chapter
"""
return self._player_interface.Next()
@_check_player_is_active
@_from_dbus_type
def previous(self):
"""
Skip to the previous chapter
Returns:
bool: Whether the player skipped to the previous chapter
"""
return self._player_interface.Previous()
@property
def _root_interface(self):
return self._connection.root_interface
@property
def _player_interface(self):
return self._connection.player_interface
@property
def _properties_interface(self):
return self._connection.properties_interface
def _interface_property(self, interface, prop, val):
if val:
return self._properties_interface.Set(interface, prop, val)
else:
return self._properties_interface.Get(interface, prop)
def _root_interface_property(self, prop, val=None):
return self._interface_property(self._root_interface.dbus_interface, prop, val)
def _player_interface_property(self, prop, val=None):
return self._interface_property(self._player_interface.dbus_interface, prop, val)
def quit(self):
"""
Quit the player, blocking until the process has died
"""
if self._process is None:
logger.debug('Quit was called after self._process had already been released')
return
logger.debug('Quitting OMXPlayer')
self._terminate_process(self._process)
self._process_monitor.join()
self._process = None
@_check_player_is_active
@_from_dbus_type
def get_source(self):
"""
Get the source URI of the currently playing media
Returns:
str: source currently playing
"""
return self._source
# For backward compatibility
@_check_player_is_active
@_from_dbus_type
def get_filename(self):
"""
Returns:
str: source currently playing
.. deprecated:: 0.2.0
Use: :func:`get_source` instead.
"""
return self.get_source()
# MediaPlayer2.Player types:
# Track_Id: DBus ID of track
# Plaback_Rate: Multiplier for playback speed (1 = normal speed)
# Volume: 0--1, 0 is muted and 1 is full volume
# Time_In_Us: Time in microseconds
# Playback_Status: Playing|Paused|Stopped
# Loop_Status: None|Track|Playlist
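# Illustrative usage sketch (assumptions: omxplayer is installed and the media
# path exists; this mirrors the docstring examples above):
#
#     player = OMXPlayer('~/Videos/my-video.mp4', args=['--no-osd'])
#     player.set_volume(0.5)
#     player.play_sync()  # blocks until playback finishes
#     player.quit()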
| lgpl-3.0 | 3,440,031,027,014,566,400 | 28.965821 | 124 | 0.570845 | false |
kyubifire/softlayer-python | SoftLayer/CLI/image/export.py | 1 | 1270 | """Export an image."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import helpers
@click.command()
@click.argument('identifier')
@click.argument('uri')
@click.option('--ibm-api-key',
default=None,
help="The IBM Cloud API Key with access to IBM Cloud Object "
"Storage instance. For help creating this key see "
"https://console.bluemix.net/docs/services/cloud-object-"
"storage/iam/users-serviceids.html#serviceidapikeys")
@environment.pass_env
def cli(env, identifier, uri, ibm_api_key):
"""Export an image to object storage.
    The URI for an object storage object (.vhd/.iso file) must be of the format:
    swift://<objectStorageAccount>@<cluster>/<container>/<objectPath>
    or cos://<regionName>/<bucketName>/<objectPath> if using IBM Cloud
    Object Storage.
"""
image_mgr = SoftLayer.ImageManager(env.client)
image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')
result = image_mgr.export_image_to_uri(image_id, uri, ibm_api_key)
if not result:
raise exceptions.CLIAbort("Failed to export Image")
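# Illustrative CLI usage sketch (hypothetical image ID and storage paths):
#
#     slcli image export 12345678 swift://SLOS12345-1@dal05/exports/image.vhd
#     slcli image export 12345678 cos://us-south/my-bucket/image.vhd \
#         --ibm-api-key=<api-key>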
| mit | -1,651,164,738,944,447,200 | 34.277778 | 77 | 0.680315 | false |
skeletalbassman/pytix | wrappers/trello.py | 1 | 10975 | '''wrapper class for Trello REST API'''
import requests
import yaml
import datetime
BASE = "https://api.trello.com/1/"
class Trello():
def __init__(self, project=None, username=None, password=None):
self._key = None
self._token = None
self._authorize()
if project:
self._board = self.setProject(project)
else:
try:
with open("projects.yaml", "r") as f:
data = f.read()
boards = yaml.load(data)
self._board = boards["trello"]
except IOError:
print "If you have not previously set a Trello board as your current project, you must\nspecify a board name."
board_name = raw_input("Board name: ")
self._board = self.setProject(board_name)
def _authorize(self):
try:
with open("credentials.yaml", "r") as f:
data = f.read()
creds = yaml.load(data)
except IOError:
creds = {}
if not "trello" in creds:
print "Your API key was not found on file."
print "Navigate to the following link to obtain your API key\nand paste it into the terminal below. Make sure you are logged into Trello before following the link."
print "Link: https://trello.com/app-key"
key = raw_input("API key: ")
print "\nNow please follow the link below and click 'Allow'."
print "Copy and paste the resulting token back into the terminal. Pytix will\ncache this key and token for future use. This is a one-time procedure."
print "https://trello.com/1/authorize?expiration=never&scope=read%2Cwrite&name=pytix&key={}&response_type=token".format(key)
token = raw_input("API token: ")
self._key = key
self._token = token
new_creds = {}
new_creds["key"] = key
new_creds["token"] = token
creds["trello"] = new_creds
with open("credentials.yaml", "w") as f:
f.write(yaml.dump(creds))
def _getCreds(self):
with open("credentials.yaml", "r") as f:
data = f.read()
creds = yaml.load(data)
key = creds["trello"]["key"]
token = creds["trello"]["token"]
return key, token
def setProject(self, proj_name):
key, token = self._getCreds()
url = BASE + "members/me?&boards=all&key={0}&token={1}".format(key, token)
response = requests.get(url)
boards = response.json()["boards"]
for board in boards:
if board["name"] == proj_name:
self._board = board["id"]
try:
with open("projects.yaml", "r") as f:
data = f.read()
projs = yaml.load(data)
except IOError:
projs = {}
projs["trello"] = board["id"]
with open("projects.yaml", "w") as f:
f.write(yaml.dump(projs))
return board["id"]
def getProject(self):
key, token = self._getCreds()
board = self._board
url = BASE + "boards/{0}?lists=open&cards=open&key={1}&token={2}".format(board, key, token)
response = requests.get(url)
#TODO deal with the response here
#what do we want to show the user about the board?
json = response.json()
lists = json["lists"]
cards = json["cards"]
list_stats = {}
max_length = 0
for item in lists:
cur_length = len(item["name"])
if cur_length > max_length:
max_length = cur_length
list_stats[item["id"]] = {
"name": item["name"],
"no. of cards": 0
}
for card in cards:
list_stats[card["idList"]]["no. of cards"] += 1
left_side = " List Name "
right_side = " No. of Cards ".format("no. of cards")
if len(left_side)-2 > max_length:
max_length = len(left_side)-2
print "\n"+json["name"]
print "\nStatistics:"
print "-"*(19+max_length)
print "|{0:{1}}|{2}|".format(left_side, max_length+2, right_side)
print "-"*(19+max_length)
for key in list_stats:
name = " {} ".format(list_stats[key]["name"])
num = " {} ".format(str(list_stats[key]["no. of cards"]))
print "|{0:{1}}|{2:14}|".format(
name,
max_length+2,
num)
print "-"*(19+max_length)
def getList(self, name):
key, token = self._getCreds()
board = self._board
url = BASE + "boards/{0}?lists=open&key={1}&token={2}".format(board, key, token)
response = requests.get(url)
json = response.json()
		list_id = None  # guard against a NameError when no list name matches
		for item in json["lists"]:
if item["name"] == name:
list_id = item["id"]
if list_id:
url = BASE + "lists/{0}?cards=open&key={1}&token={2}".format(list_id, key, token)
response = requests.get(url)
json = response.json()
cards = {}
max_name_len = 0
max_id_len = 0
for card in json["cards"]:
if len(card["name"]) > max_name_len:
max_name_len = len(card["name"])
if len(card["id"]) > max_id_len:
max_id_len = len(card["id"])
cards[card["id"]] = {
"name": card["name"],
"id": card["id"]
}
left_side = " Card Name "
right_side = " Card ID "
if len(left_side)-2 > max_name_len:
max_name_len = len(left_side)-2
if len(right_side)-2 > max_id_len:
max_id_len = len(right_side)-2
print "\n"+json["name"]
print "-"*(7+max_id_len+max_name_len)
print "|{0:{1}}|{2:{3}}|".format(left_side, max_name_len+2, right_side,
max_id_len+2)
print "-"*(7+max_id_len+max_name_len)
for key in cards:
name = " {} ".format(cards[key]["name"])
ID = " {} ".format(cards[key]["id"])
print "|{0:{1}}|{2:{3}}|".format(
name,
max_name_len+2,
ID,
max_id_len+2)
print "-"*(7+max_id_len+max_name_len)
else:
print "List not found. Check your spelling."
def getTask(self, name=None, ID=None):
if not name and not ID:
print "You must specify either a card name or a card ID."
return None
key, token = self._getCreds()
board = self._board
url = BASE + "boards/{0}?cards=open&key={1}&token={2}".format(board, key, token)
response = requests.get(url)
json = response.json()
card_id = None
if ID:
card_id = ID
else:
for card in json["cards"]:
if card["name"] == name:
card_id = card["id"]
if card_id:
url = BASE + "cards/{0}?actions=commentCard&key={1}&token={2}".format(card_id, key, token)
response = requests.get(url)
json = response.json()
comments = {}
max_name_len = 0
max_text_len = 0
max_date_len = 0
for comment in json["actions"]:
if len(comment["memberCreator"]["username"])-2 > max_name_len:
max_name_len = len(comment["memberCreator"]["username"])
if len(comment["data"]["text"])-2 > max_text_len:
max_text_len = len(comment["data"]["text"])
date = comment["date"].split("T")[0]
if len(date)-2 > max_date_len:
max_date_len = len(date)
comments[comment["id"]] = {
"username": comment["memberCreator"]["username"],
"text": comment["data"]["text"],
"date": date
}
name = json["name"]
name_label = " Username "
text_label = " Comment Text "
date_label = " Date "
if len(name_label)-2 > max_name_len:
max_name_len = len(name_label)-2
if len(text_label)-2 > max_text_len:
max_text_len = len(text_label)-2
print "\n"+name
print "-"*(10+max_text_len+max_name_len+max_date_len)
print "|{0:{1}}|{2:{3}}|{4:{5}}|".format(name_label, max_name_len+2, text_label,
max_text_len+2, date_label, max_date_len+2)
print "-"*(10+max_text_len+max_name_len+max_date_len)
#TODO need to handle comments where overall table width > 80 chars
for key in comments:
name = " {} ".format(comments[key]["username"])
text = " {} ".format(comments[key]["text"])
date = " {} ".format(comments[key]["date"])
print "|{0:{1}}|{2:{3}}|{4:{5}}|".format(
name,
max_name_len+2,
text,
max_text_len+2,
date,
max_date_len+2)
print "-"*(10+max_text_len+max_name_len+max_date_len)
else:
print "Card not found. Check your spelling."
def moveTask(self, name, from_list, to_list):
key, token = self._getCreds()
board = self._board
board_url = BASE + "boards/{0}?lists=open&key={1}&token={2}".format(board, key, token)
response = requests.get(board_url)
json = response.json()
from_id = to_id = None
for item in json["lists"]:
if item["name"] == from_list:
from_id = item["id"]
elif item["name"] == to_list:
to_id = item["id"]
if not from_id:
print "Source board not found."
return None
if not to_id:
print "Destination board not found."
return None
url1 = BASE + "lists/{0}?cards=open&key={1}&token={2}".format(from_id, key, token)
response = requests.get(url1)
json = response.json()
card_id = None
for card in json["cards"]:
if card["name"] == name:
card_id = card["id"]
if not card_id:
print "Card not found."
return None
url = BASE + "cards/{0}?idList={1}&pos=bottom&key={2}&token={3}".format(card_id, to_id, key, token)
response = requests.put(url)
json = response.json()
print "'{0}' moved to list '{1}'".format(json["name"], to_list)
def addTask(self, name, to_list):
key, token = self._getCreds()
board = self._board
board_url = BASE + "boards/{0}?lists=open&key={1}&token={2}".format(board, key, token)
response = requests.get(board_url)
json = response.json()
to_id = None
for item in json["lists"]:
if item["name"] == to_list:
to_id = item["id"]
if not to_id:
print "Destination list not found."
return None
url = BASE + "cards?name={0}&idList={1}&due=null&key={2}&token={3}".format(name,
to_id, key, token)
response = requests.post(url, data={})
json = response.json()
print "'{0}' added to list '{1}'".format(json["name"], to_list)
def commentTask(self, name, text):
		if not name:
			print "You must specify a card name."
			return None
key, token = self._getCreds()
board = self._board
url = BASE + "boards/{0}?cards=open&key={1}&token={2}".format(board, key, token)
response = requests.get(url)
json = response.json()
card_id = None
for card in json["cards"]:
if card["name"] == name:
card_id = card["id"]
if not card_id:
print "Card not found."
return None
url = BASE + "cards/{0}/actions/comments?key={1}&token={2}".format(card_id, key, token)
data = {
"text": text
}
response = requests.post(url, data=data)
json = response.json()
if text == json["display"]["entities"]["comment"]["text"]:
print "Comment added successfully."
else:
print "There was an error in processing your comment."
def deleteTask(self, name):
		if not name:
			print "You must specify a card name."
			return None
key, token = self._getCreds()
board = self._board
url = BASE + "boards/{0}?cards=open&key={1}&token={2}".format(board, key, token)
response = requests.get(url)
json = response.json()
card_id = None
for card in json["cards"]:
if card["name"] == name:
card_id = card["id"]
if not card_id:
print "Card not found."
return None
url = BASE + "cards/{0}?key={1}&token={2}".format(card_id, key, token)
response = requests.delete(url, data={})
json = response.json()
if "_value" in json:
			if json["_value"] is None:
print "Card deleted successfully."
else:
print "Card could not be deleted."
if __name__ == "__main__":
trello = Trello()
#trello.getList("Current Sprint")
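	# Additional illustrative calls (hypothetical board/list/card names):
	#trello.setProject("My Board")
	#trello.addTask("Test Card", "Backlog")
	#trello.moveTask("Test Card", "Backlog", "Current Sprint")
	#trello.commentTask("Test Card", "Looks good to me")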
	trello.deleteTask("Test Card")
| mit | -8,722,405,353,362,999,000 | 31.093567 | 167 | 0.613759 | false |
enki/muXTCP | scapyLink.py | 1 | 1540 | #!/usr/bin/python
from muxlib.scapy import *
import sys
from twisted.internet import base, fdesc, reactor, protocol
import socket
import iptables
class ScapyLink(base.BasePort):
def __init__(self, interface=None, plusIPs=[]):
base.BasePort.__init__(self, reactor)
self.protocols = []
self.interface = interface
if interface:
self.listenIPs = [get_if_addr(interface)]
self.listenIPs += plusIPs
self.listenOnWire()
def getHandle(self):
return self.socket
def listenOnWire(self):
# self.socket = scapy.L3RawSocket(iface=self.interface, promisc=True, filter='')
self.socket = L2Socket(iface=self.interface)
reactor.addReader(self)
def fileno(self):
return self.socket.ins.fileno()
def doRead(self):
packet = self.socket.recv(MTU)
for protocol in self.protocols:
protocol.packetReceived(packet)
def registerProtocol(self, protocol):
if protocol not in self.protocols:
self.protocols.append(protocol)
# protocol.startProtocol()
else:
raise "Registered Protocol", protocol, "twice"
protocol.setTransport(self)
def unRegisterProtocol(self, protocol):
if protocol in self.protocols:
protocol.setTransport(None)
self.protocols.remove(protocol)
else:
raise "Removed Protocol", protocol, "that isn't registered"
def send(self, packet):
self.socket.send(packet)
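# Illustrative usage sketch (assumptions: root privileges, a real interface
# name, and a protocol object implementing packetReceived()/setTransport()):
#
#     link = ScapyLink('eth0')
#     link.registerProtocol(my_protocol)
#     reactor.run()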
| mit | 3,709,210,830,107,390,500 | 27.518519 | 87 | 0.634416 | false |
eduNEXT/edunext-platform | common/djangoapps/util/course.py | 1 | 2804 | """
Utility methods related to course
"""
import logging
import six
from django.conf import settings
from django.utils.timezone import now
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
log = logging.getLogger(__name__)
COURSE_SHARING_UTM_PARAMETERS = {
'facebook': {
'utm_medium': 'social',
'utm_campaign': 'social-sharing-db',
'utm_source': 'facebook',
},
'twitter': {
'utm_medium': 'social',
'utm_campaign': 'social-sharing-db',
'utm_source': 'twitter',
},
}
def get_encoded_course_sharing_utm_params():
"""
Returns encoded Course Sharing UTM Parameters.
"""
return {
utm_source: six.moves.urllib.parse.urlencode(utm_params)
for utm_source, utm_params in six.iteritems(COURSE_SHARING_UTM_PARAMETERS)
}
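# Illustrative sketch of the returned mapping (an assumption; key order inside
# each urlencoded query string may vary):
#
#     {'facebook': 'utm_medium=social&utm_campaign=social-sharing-db&utm_source=facebook',
#      'twitter': 'utm_medium=social&utm_campaign=social-sharing-db&utm_source=twitter'}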
def get_link_for_about_page(course):
"""
Arguments:
course: This can be either a course overview object or a course descriptor.
Returns the course sharing url, this can be one of course's social sharing url, marketing url, or
lms course about url.
"""
is_social_sharing_enabled = configuration_helpers.get_value(
'SOCIAL_SHARING_SETTINGS',
getattr(settings, 'SOCIAL_SHARING_SETTINGS', {})
).get('CUSTOM_COURSE_URLS')
if is_social_sharing_enabled and course.social_sharing_url:
course_about_url = course.social_sharing_url
elif settings.FEATURES.get('ENABLE_MKTG_SITE') and getattr(course, 'marketing_url', None):
course_about_url = course.marketing_url
else:
about_base = configuration_helpers.get_value_for_org(
course.id.org,
'LMS_ROOT_URL',
settings.LMS_ROOT_URL
)
course_about_url = u'{about_base_url}/courses/{course_key}/about'.format(
about_base_url=about_base,
course_key=six.text_type(course.id),
)
return course_about_url
def has_certificates_enabled(course):
"""
Arguments:
course: This can be either a course overview object or a course descriptor.
Returns a boolean if the course has enabled certificates
"""
if not settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
return False
return course.cert_html_view_enabled
def should_display_grade(course_overview):
"""
    Returns True when the certificate available date (if set) or, otherwise,
    the course end date has passed, i.e. when the grade should be displayed.
"""
course_end_date = course_overview.end_date
cert_available_date = course_overview.certificate_available_date
current_date = now().replace(hour=0, minute=0, second=0, microsecond=0)
if cert_available_date:
return cert_available_date < current_date
return course_end_date and course_end_date < current_date
| agpl-3.0 | 3,148,199,023,560,329,000 | 29.150538 | 101 | 0.662981 | false |
sugartom/tensorflow-alien | tensorflow/contrib/layers/python/layers/layers.py | 1 | 95215 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Higher level ops for building layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import six
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.layers import convolutional as convolutional_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.layers import normalization as normalization_layers
from tensorflow.python.layers import pooling as pooling_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.training import moving_averages
# TODO(b/28426988): Replace legacy_* fns migrated from slim.
# TODO(b/28426988): Remove legacy_* when all uses have migrated to new API.
__all__ = ['avg_pool2d',
'batch_norm',
'bias_add',
'conv2d',
'conv2d_in_plane',
'conv2d_transpose',
'convolution',
'convolution2d',
'convolution2d_in_plane',
'convolution2d_transpose',
'dropout',
'flatten',
'fully_connected',
'layer_norm',
'linear',
'pool',
'max_pool2d',
'one_hot_encoding',
'relu',
'relu6',
'repeat',
'separable_conv2d',
'separable_convolution2d',
'softmax',
'stack',
'unit_norm',
'legacy_fully_connected',
'legacy_linear',
'legacy_relu']
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
@add_arg_scope
def avg_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
"""Adds a 2D average pooling op.
It is assumed that the pooling is done per image but not in batch or channels.
Args:
inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
`data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
`data_format` is `NCHW`.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: A list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with ops.name_scope(scope, 'AvgPool2D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = pooling_layers.AveragePooling2D(pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
_scope=sc)
outputs = layer.apply(inputs)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
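# Illustrative usage sketch (not part of the original module): a 2x2 average
# pool with stride 2 over an NHWC batch, per the avg_pool2d docstring above.
#
#   images = array_ops.placeholder(dtypes.float32, [None, 224, 224, 3])
#   pooled = avg_pool2d(images, kernel_size=[2, 2], stride=2)
#   # 'pooled' has shape [None, 112, 112, 3]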
def _fused_batch_norm(
inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
param_initializers=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
data_format=DATA_FORMAT_NHWC,
zero_debias_moving_mean=False,
scope=None):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
Note: When is_training is True the moving_mean and moving_variance need to be
updated, by default the update_ops are placed in `tf.GraphKeys.UPDATE_OPS` so
they need to be added as a dependency to the `train_op`, example:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss)
One can set updates_collections=None to force the updates in place, but that
can have speed penalty, especially in distributed settings.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`. The normalization is over all but the last dimension if
`data_format` is `NHWC` and the second dimension if `data_format` is
`NCHW`.
decay: Decay for the moving average. Reasonable values for `decay` are close
to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9, etc.
Lower `decay` value (recommend trying `decay`=0.9) if model experiences
reasonably good training performance but poor validation and/or test
performance.
center: If True, add offset of `beta` to normalized tensor. If False,
`beta` is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
epsilon: Small float added to variance to avoid dividing by zero.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
param_initializers: Optional initializers for beta, gamma, moving mean and
moving variance.
updates_collections: Collections to collect the update ops for computation.
The updates_ops need to be executed with the train_op.
If None, a control dependency would be added to make sure the updates are
computed in place.
is_training: Whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
data_format: A string. `NHWC` (default) and `NCHW` are supported.
zero_debias_moving_mean: Use zero_debias for moving_mean.
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If the rank of `inputs` is undefined.
ValueError: If the rank of `inputs` is neither 2 or 4.
ValueError: If rank or `C` dimension of `inputs` is undefined.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with variable_scope.variable_scope(
scope, 'BatchNorm', [inputs], reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
original_shape = inputs.get_shape()
original_rank = original_shape.ndims
if original_rank is None:
raise ValueError('Inputs %s has undefined rank' % inputs.name)
elif original_rank not in [2, 4]:
raise ValueError('Inputs %s has unsupported rank.'
' Expected 2 or 4 but got %d' % (
inputs.name, original_rank))
if original_rank == 2:
channels = inputs.get_shape()[-1].value
if channels is None:
raise ValueError('`C` dimension must be known but is None')
new_shape = [-1, 1, 1, channels]
if data_format == DATA_FORMAT_NCHW:
new_shape = [-1, channels, 1, 1]
inputs = array_ops.reshape(inputs, new_shape)
inputs_shape = inputs.get_shape()
dtype = inputs.dtype.base_dtype
if data_format == DATA_FORMAT_NHWC:
params_shape = inputs_shape[-1:]
else:
params_shape = inputs_shape[1:2]
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined `C` dimension %s.' %
(inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
trainable_beta = trainable and center
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
if not param_initializers:
param_initializers = {}
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer())
beta = variables.model_variable(
'beta',
shape=params_shape,
dtype=dtype,
initializer=beta_initializer,
collections=beta_collections,
trainable=trainable_beta)
trainable_gamma = trainable and scale
gamma_collections = utils.get_variable_collections(variables_collections,
'gamma')
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer())
gamma = variables.model_variable(
'gamma',
shape=params_shape,
dtype=dtype,
initializer=gamma_initializer,
collections=gamma_collections,
trainable=trainable_gamma)
# Create moving_mean and moving_variance variables and add them to the
    # appropriate collections.
moving_mean_collections = utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_mean = variables.model_variable(
'moving_mean',
shape=params_shape,
dtype=dtype,
initializer=moving_mean_initializer,
trainable=False,
collections=moving_mean_collections)
moving_variance_collections = utils.get_variable_collections(
variables_collections, 'moving_variance')
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
moving_variance = variables.model_variable(
'moving_variance',
shape=params_shape,
dtype=dtype,
initializer=moving_variance_initializer,
trainable=False,
collections=moving_variance_collections)
def _fused_batch_norm_training():
return nn.fused_batch_norm(
inputs, gamma, beta, epsilon=epsilon, data_format=data_format)
def _fused_batch_norm_inference():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
mean=moving_mean,
variance=moving_variance,
epsilon=epsilon,
is_training=False,
data_format=data_format)
outputs, mean, variance = utils.smart_cond(is_training,
_fused_batch_norm_training,
_fused_batch_norm_inference)
    # If `is_training` doesn't have a constant value (because it is a
    # `Tensor`, a `Variable` or a `Placeholder`), is_training_value will be
    # None and `need_updates` will be True.
is_training_value = utils.constant_value(is_training)
need_updates = is_training_value is None or is_training_value
if need_updates:
if updates_collections is None:
no_updates = lambda: outputs
def _force_updates():
"""Internal function forces updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
with ops.control_dependencies(
[update_moving_mean, update_moving_variance]):
return array_ops.identity(outputs)
outputs = utils.smart_cond(is_training, _force_updates, no_updates)
else:
moving_vars_fn = lambda: (moving_mean, moving_variance)
def _delay_updates():
"""Internal function that delay updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
return update_moving_mean, update_moving_variance
update_mean, update_variance = utils.smart_cond(is_training,
_delay_updates,
moving_vars_fn)
ops.add_to_collections(updates_collections, update_mean)
ops.add_to_collections(updates_collections, update_variance)
outputs.set_shape(inputs_shape)
if original_shape.ndims == 2:
outputs = array_ops.reshape(outputs, original_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@add_arg_scope
def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
param_initializers=None,
param_regularizers=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
batch_weights=None,
fused=False,
data_format=DATA_FORMAT_NHWC,
zero_debias_moving_mean=False,
scope=None):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
  Note: when `is_training` is `True` the `moving_mean` and `moving_variance`
  need to be updated. By default the update ops are placed in
  `tf.GraphKeys.UPDATE_OPS`, so they need to be added as a dependency to the
  `train_op`. For example:
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      train_op = optimizer.minimize(loss)
  One can set `updates_collections=None` to force the updates in place, but
  that can incur a speed penalty, especially in distributed settings.
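  As a normalizer function, an illustrative sketch (`images` and
  `is_training` are assumed to be defined by the caller):
    net = tf.contrib.layers.convolution2d(
        images, 64, [3, 3],
        normalizer_fn=tf.contrib.layers.batch_norm,
        normalizer_params={'is_training': is_training})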
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`. The normalization is over all but the last dimension if
`data_format` is `NHWC` and the second dimension if `data_format` is
`NCHW`.
    decay: Decay for the moving average. Reasonable values for `decay` are
      close to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9,
      etc. Lower the `decay` value (try `decay=0.9`) if the model has good
      training performance but poor validation and/or test performance. Try
      `zero_debias_moving_mean=True` for improved stability.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is not used. When
      the next layer is linear (or a piecewise-linear function such as
      `nn.relu`), this can be disabled since the scaling can be done by the
      next layer.
epsilon: Small float added to variance to avoid dividing by zero.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
param_initializers: Optional initializers for beta, gamma, moving mean and
moving variance.
param_regularizers: Optional regularizer for beta and gamma.
updates_collections: Collections to collect the update ops for computation.
The updates_ops need to be executed with the train_op.
If None, a control dependency would be added to make sure the updates are
computed in place.
is_training: Whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
batch_weights: An optional tensor of shape `[batch_size]`,
containing a frequency weight for each batch item. If present,
then the batch normalization uses weighted mean and
variance. (This can be used to correct for bias in training
example selection.)
fused: Use nn.fused_batch_norm if True, nn.batch_normalization otherwise.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
zero_debias_moving_mean: Use zero_debias for moving_mean. It creates a new
pair of variables 'moving_mean/biased' and 'moving_mean/local_step'.
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If `batch_weights` is not None and `fused` is True.
ValueError: If `param_regularizers` is not None and `fused` is True.
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: If the rank of `inputs` is undefined.
    ValueError: If the channels dimension of `inputs` is undefined.
"""
if fused:
if batch_weights is not None:
raise ValueError('Weighted mean and variance is not currently '
'supported for fused batch norm.')
if param_regularizers is not None:
raise ValueError('Regularizers are not currently '
'supported for fused batch norm.')
return _fused_batch_norm(
inputs,
decay=decay,
center=center,
scale=scale,
epsilon=epsilon,
activation_fn=activation_fn,
param_initializers=param_initializers,
updates_collections=updates_collections,
is_training=is_training,
reuse=reuse,
variables_collections=variables_collections,
outputs_collections=outputs_collections,
trainable=trainable,
data_format=data_format,
zero_debias_moving_mean=zero_debias_moving_mean,
scope=scope)
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
layer_variable_getter = _build_variable_getter()
with variable_scope.variable_scope(
scope, 'BatchNorm', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
# Determine whether we can use the core layer class.
if (batch_weights is None and
updates_collections is ops.GraphKeys.UPDATE_OPS and
not zero_debias_moving_mean):
# Use the core layer class.
axis = 1 if data_format == DATA_FORMAT_NCHW else -1
if not param_initializers:
param_initializers = {}
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer())
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer())
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
if not param_regularizers:
param_regularizers = {}
beta_regularizer = param_regularizers.get('beta')
gamma_regularizer = param_regularizers.get('gamma')
layer = normalization_layers.BatchNormalization(
axis=axis,
momentum=decay,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
trainable=trainable,
name=sc.name,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs, training=is_training)
# Add variables to collections.
_add_variable_to_collections(
layer.moving_mean, variables_collections, 'moving_mean')
_add_variable_to_collections(
layer.moving_variance, variables_collections, 'moving_variance')
      if layer.beta is not None:
_add_variable_to_collections(layer.beta, variables_collections, 'beta')
      if layer.gamma is not None:
_add_variable_to_collections(
layer.gamma, variables_collections, 'gamma')
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
# Not supported by layer class: batch_weights argument,
# and custom updates_collections. In that case, use the legacy BN
# implementation.
# Custom updates collections are not supported because the update logic
# is different in this case, in particular w.r.t. "forced updates" and
# update op reuse.
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
dtype = inputs.dtype.base_dtype
if batch_weights is not None:
batch_weights = ops.convert_to_tensor(batch_weights)
inputs_shape[0:1].assert_is_compatible_with(batch_weights.get_shape())
# Reshape batch weight values so they broadcast across inputs.
nshape = [-1] + [1 for _ in range(inputs_rank - 1)]
batch_weights = array_ops.reshape(batch_weights, nshape)
if data_format == DATA_FORMAT_NCHW:
moments_axes = [0] + list(range(2, inputs_rank))
params_shape = inputs_shape[1:2]
# For NCHW format, rather than relying on implicit broadcasting, we
# explicitly reshape the params to params_shape_broadcast when computing
# the moments and the batch normalization.
params_shape_broadcast = list(
[1, inputs_shape[1].value] + [1 for _ in range(2, inputs_rank)])
else:
moments_axes = list(range(inputs_rank - 1))
params_shape = inputs_shape[-1:]
params_shape_broadcast = None
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined channels dimension %s.' % (
inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if not param_initializers:
param_initializers = {}
if center:
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer())
beta = variables.model_variable('beta',
shape=params_shape,
dtype=dtype,
initializer=beta_initializer,
collections=beta_collections,
trainable=trainable)
if scale:
gamma_collections = utils.get_variable_collections(variables_collections,
'gamma')
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer())
gamma = variables.model_variable('gamma',
shape=params_shape,
dtype=dtype,
initializer=gamma_initializer,
collections=gamma_collections,
trainable=trainable)
# Create moving_mean and moving_variance variables and add them to the
    # appropriate collections. We disable variable partitioning while creating
# them, because assign_moving_average is not yet supported for partitioned
# variables.
partitioner = variable_scope.get_variable_scope().partitioner
try:
variable_scope.get_variable_scope().set_partitioner(None)
moving_mean_collections = utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_mean = variables.model_variable(
'moving_mean',
shape=params_shape,
dtype=dtype,
initializer=moving_mean_initializer,
trainable=False,
collections=moving_mean_collections)
moving_variance_collections = utils.get_variable_collections(
variables_collections, 'moving_variance')
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
moving_variance = variables.model_variable(
'moving_variance',
shape=params_shape,
dtype=dtype,
initializer=moving_variance_initializer,
trainable=False,
collections=moving_variance_collections)
finally:
variable_scope.get_variable_scope().set_partitioner(partitioner)
    # If `is_training` doesn't have a constant value (because it is a
    # `Tensor`, a `Variable` or a `Placeholder`), is_training_value will be
    # None and `need_moments` will be True.
is_training_value = utils.constant_value(is_training)
need_moments = is_training_value is None or is_training_value
if need_moments:
# Calculate the moments based on the individual batch.
if batch_weights is None:
if data_format == DATA_FORMAT_NCHW:
mean, variance = nn.moments(inputs, moments_axes, keep_dims=True)
mean = array_ops.reshape(mean, [-1])
variance = array_ops.reshape(variance, [-1])
else:
mean, variance = nn.moments(inputs, moments_axes)
else:
if data_format == DATA_FORMAT_NCHW:
mean, variance = nn.weighted_moments(inputs, moments_axes,
batch_weights, keep_dims=True)
mean = array_ops.reshape(mean, [-1])
variance = array_ops.reshape(variance, [-1])
else:
mean, variance = nn.weighted_moments(inputs, moments_axes,
batch_weights)
moving_vars_fn = lambda: (moving_mean, moving_variance)
if updates_collections is None:
def _force_updates():
"""Internal function forces updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
with ops.control_dependencies([update_moving_mean,
update_moving_variance]):
return array_ops.identity(mean), array_ops.identity(variance)
mean, variance = utils.smart_cond(is_training,
_force_updates,
moving_vars_fn)
else:
def _delay_updates():
"""Internal function that delay updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
return update_moving_mean, update_moving_variance
update_mean, update_variance = utils.smart_cond(is_training,
_delay_updates,
moving_vars_fn)
ops.add_to_collections(updates_collections, update_mean)
ops.add_to_collections(updates_collections, update_variance)
# Use computed moments during training and moving_vars otherwise.
vars_fn = lambda: (mean, variance)
mean, variance = utils.smart_cond(is_training, vars_fn, moving_vars_fn)
else:
mean, variance = moving_mean, moving_variance
if data_format == DATA_FORMAT_NCHW:
mean = array_ops.reshape(mean, params_shape_broadcast)
variance = array_ops.reshape(variance, params_shape_broadcast)
beta = array_ops.reshape(beta, params_shape_broadcast)
if gamma is not None:
gamma = array_ops.reshape(gamma, params_shape_broadcast)
# Compute batch_normalization.
outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
epsilon)
outputs.set_shape(inputs_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@add_arg_scope
def bias_add(inputs,
activation_fn=None,
initializer=init_ops.zeros_initializer(),
regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
data_format=DATA_FORMAT_NHWC,
scope=None):
"""Adds a bias to the inputs.
Can be used as a normalizer function for conv2d and fully_connected.
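  Example (an illustrative sketch; `net` is an assumed
  `[batch_size, height, width, channels]` tensor):
  ```python
  net = tf.contrib.layers.bias_add(net, activation_fn=tf.nn.relu)
  ```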
Args:
    inputs: A tensor with at least rank 2 and a static value for the last
      dimension, e.g. `[batch_size, depth]`, `[None, None, None, depth]`.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
initializer: An initializer for the bias, defaults to 0.
regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
data_format: A string. 'NHWC' and 'NCHW' are supported.
scope: Optional scope for variable_scope.
Returns:
A tensor representing the result of adding biases to the inputs.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If `data_format` is `NCHW` and rank of `inputs` is not 4.
ValueError: If the rank of `inputs` is undefined.
ValueError: If rank or `C` dimension of `inputs` is undefined.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with variable_scope.variable_scope(scope, 'BiasAdd', [inputs],
reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
dtype = inputs.dtype.base_dtype
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
      raise ValueError('Rank of inputs must be known but is None.')
elif inputs_rank != 4 and data_format == DATA_FORMAT_NCHW:
raise ValueError('Data format NCHW only supports 4D Tensor')
axis = 1 if data_format == DATA_FORMAT_NCHW else -1
num_features = inputs_shape[axis].value
if num_features is None:
raise ValueError('`C` dimension must be known but is None')
biases_collections = utils.get_variable_collections(variables_collections,
'biases')
biases = variables.model_variable('biases',
shape=[num_features,],
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(inputs, biases, data_format=data_format)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
# TODO(jbms): change `rate` parameter to `dilation_rate` for consistency with
# underlying op.
@add_arg_scope
def convolution(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds an N-D convolution followed by an optional batch_norm layer.
It is required that 1 <= N <= 3.
`convolution` creates a variable called `weights`, representing the
convolutional kernel, that is convolved (actually cross-correlated) with the
`inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
provided (such as `batch_norm`), it is then applied. Otherwise, if
  `normalizer_fn` is None and a `biases_initializer` is provided then a
  `biases` variable would be created and added to the activations. Finally, if
`activation_fn` is not `None`, it is applied to the activations as well.
  Performs atrous convolution with input stride/dilation rate equal to `rate`
  if a value > 1 for any dimension of `rate` is specified. In this case
  `stride` values != 1 are not supported.
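  Example (an illustrative sketch; `images` is an assumed
  `[batch_size, height, width, channels]` tensor):
  ```python
  net = tf.contrib.layers.convolution2d(images, 32, [3, 3], scope='conv1')
  # A dilated (atrous) 3x3 convolution; `stride` must stay 1 when rate > 1.
  net = tf.contrib.layers.convolution2d(net, 32, [3, 3], rate=2,
                                        scope='conv2')
  ```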
Args:
inputs: A Tensor of rank N+2 of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
num_outputs: Integer, the number of output filters.
    kernel_size: A sequence of N positive integers specifying the spatial
      dimensions of the filters. Can be a single integer to specify the same
      value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
spatial dimensions. Specifying any `stride` value != 1 is incompatible
with specifying any `rate` value != 1.
padding: One of `"VALID"` or `"SAME"`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, currently the only valid value is "NDHWC".
    rate: A sequence of N positive integers specifying the dilation rate to use
      for atrous convolution. Can be a single integer to specify the same
value for all spatial dimensions. Specifying any `rate` value != 1 is
incompatible with specifying any `stride` value != 1.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
    ValueError: If `rate` and `stride` both have values other than 1.
"""
if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC']:
raise ValueError('Invalid data_format: %r' % (data_format,))
layer_variable_getter = _build_variable_getter(
{'bias': 'biases', 'kernel': 'weights'})
with variable_scope.variable_scope(
scope, 'Conv', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if input_rank == 3:
layer_class = convolutional_layers.Convolution1D
elif input_rank == 4:
layer_class = convolutional_layers.Convolution2D
elif input_rank == 5:
layer_class = convolutional_layers.Convolution3D
else:
      raise ValueError('Convolution not supported for input with rank %s.'
                       % input_rank)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = layer_class(filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
dilation_rate=rate,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.use_bias:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
convolution2d = convolution
@add_arg_scope
def convolution2d_in_plane(
inputs,
kernel_size,
stride=1,
padding='SAME',
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Performs the same in-plane convolution to each channel independently.
This is useful for performing various simple channel-independent convolution
operations such as image gradients:
  image = tf.constant(..., shape=(16, 240, 320, 3))
  # Each output channel is produced by the same learned in-plane kernel;
  # a [2, 1] (or [1, 2]) kernel can represent a [1, -1] gradient filter.
  vert_gradients = layers.conv2d_in_plane(image, kernel_size=[2, 1])
  horz_gradients = layers.conv2d_in_plane(image, kernel_size=[1, 2])
Args:
inputs: A 4-D tensor with dimensions [batch_size, height, width, channels].
    kernel_size: A list of length 2 holding the [kernel_height, kernel_width]
      of the pooling. Can be an int if both values are the same.
stride: A list of length 2 `[stride_height, stride_width]`.
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: The padding type to use, either 'SAME' or 'VALID'.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
"""
with variable_scope.variable_scope(
scope, 'ConvInPlane', [inputs], reuse=reuse) as sc:
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
weights_shape = [kernel_h, kernel_w, 1, 1]
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
weights = variables.model_variable('weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
collections=weights_collections,
trainable=trainable)
depthwise_weights = array_ops.tile(weights, [1, 1, num_filters_in, 1])
outputs = nn.depthwise_conv2d(inputs, depthwise_weights,
[1, stride_h, stride_w, 1], padding)
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_filters_in,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(outputs, biases)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@add_arg_scope
def convolution2d_transpose(
inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=DATA_FORMAT_NHWC,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a convolution2d_transpose with an optional batch normalization layer.
The function creates a variable called `weights`, representing the
kernel, that is convolved with the input. If `batch_norm_params` is `None`, a
second variable called 'biases' is added to the result of the operation.
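  Example (an illustrative sketch; `net` is an assumed `NHWC` tensor, and a
  stride of 2 roughly doubles its spatial resolution):
  ```python
  net = tf.contrib.layers.convolution2d_transpose(net, 64, [4, 4], stride=2)
  ```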
Args:
inputs: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
num_outputs: Integer, the number of output filters.
    kernel_size: A list of length 2 holding the [kernel_height, kernel_width]
      of the filters. Can be an int if both values are the same.
stride: A list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: One of 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
    trainable: Whether or not the variables should be trainable.
scope: Optional scope for variable_scope.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If 'kernel_size' is not a list of length 2.
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If `C` dimension of `inputs` is None.
"""
layer_variable_getter = _build_variable_getter(
{'bias': 'biases', 'kernel': 'weights'})
with variable_scope.variable_scope(
scope, 'Conv2d_transpose', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = convolutional_layers.Convolution2DTranspose(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
    if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@add_arg_scope
def dropout(inputs,
keep_prob=0.5,
noise_shape=None,
is_training=True,
outputs_collections=None,
scope=None):
"""Returns a dropout op applied to the input.
With probability `keep_prob`, outputs the input element scaled up by
`1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
sum is unchanged.
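  Example (an illustrative sketch; `net` and the boolean `is_training` are
  assumed to be defined by the caller):
  ```python
  net = tf.contrib.layers.dropout(net, keep_prob=0.8,
                                  is_training=is_training)
  ```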
Args:
inputs: The tensor to pass to the nn.dropout op.
keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
is_training: A bool `Tensor` indicating whether or not the model
is in training mode. If so, dropout is applied and values scaled.
Otherwise, inputs is returned.
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
Returns:
A tensor representing the output of the operation.
"""
with variable_scope.variable_scope(
scope, 'Dropout', [inputs], custom_getter=_model_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
layer = core_layers.Dropout(rate=1 - keep_prob,
noise_shape=noise_shape,
name=sc.name,
_scope=sc)
outputs = layer.apply(inputs, training=is_training)
return utils.collect_named_outputs(
outputs_collections, sc.original_name_scope, outputs)
@add_arg_scope
def flatten(inputs,
outputs_collections=None,
scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
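  Example (an illustrative sketch; `net` is an assumed convolutional
  feature map):
  ```python
  # A [32, 7, 7, 64] input becomes a [32, 3136] output.
  net = tf.contrib.layers.flatten(net)
  ```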
Args:
inputs: A tensor of size [batch_size, ...].
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
Returns:
A flattened tensor with shape [batch_size, k].
Raises:
ValueError: If inputs rank is unknown or less than 2.
"""
with ops.name_scope(scope, 'Flatten', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
inputs_rank = inputs.get_shape().ndims
if (inputs_rank is None) or (inputs_rank < 2):
      raise ValueError('Inputs must have at least 2 dimensions.')
inputs_shape = array_ops.shape(inputs)
batch_dim = array_ops.slice(inputs_shape, [0], [1])
spatial_dims = array_ops.slice(inputs_shape, [1], [inputs_rank - 1])
flat_spatial_dim = math_ops.reduce_prod(spatial_dims)
flat_spatial_dim = array_ops.expand_dims(flat_spatial_dim, 0)
flat_shape = array_ops.concat([batch_dim, flat_spatial_dim], 0)
outputs = array_ops.reshape(inputs, flat_shape)
# Attempt to propagate shape information, if it is defined.
input_shape = inputs.get_shape().as_list()
batch_dim, spatial_dims = input_shape[0], input_shape[1:]
if all(spatial_dims):
outputs.set_shape([batch_dim,
functools.reduce(lambda x, y: x * y, spatial_dims)])
else:
outputs.set_shape([batch_dim, None])
return utils.collect_named_outputs(outputs_collections, sc, outputs)
def _sparse_inner_flatten(inputs, new_rank):
"""Helper function for `inner_flatten`."""
outer_dimensions = inputs.dense_shape[:new_rank - 1]
inner_dimensions = inputs.dense_shape[new_rank - 1:]
new_shape = array_ops.concat((outer_dimensions,
[math_ops.reduce_prod(inner_dimensions)]), 0)
flattened = sparse_ops.sparse_reshape(inputs, new_shape)
return flattened
def _dense_inner_flatten(inputs, new_rank):
"""Helper function for `inner_flatten`."""
rank_assertion = check_ops.assert_rank_at_least(
inputs, new_rank, message='inputs has rank less than new_rank')
with ops.control_dependencies([rank_assertion]):
outer_dimensions = array_ops.strided_slice(
array_ops.shape(inputs), [0], [new_rank - 1])
new_shape = array_ops.concat((outer_dimensions, [-1]), 0)
reshaped = array_ops.reshape(inputs, new_shape)
# if `new_rank` is an integer, try to calculate new shape.
if isinstance(new_rank, six.integer_types):
static_shape = inputs.get_shape()
if static_shape is not None and static_shape.dims is not None:
static_shape = static_shape.as_list()
static_outer_dims = static_shape[:new_rank - 1]
static_inner_dims = static_shape[new_rank - 1:]
flattened_dimension = 1
for inner_dim in static_inner_dims:
if inner_dim is None:
flattened_dimension = None
break
flattened_dimension *= inner_dim
reshaped.set_shape(static_outer_dims + [flattened_dimension])
return reshaped
@add_arg_scope
def _inner_flatten(inputs, new_rank, output_collections=None, scope=None):
"""Flattens inner dimensions of `inputs`, returns a Tensor with `new_rank`.
For example:
'''
x = tf.random_uniform(shape=[1, 2, 3, 4, 5, 6])
y = _inner_flatten(x, 4)
assert y.get_shape().as_list() == [1, 2, 3, (4 * 5 * 6)]
'''
This layer will fail at run time if `new_rank` is greater than the current
rank of `inputs`.
Args:
inputs: A `Tensor` or `SparseTensor`.
new_rank: The desired rank of the returned `Tensor` or `SparseTensor`.
output_collections: Collection to which the outputs will be added.
scope: Optional scope for `name_scope`.
Returns:
    A `Tensor` or `SparseTensor` containing the same values as `inputs`, but
    with innermost dimensions flattened to obtain rank `new_rank`.
Raises:
TypeError: `inputs` is not a `Tensor` or `SparseTensor`.
"""
with ops.name_scope(scope, 'InnerFlatten', [inputs, new_rank]) as sc:
if isinstance(inputs, sparse_tensor.SparseTensor):
flattened = _sparse_inner_flatten(inputs, new_rank)
else:
inputs = ops.convert_to_tensor(inputs)
flattened = _dense_inner_flatten(inputs, new_rank)
return utils.collect_named_outputs(output_collections, sc, flattened)
def _model_variable_getter(getter, name, shape=None, dtype=None,
initializer=None, regularizer=None, trainable=True,
collections=None, caching_device=None,
partitioner=None, rename=None, use_resource=None,
**_):
"""Getter that uses model_variable for compatibility with core layers."""
short_name = name.split('/')[-1]
if rename and short_name in rename:
name_components = name.split('/')
name_components[-1] = rename[short_name]
name = '/'.join(name_components)
return variables.model_variable(
name, shape=shape, dtype=dtype, initializer=initializer,
regularizer=regularizer, collections=collections, trainable=trainable,
caching_device=caching_device, partitioner=partitioner,
custom_getter=getter, use_resource=use_resource)
def _build_variable_getter(rename=None):
"""Build a model variable getter that respects scope getter and renames."""
# VariableScope will nest the getters
def layer_variable_getter(getter, *args, **kwargs):
kwargs['rename'] = rename
return _model_variable_getter(getter, *args, **kwargs)
return layer_variable_getter
def _add_variable_to_collections(variable, collections_set, collections_name):
"""Adds variable (or all its parts) to all collections with that name."""
collections = utils.get_variable_collections(
collections_set, collections_name) or []
variables_list = [variable]
if isinstance(variable, tf_variables.PartitionedVariable):
variables_list = [v for v in variable]
for collection in collections:
for var in variables_list:
if var not in ops.get_collection(collection):
ops.add_to_collection(collection, var)
@add_arg_scope
def fully_connected(inputs,
num_outputs,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a fully connected layer.
`fully_connected` creates a variable called `weights`, representing a fully
connected weight matrix, which is multiplied by the `inputs` to produce a
`Tensor` of hidden units. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
  None and a `biases_initializer` is provided then a `biases` variable would be
  created and added to the hidden units. Finally, if `activation_fn` is not
  `None`, it is applied to the hidden units as well.
  Note that if `inputs` has a rank greater than 2, then `inputs` is flattened
  prior to the initial matrix multiply by `weights`.
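  Example (an illustrative two-layer sketch; `inputs` is an assumed
  `[batch_size, depth]` tensor):
  ```python
  net = tf.contrib.layers.fully_connected(inputs, 256)
  logits = tf.contrib.layers.fully_connected(net, 10, activation_fn=None)
  ```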
Args:
inputs: A tensor of at least rank 2 and static value for the last dimension;
i.e. `[batch_size, depth]`, `[None, None, None, channels]`.
num_outputs: Integer or long, the number of output units in the layer.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
The tensor variable representing the result of the series of operations.
Raises:
ValueError: If x has rank less than 2 or if its last dimension is not set.
"""
if not isinstance(num_outputs, six.integer_types):
    raise ValueError(
        'num_outputs should be int or long, got %s.' % (num_outputs,))
layer_variable_getter = _build_variable_getter({'bias': 'biases',
'kernel': 'weights'})
with variable_scope.variable_scope(
scope, 'fully_connected', [inputs],
reuse=reuse, custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
layer = core_layers.Dense(
units=num_outputs,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
# Apply normalizer function / layer.
if normalizer_fn is not None:
if not normalizer_params:
normalizer_params = {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(
outputs_collections, sc.original_name_scope, outputs)
@add_arg_scope
def layer_norm(inputs,
center=True,
scale=True,
activation_fn=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a Layer Normalization layer from https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
Can be used as a normalizer function for conv2d and fully_connected.
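  Example (an illustrative sketch; `net` is an assumed
  `[batch_size, depth]` tensor):
  ```python
  net = tf.contrib.layers.layer_norm(net, scope='layer_norm')
  ```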
Args:
inputs: A tensor with 2 or more dimensions. The normalization
occurs over all but the first dimension.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is not used. When
      the next layer is linear (or a piecewise-linear function such as
      `nn.relu`), this can be disabled since the scaling can be done by the
      next layer.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If rank or last dimension of `inputs` is undefined.
"""
with variable_scope.variable_scope(scope, 'LayerNorm', [inputs],
reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
dtype = inputs.dtype.base_dtype
axis = list(range(1, inputs_rank))
params_shape = inputs_shape[-1:]
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined last dimension %s.' % (
inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
beta = variables.model_variable(
'beta',
shape=params_shape,
dtype=dtype,
initializer=init_ops.zeros_initializer(),
collections=beta_collections,
trainable=trainable)
if scale:
gamma_collections = utils.get_variable_collections(variables_collections,
'gamma')
gamma = variables.model_variable(
'gamma',
shape=params_shape,
dtype=dtype,
initializer=init_ops.ones_initializer(),
collections=gamma_collections,
trainable=trainable)
# Calculate the moments on the last axis (layer activations).
mean, variance = nn.moments(inputs, axis, keep_dims=True)
# Compute layer normalization using the batch_normalization function.
variance_epsilon = 1E-12
outputs = nn.batch_normalization(
inputs, mean, variance, beta, gamma, variance_epsilon)
outputs.set_shape(inputs_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope,
outputs)
@add_arg_scope
def max_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
"""Adds a 2D Max Pooling op.
It is assumed that the pooling is done per image but not in batch or channels.
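  Example (an illustrative sketch; a 2x2 window with stride 2 halves the
  height and width of an `NHWC` input):
  ```python
  net = tf.contrib.layers.max_pool2d(net, [2, 2], scope='pool1')
  ```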
Args:
inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
`data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
`data_format` is `NCHW`.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: A list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: If `kernel_size` is not a list of length 2.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with ops.name_scope(scope, 'MaxPool2D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = pooling_layers.MaxPooling2D(pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
_scope=sc)
outputs = layer.apply(inputs)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
@add_arg_scope
def pool(inputs,
kernel_size,
pooling_type,
padding='VALID',
data_format=None,
dilation_rate=1,
stride=1,
outputs_collections=None,
scope=None):
# pylint: disable=line-too-long
"""Adds a pooling op.
Args:
inputs: Tensor of rank N+2, of shape
`[batch_size] + input_spatial_shape + [num_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
kernel_size: Sequence of N ints >= 1. Can also be a single integer to
specify the same value for all spatial dimensions.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
padding: The padding algorithm, must be "SAME" or "VALID".
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, currently the only valid value is "NDHWC".
dilation_rate: Optional. Dilation rate. Sequence of N ints >= 1. Defaults
to [1]*N. Can also be a single integer to specify the same value for all
spatial dimensions. If any value of dilation_rate is > 1, then all values
of stride must be 1.
stride: Optional. Sequence of N ints >= 1. Defaults to [1]*N. Can also be
a single integer to specify the same value for all spatial dimensions. If
any value of stride is > 1, then all values of dilation_rate must be 1.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If arguments are invalid.
"""
# pylint: enable=line-too-long
with ops.name_scope(scope, '%s_pool' %
(pooling_type.lower()), [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if input_rank is None:
raise ValueError('Rank of inputs must be known')
if input_rank < 3:
raise ValueError('Rank of inputs must be >= 3')
num_spatial_dims = input_rank - 2
output = nn.pool(
input=inputs,
window_shape=utils.n_positive_integers(num_spatial_dims, kernel_size),
pooling_type=pooling_type,
padding=padding,
data_format=data_format,
dilation_rate=utils.n_positive_integers(num_spatial_dims,
dilation_rate),
strides=utils.n_positive_integers(num_spatial_dims, stride),
name=sc)
return utils.collect_named_outputs(outputs_collections, sc, output)
@add_arg_scope
def one_hot_encoding(labels,
num_classes,
on_value=1.0,
off_value=0.0,
outputs_collections=None,
scope=None):
"""Transform numeric labels into onehot_labels using `tf.one_hot`.
Args:
labels: [batch_size] target labels.
num_classes: Total number of classes.
on_value: A scalar defining the on-value.
off_value: A scalar defining the off-value.
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
Returns:
One-hot encoding of the labels.
"""
with ops.name_scope(scope, 'OneHotEncoding', [labels, num_classes]) as sc:
labels = ops.convert_to_tensor(labels)
if labels.dtype == dtypes.int32:
labels = standard_ops.to_int64(labels)
outputs = standard_ops.one_hot(labels,
num_classes,
on_value=on_value,
off_value=off_value)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
def _apply_activation(y, activation_fn, output_collections):
if activation_fn is not None:
y = activation_fn(y)
ops.add_to_collections(list(output_collections or []) +
[ops.GraphKeys.ACTIVATIONS], y)
return y
def repeat(inputs, repetitions, layer, *args, **kwargs):
"""Applies the same layer with the same arguments repeatedly.
```python
y = repeat(x, 3, conv2d, 64, [3, 3], scope='conv1')
# It is equivalent to:
x = conv2d(x, 64, [3, 3], scope='conv1/conv1_1')
x = conv2d(x, 64, [3, 3], scope='conv1/conv1_2')
y = conv2d(x, 64, [3, 3], scope='conv1/conv1_3')
```
If the `scope` argument is not given in `kwargs`, it is set to
`layer.__name__`, or `layer.func.__name__` (for `functools.partial`
objects). If neither `__name__` nor `func.__name__` is available, the
  layers are called with `scope='repeat'`.
Args:
inputs: A `Tensor` suitable for layer.
repetitions: Int, number of repetitions.
layer: A layer with arguments `(inputs, *args, **kwargs)`
*args: Extra args for the layer.
**kwargs: Extra kwargs for the layer.
Returns:
A tensor result of applying the layer, repetitions times.
Raises:
ValueError: If the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
with variable_scope.variable_scope(scope, 'Repeat', [inputs]):
inputs = ops.convert_to_tensor(inputs)
if scope is None:
if hasattr(layer, '__name__'):
scope = layer.__name__
elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
scope = layer.func.__name__ # In case layer is a functools.partial.
else:
scope = 'repeat'
outputs = inputs
for i in range(repetitions):
kwargs['scope'] = scope + '_' + str(i+1)
outputs = layer(outputs, *args, **kwargs)
return outputs
@add_arg_scope
def separable_convolution2d(
inputs,
num_outputs,
kernel_size,
depth_multiplier,
stride=1,
padding='SAME',
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a depth-separable 2D convolution with optional batch_norm layer.
This op first performs a depthwise convolution that acts separately on
channels, creating a variable called `depthwise_weights`. If `num_outputs`
is not None, it adds a pointwise convolution that mixes channels, creating a
variable called `pointwise_weights`. Then, if `batch_norm_params` is None,
it adds bias to the result, creating a variable called 'biases', otherwise
it adds a batch normalization layer. It finally applies an activation function
to produce the end result.
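  Example (an illustrative sketch; `net` is an assumed
  `[batch_size, height, width, channels]` tensor):
  ```python
  net = tf.contrib.layers.separable_convolution2d(
      net, num_outputs=64, kernel_size=[3, 3], depth_multiplier=1)
  # With num_outputs=None the pointwise stage is skipped (depthwise only).
  net = tf.contrib.layers.separable_convolution2d(
      net, num_outputs=None, kernel_size=[3, 3], depth_multiplier=1)
  ```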
Args:
inputs: A tensor of size [batch_size, height, width, channels].
    num_outputs: The number of pointwise convolution output filters. If it is
      None, then the pointwise convolution stage is skipped.
    kernel_size: A list of length 2: [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
stride: A list of length 2: [stride_height, stride_width], specifying the
depthwise convolution stride. Can be an int if both strides are the same.
padding: One of 'VALID' or 'SAME'.
    rate: A list of length 2: [rate_height, rate_width], specifying the dilation
      rates for atrous convolution. Can be an int if both rates are the same.
If any value is larger than one, then both stride values need to be one.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
    trainable: Whether or not the variables should be trainable.
scope: Optional scope for variable_scope.
Returns:
A `Tensor` representing the output of the operation.
"""
layer_variable_getter = _build_variable_getter(
{'bias': 'biases',
'depthwise_kernel': 'depthwise_weights',
'pointwise_kernel': 'pointwise_weights'})
with variable_scope.variable_scope(
scope, 'SeparableConv2d', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
if num_outputs is not None:
# Apply separable conv using the SeparableConvolution2D layer.
layer = convolutional_layers.SeparableConvolution2D(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format='channels_last',
dilation_rate=utils.two_element_tuple(rate),
activation=None,
depth_multiplier=depth_multiplier,
use_bias=not normalizer_fn and biases_initializer,
depthwise_initializer=weights_initializer,
pointwise_initializer=weights_initializer,
bias_initializer=biases_initializer,
depthwise_regularizer=weights_regularizer,
pointwise_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.depthwise_kernel,
variables_collections, 'weights')
_add_variable_to_collections(layer.pointwise_kernel,
variables_collections, 'weights')
      if layer.bias is not None:
_add_variable_to_collections(layer.bias,
variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
# Actually apply depthwise conv instead of separable conv.
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
depthwise_shape = [kernel_h, kernel_w,
num_filters_in, depth_multiplier]
depthwise_weights = variables.model_variable(
'depthwise_weights',
shape=depthwise_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable,
collections=weights_collections)
strides = [1, stride_h, stride_w, 1]
outputs = nn.depthwise_conv2d(inputs, depthwise_weights, strides, padding,
rate=utils.two_element_tuple(rate))
num_outputs = depth_multiplier * num_filters_in
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_outputs,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections)
outputs = nn.bias_add(outputs, biases)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
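# Illustrative sketch (not part of the original file): a depthwise-separable
# 3x3 convolution producing 64 output channels, assuming `images` is a 4-D
# NHWC tensor; with num_outputs=None the pointwise stage is skipped and the
# result keeps num_filters_in * depth_multiplier channels.
#
#   net = separable_convolution2d(images, num_outputs=64, kernel_size=[3, 3],
#                                 depth_multiplier=1, stride=1, scope='sep1')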
@add_arg_scope
def softmax(logits, scope=None):
"""Performs softmax on Nth dimension of N-dimensional logit tensor.
For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension
needs to have a specified number of elements (number of classes).
Args:
logits: N-dimensional `Tensor` with logits, where N > 1.
scope: Optional scope for variable_scope.
Returns:
A `Tensor` with same shape and type as logits.
"""
# TODO(jrru): Add axis argument which defaults to last dimension.
with variable_scope.variable_scope(scope, 'softmax', [logits]):
num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)
logits_2d = array_ops.reshape(logits, [-1, num_logits])
predictions = nn.softmax(logits_2d)
predictions = array_ops.reshape(predictions, array_ops.shape(logits))
predictions.set_shape(logits.get_shape())
return predictions
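# Illustrative sketch (not part of the original file): softmax over the last
# dimension of a rank-3 logits tensor; the helper reshapes to 2-D internally
# and then restores the original shape.
#
#   logits = array_ops.placeholder(dtypes.float32, [None, 10, 5])
#   probs = softmax(logits)  # same shape; entries along the last axis sum to 1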
def stack(inputs, layer, stack_args, **kwargs):
"""Builds a stack of layers by applying layer repeatedly using stack_args.
`stack` allows you to repeatedly apply the same operation with different
arguments `stack_args[i]`. For each application of the layer, `stack` creates
a new scope appended with an increasing number. For example:
```python
y = stack(x, fully_connected, [32, 64, 128], scope='fc')
# It is equivalent to:
x = fully_connected(x, 32, scope='fc/fc_1')
x = fully_connected(x, 64, scope='fc/fc_2')
y = fully_connected(x, 128, scope='fc/fc_3')
```
If the `scope` argument is not given in `kwargs`, it is set to
`layer.__name__`, or `layer.func.__name__` (for `functools.partial`
objects). If neither `__name__` nor `func.__name__` is available, the
layers are called with `scope='stack'`.
Args:
inputs: A `Tensor` suitable for layer.
layer: A layer with arguments `(inputs, *args, **kwargs)`
stack_args: A list/tuple of parameters for each call of layer.
**kwargs: Extra kwargs for the layer.
Returns:
A `Tensor` result of applying the stacked layers.
Raises:
ValueError: If the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
if not isinstance(stack_args, (list, tuple)):
    raise ValueError('stack_args needs to be a list or tuple')
with variable_scope.variable_scope(scope, 'Stack', [inputs]):
inputs = ops.convert_to_tensor(inputs)
if scope is None:
if hasattr(layer, '__name__'):
scope = layer.__name__
elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
scope = layer.func.__name__ # In case layer is a functools.partial.
else:
scope = 'stack'
outputs = inputs
for i in range(len(stack_args)):
kwargs['scope'] = scope + '_' + str(i+1)
layer_args = stack_args[i]
if not isinstance(layer_args, (list, tuple)):
layer_args = [layer_args]
outputs = layer(outputs, *layer_args, **kwargs)
return outputs
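# Illustrative note (not part of the original file): per-layer tuples are
# splatted as positional arguments, so stacking layers with several varying
# arguments also works:
#
#   y = stack(x, conv2d, [(32, [3, 3]), (64, [3, 3])], scope='conv')
#   # equivalent to conv2d(x, 32, [3, 3], scope='conv/conv_1') followed by
#   # conv2d(..., 64, [3, 3], scope='conv/conv_2')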
@add_arg_scope
def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
"""Normalizes the given input across the specified dimension to unit length.
Note that the rank of `input` must be known.
Args:
inputs: A `Tensor` of arbitrary size.
dim: The dimension along which the input is normalized.
epsilon: A small value to add to the inputs to avoid dividing by zero.
scope: Optional scope for variable_scope.
Returns:
The normalized `Tensor`.
Raises:
    ValueError: If dim is negative or not smaller than the rank of 'inputs'.
"""
with variable_scope.variable_scope(scope, 'UnitNorm', [inputs]):
if not inputs.get_shape():
raise ValueError('The input rank must be known.')
input_rank = len(inputs.get_shape().as_list())
    if dim < 0 or dim >= input_rank:
      raise ValueError(
          'dim must be non-negative but smaller than the input rank.')
lengths = math_ops.sqrt(epsilon + math_ops.reduce_sum(
math_ops.square(inputs), dim, True))
multiples = []
if dim > 0:
multiples.append(array_ops.ones([dim], dtypes.int32))
multiples.append(
array_ops.strided_slice(array_ops.shape(inputs), [dim], [dim + 1]))
if dim < (input_rank - 1):
multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
multiples = array_ops.concat(multiples, 0)
return math_ops.div(inputs, array_ops.tile(lengths, multiples))
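# Illustrative sketch (not part of the original file): L2-normalizing each row
# of a [batch, features] tensor:
#
#   embeddings = array_ops.placeholder(dtypes.float32, [None, 128])
#   unit = unit_norm(embeddings, dim=1)  # each row now has norm 1 (up to epsilon)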
def legacy_fully_connected(x,
num_output_units,
activation_fn=None,
weight_init=initializers.xavier_initializer(),
bias_init=init_ops.zeros_initializer(),
name=None,
weight_collections=(ops.GraphKeys.WEIGHTS,),
bias_collections=(ops.GraphKeys.BIASES,),
output_collections=(ops.GraphKeys.ACTIVATIONS,),
trainable=True,
weight_regularizer=None,
bias_regularizer=None):
# pylint: disable=anomalous-backslash-in-string
r"""Adds the parameters for a fully connected layer and returns the output.
A fully connected layer is generally defined as a matrix multiply:
`y = f(w * x + b)` where `f` is given by `activation_fn`. If
`activation_fn` is `None`, the result of `y = w * x + b` is
returned.
If `x` has shape [\\\(\\text{dim}_0, \\text{dim}_1, ..., \\text{dim}_n\\\)]
with more than 2 dimensions (\\\(n > 1\\\)), then we repeat the matrix
multiply along the first dimensions. The result r is a tensor of shape
[\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`],
where \\\( r_{i_0, ..., i_{n-1}, k} =
\\sum_{0 \\leq j < \\text{dim}_n} x_{i_0, ... i_{n-1}, j} \cdot w_{j, k}\\\).
This is accomplished by reshaping `x` to 2-D
[\\\(\\text{dim}_0 \\cdot ... \\cdot \\text{dim}_{n-1}, \\text{dim}_n\\\)]
before the matrix multiply and afterwards reshaping it to
[\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`].
This op creates `w` and optionally `b`. Bias (`b`) can be disabled by setting
`bias_init` to `None`.
The variable creation is compatible with `tf.variable_scope` and so can be
reused with `tf.variable_scope` or `tf.make_template`.
Most of the details of variable creation can be controlled by specifying the
initializers (`weight_init` and `bias_init`) and in which collections to place
the created variables (`weight_collections` and `bias_collections`; note that
the variables are always added to the `VARIABLES` collection). The output of
the layer can be placed in custom collections using `output_collections`.
The collections arguments default to `WEIGHTS`, `BIASES` and `ACTIVATIONS`,
respectively.
A per layer regularization can be specified by setting `weight_regularizer`
and `bias_regularizer`, which are applied to the weights and biases
respectively, and whose output is added to the `REGULARIZATION_LOSSES`
collection.
Args:
x: The input `Tensor`.
num_output_units: The size of the output.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
weight_init: An optional weight initialization, defaults to
`xavier_initializer`.
bias_init: An initializer for the bias, defaults to 0. Set to `None` in
order to disable bias.
name: The name for this operation is used to name operations and to find
variables. If specified it must be unique for this scope, otherwise a
unique name starting with "fully_connected" will be created. See
`tf.variable_scope` for details.
weight_collections: List of graph collections to which weights are added.
bias_collections: List of graph collections to which biases are added.
output_collections: List of graph collections to which outputs are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
weight_regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`. Used for weights.
bias_regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`. Used for biases.
Returns:
The output of the fully connected layer.
Raises:
ValueError: If x has rank less than 2 or if its last dimension is not set.
"""
with variable_scope.variable_scope(name, 'fully_connected', [x]):
x = ops.convert_to_tensor(x)
dims = x.get_shape().dims
if dims is None:
raise ValueError('dims of x must be known but is None')
if len(dims) < 2:
raise ValueError('rank of x must be at least 2 not: %d' % len(dims))
num_input_units = dims[-1].value
if num_input_units is None:
raise ValueError('last dimension of x must be known but is None')
dtype = x.dtype.base_dtype
weight_collections = set(list(weight_collections or []) +
[ops.GraphKeys.GLOBAL_VARIABLES])
w = variable_scope.get_variable('weights',
shape=[num_input_units, num_output_units],
dtype=dtype,
initializer=weight_init,
collections=weight_collections,
regularizer=weight_regularizer,
trainable=trainable)
x_2_dim = x if len(dims) <= 2 else array_ops.reshape(x,
[-1, num_input_units])
y = standard_ops.matmul(x_2_dim, w)
if bias_init is not None:
bias_collections = set(list(bias_collections or []) +
[ops.GraphKeys.GLOBAL_VARIABLES])
b = variable_scope.get_variable('bias',
shape=[num_output_units],
dtype=dtype,
initializer=bias_init,
collections=bias_collections,
regularizer=bias_regularizer,
trainable=trainable)
y = nn.bias_add(y, b)
if len(dims) > 2:
out_shape = array_ops.unstack(array_ops.shape(x))
out_shape[-1] = num_output_units
y = array_ops.reshape(y, array_ops.stack(out_shape))
static_shape = x.get_shape().as_list()
static_shape[-1] = num_output_units
y.set_shape(static_shape)
return _apply_activation(y, activation_fn, output_collections)
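# Illustrative note (not part of the original file): for the higher-rank case
# described above, an input of shape [2, 3, 5] with num_output_units=7 is
# reshaped to [6, 5], multiplied by a [5, 7] weight matrix, and reshaped back
# to [2, 3, 7].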
# TODO(eiderm): Verify and fix autocomplete in colab (also relu6).
# Simple aliases which remove the activation_fn parameter.
legacy_relu = functools.partial(legacy_fully_connected, activation_fn=nn.relu)
legacy_linear = functools.partial(legacy_fully_connected, activation_fn=None)
relu = functools.partial(fully_connected, activation_fn=nn.relu)
relu6 = functools.partial(fully_connected, activation_fn=nn.relu6)
linear = functools.partial(fully_connected, activation_fn=None)
# Simple alias.
conv2d = convolution2d
conv2d_transpose = convolution2d_transpose
conv2d_in_plane = convolution2d_in_plane
separable_conv2d = separable_convolution2d
| apache-2.0 | -7,573,405,888,971,894,000 | 42.89811 | 80 | 0.645266 | false |
dataflow/DataStage | datastage/dataset/longliving/sword_statement_check.py | 1 | 4734 | import logging
import time
import thread
import urllib2
import sys
import datetime
from django_longliving.base import LonglivingThread
from datastage.dataset import SUBMISSION_QUEUE
from datastage.web.dataset.models import DatasetSubmission
from datastage.web.dataset import openers
from sword2 import Connection, UrlLib2Layer
logger = logging.getLogger(__name__)
# list of all the error states that we can see in the statement that we want
# to be able to react to
ERROR_STATES = [
"http://databank.ox.ac.uk/errors/UnzippingIssue"
]
# NOTE: this thread is resistant to being stopped. A KeyboardInterrupt will
# NOT suffice; it will need to be killed with a "kill <pid>" on the command
# line
class SwordStatementCheckThread(LonglivingThread):
    # FIXME: not quite sure how the __init__ function on LonglivingThread
    # works, so setting these as class variables for the time being
# this is how long the thread will sleep between requests (in seconds)
throttle = 5
# this is how long the thread will sleep between retrys (in seconds)
retry_delay = 3
# This is how long the thread will sleep between entire batches of updates.
# This is particularly useful if the total number of submissions is quite
# small - it will stop the while True loop just spinning aimlessly most of
# the time. (in seconds)
batch_throttle = 120
# this is how many times the thread will re-try contacting the server if
# it suffers a major exception (i.e. not a sword exception, but something
# network related)
retry_count = 10
# this is the gap between attempts to check a specific item. If the item
# has been checked more recently than this amount of time ago, it will not
# be checked again on the current run. Specified in seconds (here it is
# set to once per day).
check_gap = 86400
def run(self):
# just keep going until the thread is killed
while True:
self._check_all_datasets()
time.sleep(SwordStatementCheckThread.batch_throttle)
def _check_all_datasets(self):
dss = DatasetSubmission.objects.all()
for dataset_submission in dss:
if not self._checkable(dataset_submission):
continue
self._check_dataset(dataset_submission)
def _checkable(self, dataset_submission):
last_checked = dataset_submission.last_accessed
if last_checked is None:
return True
now = datetime.datetime.now()
minimum = datetime.timedelta(0, SwordStatementCheckThread.check_gap)
gap = now - last_checked
return gap > minimum
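    # Illustrative note (not part of the original file): with check_gap = 86400
    # (24 hours), a submission last checked 23 hours ago gives gap < minimum and
    # is skipped, while one checked 25 hours ago gives gap > minimum and is
    # checked again.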
def _check_dataset(self, dataset_submission):
retry_counter = 0
exception = None
while retry_counter < SwordStatementCheckThread.retry_count:
try:
# logger.info("Checking state of dataset at " + dataset_submission.remote_url)
opener = openers.get_opener(dataset_submission.repository,
dataset_submission.submitting_user)
conn = Connection(error_response_raises_exceptions=False, http_impl=UrlLib2Layer(opener))
receipt = conn.get_deposit_receipt(dataset_submission.remote_url)
statement = conn.get_ore_sword_statement(receipt.ore_statement_iri)
for state_uri, state_desc in statement.states:
logger.info("Dataset has state URI: " + state_uri)
if state_uri in ERROR_STATES:
dataset_submission.status = 'error'
logger.info("URI: " + state_uri + " is an error state ... setting 'error' state on submission record")
break
dataset_submission.last_accessed = datetime.datetime.now()
dataset_submission.save()
time.sleep(SwordStatementCheckThread.throttle)
except urllib2.URLError as e:
# if we get an exception, try again up to the limit
logger.info("Got error connecting to the server ... retrying " + str(retry_counter + 1) + " of " + str(SwordStatementCheckThread.retry_count))
retry_counter += 1
exception = e
time.sleep(SwordStatementCheckThread.retry_delay)
continue
else:
# if we don't get an exception, we're done
return
# if we don't return from the else statement above, it means the retries
# all failed, and we have a problem. Raise the last thrown exception.
raise exception
| mit | 5,942,489,631,099,173,000 | 40.165217 | 158 | 0.636671 | false |
laurent-george/weboob | modules/cmso/web/pages.py | 1 | 3661 | # -*- coding: utf-8 -*-
# Copyright(C) 2014 smurail
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import datetime
from weboob.browser.pages import HTMLPage, LoggedPage, pagination
from weboob.browser.elements import ListElement, ItemElement, method
from weboob.browser.filters.standard import CleanText, CleanDecimal, Regexp, DateGuesser, Env
from weboob.browser.filters.html import Link
from weboob.capabilities.bank import Account
from ..transaction import Transaction
__all__ = ['LoginPage']
class LoginPage(HTMLPage):
def login(self, username, password):
form = self.get_form('//form[@id="formAuth"]')
form['noPersonne'] = username
form['motDePasse'] = password
form.submit()
class CmsoListElement(ListElement):
item_xpath = '//table[@class="Tb" and tr[1][@class="LnTit"]]/tr[@class="LnA" or @class="LnB"]'
class AccountsPage(LoggedPage, HTMLPage):
@method
class iter_accounts(CmsoListElement):
class item(ItemElement):
klass = Account
obj__history_url = Link('./td[1]/a')
obj_label = CleanText('./td[1]')
obj_id = obj__history_url & Regexp(pattern="indCptSelectionne=(\d+)") | None
obj_balance = CleanDecimal('./td[2]', replace_dots=True)
def validate(self, obj):
if obj.id is None:
obj.id = obj.label.replace(' ', '')
return True
class CmsoTransactionElement(ItemElement):
klass = Transaction
def condition(self):
return len(self.el) >= 5 and not self.el.get('id', '').startswith('libelleLong')
class HistoryPage(LoggedPage, HTMLPage):
def iter_history(self, *args, **kwargs):
if self.doc.xpath('//a[@href="1-situationGlobaleProfessionnel.act"]'):
return self.iter_history_rest_page(*args, **kwargs)
return self.iter_history_first_page(*args, **kwargs)
@method
class iter_history_first_page(CmsoListElement):
class item(CmsoTransactionElement):
def validate(self, obj):
return obj.date >= datetime.date.today().replace(day=1)
def date(selector):
return DateGuesser(CleanText(selector), Env('date_guesser')) | Transaction.Date(selector)
obj_date = date('./td[1]')
obj_vdate = date('./td[2]')
# Each row is followed by a "long labelled" version
obj_raw = Transaction.Raw('./following-sibling::tr[1][starts-with(@id, "libelleLong")]/td[3]')
obj_amount = Transaction.Amount('./td[5]', './td[4]')
@pagination
@method
class iter_history_rest_page(CmsoListElement):
next_page = Link('//span[has-class("Rappel")]/following-sibling::*[1][@href]')
class item(CmsoTransactionElement):
obj_date = Transaction.Date('./td[2]')
obj_vdate = Transaction.Date('./td[1]')
obj_raw = Transaction.Raw('./td[3]')
obj_amount = Transaction.Amount('./td[5]', './td[4]', replace_dots=False)
| agpl-3.0 | 2,973,286,327,672,497,700 | 34.892157 | 106 | 0.643267 | false |
r0balo/pelisalacarta | python/main-classic/channels/yaske.py | 1 | 68313 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for yaske
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re, sys, urllib, urlparse
from core import config
from core import logger
from core import httptools
from core import scrapertools
from core import servertools
from core import channeltools
from core import tmdb
from core.item import Item
HOST = 'http://www.yaske.ro'
parameters= channeltools.get_channel_parameters('yaske')
fanart_host= parameters['fanart']
thumbnail_host= parameters['thumbnail']
color1, color2, color3 = ['0xFFA5F6AF','0xFF5FDA6D','0xFF11811E']
def mainlist(item):
logger.info()
itemlist = []
item.url = HOST
item.text_color = color2
item.fanart = fanart_host
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"
itemlist.append(item.clone(title="Novedades", action="peliculas", text_blod= True, viewcontent='movies',
thumbnail= thumbnail % 'novedades', viewmode = "movie_with_plot"))
itemlist.append(item.clone(title="Estrenos", action="peliculas", text_blod=True,
url= HOST + "/genero/premieres", thumbnail=thumbnail % 'estrenos'))
itemlist.append(item.clone(title="", folder=False))
itemlist.append(Item(channel=item.channel, title="Filtrar por:", fanart=fanart_host, folder=False,
text_color=color3, text_blod= True, thumbnail=thumbnail_host))
itemlist.append(item.clone(title=" Género", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="gender", thumbnail=thumbnail % 'generos', viewmode = "thumbnails" ))
itemlist.append(item.clone(title=" Idioma", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="language", thumbnail=thumbnail % 'idiomas'))
itemlist.append(item.clone(title=" Calidad", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="quality", thumbnail=thumbnail % 'calidad'))
itemlist.append(item.clone(title=" Año", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="year", thumbnail=thumbnail % 'year'))
itemlist.append(item.clone(title="", folder=False))
itemlist.append(item.clone(title="Buscar por título", action="search", thumbnail=thumbnail % 'buscar') )
return itemlist
def search(item,texto):
logger.info()
itemlist = []
try:
item.url = HOST + "/search/%s" % texto.replace(' ', '+')
item.extra = ""
itemlist.extend(peliculas(item))
if itemlist[-1].title == ">> Página siguiente":
item_pag = itemlist[-1]
itemlist = sorted(itemlist[:-1], key=lambda Item: Item.contentTitle)
itemlist.append(item_pag)
else:
itemlist = sorted(itemlist, key=lambda Item: Item.contentTitle)
return itemlist
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = HOST+"/"
elif categoria == 'infantiles':
item.url = HOST+"/custom/?gender=animation"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()
    # Catch the exception so the "newest" section is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
itemlist = []
url_next_page = ""
data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)
patron = '<li class="item-movies.*?'
patron += '<a class="image-block" href="([^"]+)" title="([^"]+)">'
patron += '<img src="([^"]+).*?'
patron += '<div class="moSinopsis">.*?</b>([^<]+).*?'
patron += '<div class="moYear">.*?</b>([^<]+).*?'
patron += '<ul class="bottombox">.*?<li>(<img.*?)</li>.*?</ul>'
patron += '<div class="quality">([^<]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
    # Pagination
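    # Illustrative note (not part of the original source): each scraped page is
    # split into two listing pages of 20 items; next_page 'b' serves the second
    # half of the current page, while 'a' moves on to the site's real next page.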
if item.next_page != 'b':
if len(matches) > 20:
url_next_page = item.url
matches = matches [:20]
next_page = 'b'
else:
matches = matches[20:]
next_page = 'a'
        patron_next_page = "<a href='([^']+)'>\&raquo\;</a>"
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
if len(matches_next_page) > 0:
url_next_page = urlparse.urljoin(item.url, matches_next_page[0])
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot, year, idiomas, calidad in matches:
patronidiomas = "<img src='[^']+' title='([^']+)'"
matchesidiomas = re.compile(patronidiomas,re.DOTALL).findall(idiomas)
idiomas_disponibles = ""
if matchesidiomas:
idiomas_disponibles = "[" + "/".join(matchesidiomas).strip() + "]"
contentTitle = decodeHtmlentities(scrapedtitle.strip())
title = "%s %s [%s]" %(contentTitle, idiomas_disponibles, calidad)
plot = decodeHtmlentities(scrapedplot)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, contentQuality=calidad,
thumbnail=scrapedthumbnail, plot=plot, contentTitle=contentTitle,
infoLabels={"year":year}, text_color = color1))
    # Fetch the basic data for all the movies using multiple threads
tmdb.set_infoLabels(itemlist)
    # Add pagination if needed
if url_next_page:
itemlist.append(Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
url=url_next_page, next_page=next_page, folder=True, text_color = color3, text_blod=True))
return itemlist
def menu_buscar_contenido(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<select name="'+item.extra+'"(.*?)</select>')
    # Extract the entries
patron = "<option value='([^']+)'>([^<]+)</option>"
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
adult_mode = config.get_setting("adult_mode")
for scrapedurl,scrapedtitle in matches:
thumbnail = ""
if item.extra == 'gender':
if scrapedtitle in ['Proximos', 'Series', 'Noticia'] or (scrapedtitle == 'Adultos' and adult_mode == "false"):
continue
url = HOST + "/genero/" + scrapedurl
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png" \
% scrapedtitle.lower().replace(' ','%20')
else:
url = HOST+"/custom/?"+item.extra+"="+scrapedurl
thumbnail = item.thumbnail
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, text_color = color1,
thumbnail=thumbnail, contentType='movie', folder=True, viewmode="movie_with_plot") )
if item.extra in ['gender', 'language']:
return sorted(itemlist, key=lambda i: i.title.lower())
else:
return itemlist
def findvideos(item):
logger.info()
langdict = {}
itemlist = []
    # Download the page
data = httptools.downloadpage(item.url).data
if not item.plot:
item.plot = scrapertools.find_single_match(data,'<meta name="sinopsis" content="([^"]+)"')
item.plot = decodeHtmlentities(item.plot)
patron = '<tr bgcolor=(.*?)</tr>'
matches = re.compile(patron,re.DOTALL).findall(data)
for tr in matches:
try:
url = scrapertools.find_single_match(tr, '<a.*?href="([^"]+)"')
if not url.startswith("http") or "olimpo.link" in url:
continue
title = scrapertools.find_single_match(tr,'<i class="icon-([^"]+)')
server = scrapertools.find_single_match(tr,'"http\://www.google.com[^>]+>([^<]+)')
idioma = scrapertools.find_single_match(tr,
'<img src="http://www.yaske.[a-z]+/theme/01/data/images/flags/([a-z_]+).png"[^>]+>[^<]*<')
subtitulos = scrapertools.find_single_match(tr,
'<img src="http://www.yaske.[a-z]+/theme/01/data/images/flags/[^"]+"[^>]+>([^<]*)<')
            thumbnail = servertools.guess_server_thumbnail(server)  # TODO: this takes forever, see if we can change it
if not thumbnail:
thumbnail = thumbnail_host
if title == 'play':
title = " Ver en %s" % server
elif title == 'download':
title = " Descargar de %s" % server
else:
title = " %s en %s" % (title, server)
sublist = langdict.get(idioma, list())
sublist.append(item.clone(action="play", title=title, url=url, server=server,
thumbnail=thumbnail, folder=False, text_color=color1))
langdict[idioma] = sublist
except:
import traceback
logger.info("Excepcion: "+traceback.format_exc())
    # Add the servers found, grouping them by language
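    # Illustrative note (not part of the original source): langdict maps a flag
    # code such as "es_es" to the playable items found for that language, so the
    # known languages can be listed in a fixed order before any remaining ones.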
lang_trans = {"es_es": "Español:", "la_la": "Latino:", "en_es": "Subtitulado:", "en_en": "Ingles:"}
for k in ["es_es", "la_la", "en_es", "en_en"]:
if k in langdict:
itemlist.append(Item(channel=item.channel, title=lang_trans[k], fanart=item.fanart, folder=False,
text_color=color2, text_blod=True, thumbnail=thumbnail_host))
itemlist.extend(langdict.pop(k))
    # Other languages
for k, v in langdict.items():
        if subtitulos:
            title = "%s/%s:" % (k, subtitulos)
else:
title = "%s:" % k
itemlist.append(Item(channel=item.channel, title=title, fanart=fanart_host, folder=False,
text_color=color2, text_blod=True, thumbnail=thumbnail_host))
itemlist.extend(langdict.pop(k))
    # Insert the "Search trailer" and "Add to library" items
if itemlist and item.extra != "library":
title = "%s [%s]" % (item.contentTitle, item.contentQuality)
itemlist.insert(0, item.clone(channel = "trailertools", action="buscartrailer",
text_color=color3, title=title, viewmode="list"))
if config.get_library_support():
itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca",
action="add_pelicula_to_library", url=item.url, text_color="green",
contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))
return itemlist
def play(item):
logger.info("item.url="+item.url)
itemlist=[]
data = urllib.unquote(item.url)
newdata = scrapertools.find_single_match(data,'olo.gg/s/[a-zA-Z0-9]+.s.(.*?)$')
if newdata:
data = urllib.unquote(newdata)
logger.info("item.url=" + data)
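    # Illustrative note (an assumption, not stated in the original source): the
    # olo.gg/s/ links handled above appear to wrap the real video URL after an
    # "?s=" style separator, which is why the inner URL is unquoted again.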
    # Look for the video on the given server ...
devuelve = servertools.findvideosbyserver(data, item.server)
if not devuelve:
        # ...if we don't find it, search all the available servers
devuelve = servertools.findvideos(data)
if devuelve:
#logger.debug(devuelve)
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
return itemlist
# TODO: This is temporary until scrapertools is modified
def decodeHtmlentities(data):
entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8})(;?)")
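    # Illustrative note (not part of the original source): the three groups
    # capture an optional '#' (numeric reference), the entity name or code point
    # digits, and an optional trailing ';', e.g. "&amp;" -> ('', 'amp', ';') and
    # "&#233" -> ('#', '233', '').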
# maps the HTML5 named character references to the equivalent Unicode character(s)
html5 = {'CupCap;': '\u224d', 'minusdu;': '\u2a2a', 'aring': '\xe5', 'Ubreve;': '\u016c', 'lcedil;': '\u013c',
'Zacute;': '\u0179', 'NotVerticalBar;': '\u2224', 'bbrk;': '\u23b5', 'ThinSpace;': '\u2009',
'nwarhk;': '\u2923', 'rlm;': '\u200f', 'DoubleDownArrow;': '\u21d3', 'RightDownVectorBar;': '\u2955',
'jukcy;': '\u0454', 'frac12;': '\xbd', 'subrarr;': '\u2979', 'rsquo;': '\u2019', 'aacute;': '\xe1',
'Integral;': '\u222b', 'oS;': '\u24c8', 'eqslantgtr;': '\u2a96', 'Uuml': '\xdc', 'piv;': '\u03d6',
'iinfin;': '\u29dc', 'Ubrcy;': '\u040e', 'lhblk;': '\u2584', 'uml': '\xa8', 'backcong;': '\u224c',
'capdot;': '\u2a40', 'harr;': '\u2194', 'lsquor;': '\u201a', 'iscr;': '\U0001d4be', 'Lsh;': '\u21b0',
'Implies;': '\u21d2', 'Oacute': '\xd3', 'reg': '\xae', 'vsupnE;': '\u2acc\ufe00', 'Pcy;': '\u041f',
'nang;': '\u2220\u20d2', 'Kcy;': '\u041a', 'GT': '>', 'eacute;': '\xe9', 'breve;': '\u02d8',
'mfr;': '\U0001d52a', 'bnot;': '\u2310', 'racute;': '\u0155', 'dtrif;': '\u25be', 'cedil': '\xb8',
'gesdotol;': '\u2a84', 'sc;': '\u227b', 'npreceq;': '\u2aaf\u0338', 'NotTildeTilde;': '\u2249',
'nlE;': '\u2266\u0338', 'trianglerighteq;': '\u22b5', 'gfr;': '\U0001d524', 'odblac;': '\u0151',
'wedge;': '\u2227', 'solb;': '\u29c4', 'isinE;': '\u22f9', 'middot;': '\xb7', 'nshortparallel;': '\u2226',
'cudarrr;': '\u2935', 'loarr;': '\u21fd', 'UnderBar;': '_', 'mstpos;': '\u223e', 'Oacute;': '\xd3',
'ltdot;': '\u22d6', 'gacute;': '\u01f5', 'Tcy;': '\u0422', 'Jcy;': '\u0419', 'wr;': '\u2240',
'Amacr;': '\u0100', 'gtrdot;': '\u22d7', 'rarrap;': '\u2975', 'boxtimes;': '\u22a0', 'nearr;': '\u2197',
'ecaron;': '\u011b', 'angmsdad;': '\u29ab', 'ropf;': '\U0001d563', 'uacute;': '\xfa', 'nsucc;': '\u2281',
'nvap;': '\u224d\u20d2', 'udblac;': '\u0171', 'range;': '\u29a5', 'udhar;': '\u296e', 'nwarr;': '\u2196',
'lneq;': '\u2a87', 'Uuml;': '\xdc', 'Tab;': '\t', 'Lmidot;': '\u013f', 'Tfr;': '\U0001d517',
'TScy;': '\u0426', 'nvge;': '\u2265\u20d2', 'mp;': '\u2213', 'gl;': '\u2277', 'YAcy;': '\u042f',
'CenterDot;': '\xb7', 'iopf;': '\U0001d55a', 'varsigma;': '\u03c2', 'lbrack;': '[', 'icy;': '\u0438',
'boxDR;': '\u2554', 'nsubseteq;': '\u2288', 'Ocy;': '\u041e', 'integers;': '\u2124', 'THORN': '\xde',
'cwint;': '\u2231', 'downharpoonright;': '\u21c2', 'capbrcup;': '\u2a49', 'nGtv;': '\u226b\u0338',
'nge;': '\u2271', 'angmsdac;': '\u29aa', 'ropar;': '\u2986', 'boxdl;': '\u2510', 'bigcup;': '\u22c3',
'lsim;': '\u2272', 'gtquest;': '\u2a7c', 'lrhar;': '\u21cb', 'Aring': '\xc5', 'Cap;': '\u22d2',
'twoheadrightarrow;': '\u21a0', 'ngsim;': '\u2275', 'plus;': '+', 'LeftArrowBar;': '\u21e4',
'lesseqqgtr;': '\u2a8b', 'softcy;': '\u044c', 'ne;': '\u2260', 'Agrave': '\xc0', 'SmallCircle;': '\u2218',
'andd;': '\u2a5c', 'LeftArrow;': '\u2190', 'napE;': '\u2a70\u0338', 'iuml': '\xef', 'Lscr;': '\u2112',
'gla;': '\u2aa5', 'yicy;': '\u0457', 'bsime;': '\u22cd', 'gtreqqless;': '\u2a8c', 'female;': '\u2640',
'cupdot;': '\u228d', 'pound': '\xa3', 'yacy;': '\u044f', 'varkappa;': '\u03f0', 'lambda;': '\u03bb',
'circledcirc;': '\u229a', 'circlearrowleft;': '\u21ba', 'Beta;': '\u0392', 'REG': '\xae',
'drbkarow;': '\u2910', 'boxhu;': '\u2534', 'xvee;': '\u22c1', 'boxv;': '\u2502', 'igrave;': '\xec',
'SquareSupersetEqual;': '\u2292', 'Afr;': '\U0001d504', 'lacute;': '\u013a', 'Yacute;': '\xdd',
'xrArr;': '\u27f9', 'mnplus;': '\u2213', 'shchcy;': '\u0449', 'Hopf;': '\u210d', 'ucirc': '\xfb',
'tau;': '\u03c4', 'TSHcy;': '\u040b', 'Icirc': '\xce', 'imath;': '\u0131', 'qprime;': '\u2057',
'uhblk;': '\u2580', 'lbarr;': '\u290c', 'Hstrok;': '\u0126', 'NotLessGreater;': '\u2278',
'vsubne;': '\u228a\ufe00', 'DoubleLeftRightArrow;': '\u21d4', 'larrtl;': '\u21a2',
'LessEqualGreater;': '\u22da', 'boxVl;': '\u2562', 'csupe;': '\u2ad2', 'gesdoto;': '\u2a82',
'lEg;': '\u2a8b', 'zhcy;': '\u0436', 'icirc': '\xee', 'rmoust;': '\u23b1', 'RoundImplies;': '\u2970',
'subE;': '\u2ac5', 'zwj;': '\u200d', 'VerticalLine;': '|', 'ell;': '\u2113', 'larrbfs;': '\u291f',
'OpenCurlyDoubleQuote;': '\u201c', 'Hfr;': '\u210c', 'ddotseq;': '\u2a77', 'orderof;': '\u2134',
'Element;': '\u2208', 'circledast;': '\u229b', 'larrpl;': '\u2939', 'longmapsto;': '\u27fc',
'lessapprox;': '\u2a85', 'nLtv;': '\u226a\u0338', 'ast;': '*', 'DiacriticalTilde;': '\u02dc',
'lrm;': '\u200e', 'imagpart;': '\u2111', 'Ropf;': '\u211d', 'scE;': '\u2ab4', 'deg': '\xb0',
'll;': '\u226a', 'mopf;': '\U0001d55e', 'ograve;': '\xf2', 'notnivc;': '\u22fd', 'prnap;': '\u2ab9',
'CircleDot;': '\u2299', 'blank;': '\u2423', 'NotLeftTriangleEqual;': '\u22ec', 'num;': '#',
'langle;': '\u27e8', 'scaron;': '\u0161', 'subne;': '\u228a', 'prE;': '\u2ab3', 'Tau;': '\u03a4',
'trie;': '\u225c', 'times': '\xd7', 'eg;': '\u2a9a', 'rightharpoonup;': '\u21c0', 'nearhk;': '\u2924',
'pointint;': '\u2a15', 'Pscr;': '\U0001d4ab', 'quot': '"', 'Iacute;': '\xcd', 'dcy;': '\u0434',
'upsi;': '\u03c5', 'MediumSpace;': '\u205f', 'DownLeftVectorBar;': '\u2956', 'supdsub;': '\u2ad8',
'Ccirc;': '\u0108', 'luruhar;': '\u2966', 'LT': '<', 'chcy;': '\u0447', 'lsimg;': '\u2a8f',
'ljcy;': '\u0459', 'complexes;': '\u2102', 'dagger;': '\u2020', 'isinv;': '\u2208', 'PartialD;': '\u2202',
'prod;': '\u220f', 'subplus;': '\u2abf', 'digamma;': '\u03dd', 'Ccedil': '\xc7', 'blacktriangle;': '\u25b4',
'veeeq;': '\u225a', 'lesdotor;': '\u2a83', 'gcy;': '\u0433', 'ntgl;': '\u2279', 'Ouml': '\xd6',
'eparsl;': '\u29e3', 'xsqcup;': '\u2a06', 'glE;': '\u2a92', 'bowtie;': '\u22c8',
'SquareIntersection;': '\u2293', 'RightFloor;': '\u230b', 'Efr;': '\U0001d508',
'DownLeftRightVector;': '\u2950', 'hercon;': '\u22b9', 'ecy;': '\u044d', 'DoubleDot;': '\xa8', 'rcub;': '}',
'asympeq;': '\u224d', 'NotTildeFullEqual;': '\u2247', 'Gg;': '\u22d9', 'gtreqless;': '\u22db',
'Sscr;': '\U0001d4ae', 'cularrp;': '\u293d', 'DoubleUpArrow;': '\u21d1', 'sect': '\xa7', 'map;': '\u21a6',
'Del;': '\u2207', 'ctdot;': '\u22ef', 'Umacr;': '\u016a', 'copf;': '\U0001d554', 'minus;': '\u2212',
'smte;': '\u2aac', 'zfr;': '\U0001d537', 'measuredangle;': '\u2221', 'male;': '\u2642',
'angrtvbd;': '\u299d', 'NestedGreaterGreater;': '\u226b', 'uuml;': '\xfc', 'ograve': '\xf2',
'Alpha;': '\u0391', 'QUOT;': '"', 'timesd;': '\u2a30', 'hyphen;': '\u2010', 'dopf;': '\U0001d555',
'Backslash;': '\u2216', 'utrif;': '\u25b4', 'ntrianglerighteq;': '\u22ed', 'Hat;': '^', 'between;': '\u226c',
'zacute;': '\u017a', 'geqslant;': '\u2a7e', 'elinters;': '\u23e7', 'lvertneqq;': '\u2268\ufe00',
'Yscr;': '\U0001d4b4', 'NotPrecedesEqual;': '\u2aaf\u0338', 'otilde': '\xf5', 'rtriltri;': '\u29ce',
'SucceedsSlantEqual;': '\u227d', 'bsim;': '\u223d', 'dscy;': '\u0455', 'cirmid;': '\u2aef',
'gnapprox;': '\u2a8a', 'uharl;': '\u21bf', 'sqsube;': '\u2291', 'YIcy;': '\u0407', 'forall;': '\u2200',
'ogt;': '\u29c1', 'Vopf;': '\U0001d54d', 'ffllig;': '\ufb04', 'loz;': '\u25ca', 'Atilde;': '\xc3',
'ntlg;': '\u2278', 'vangrt;': '\u299c', 'it;': '\u2062', 'GreaterTilde;': '\u2273', 'rarrhk;': '\u21aa',
'smid;': '\u2223', 'kappa;': '\u03ba', 'Diamond;': '\u22c4', 'ngeq;': '\u2271', 'DownArrowBar;': '\u2913',
'expectation;': '\u2130', 'sup3': '\xb3', 'frasl;': '\u2044', 'Bscr;': '\u212c', 'geqq;': '\u2267',
'lat;': '\u2aab', 'macr;': '\xaf', 'longrightarrow;': '\u27f6', 'Gcirc;': '\u011c', 'Wcirc;': '\u0174',
'horbar;': '\u2015', 'dharr;': '\u21c2', 'DownRightTeeVector;': '\u295f', 'GreaterEqual;': '\u2265',
'rBarr;': '\u290f', 'precsim;': '\u227e', 'iuml;': '\xef', 'ZHcy;': '\u0416', 'vnsub;': '\u2282\u20d2',
'UnderParenthesis;': '\u23dd', 'RuleDelayed;': '\u29f4', 'bull;': '\u2022', 'swArr;': '\u21d9',
'nrtri;': '\u22eb', 'apE;': '\u2a70', 'nLt;': '\u226a\u20d2', 'LeftDownVectorBar;': '\u2959',
'succnapprox;': '\u2aba', 'szlig': '\xdf', 'vcy;': '\u0432', 'wcirc;': '\u0175', 'utri;': '\u25b5',
'Zeta;': '\u0396', 'Hcirc;': '\u0124', 'NotRightTriangle;': '\u22eb', 'NotGreaterEqual;': '\u2271',
'larrb;': '\u21e4', 'ecolon;': '\u2255', 'ascr;': '\U0001d4b6', 'RightUpVectorBar;': '\u2954',
'divide': '\xf7', 'npolint;': '\u2a14', 'nexist;': '\u2204', 'plusb;': '\u229e', 'boxvl;': '\u2524',
'searhk;': '\u2925', 'oror;': '\u2a56', 'tdot;': '\u20db', 'bigotimes;': '\u2a02', 'phone;': '\u260e',
'Gscr;': '\U0001d4a2', 'bumpe;': '\u224f', 'ang;': '\u2220', 'ltquest;': '\u2a7b',
'rightharpoondown;': '\u21c1', 'rdca;': '\u2937', 'cross;': '\u2717', 'Kopf;': '\U0001d542',
'IEcy;': '\u0415', 'leq;': '\u2264', 'rarrw;': '\u219d', 'rcy;': '\u0440', 'Mu;': '\u039c',
'nopf;': '\U0001d55f', 'Aopf;': '\U0001d538', 'CloseCurlyDoubleQuote;': '\u201d', 'lbrace;': '{',
'triangleq;': '\u225c', 'curlyeqprec;': '\u22de', 'LeftDownTeeVector;': '\u2961', 'subset;': '\u2282',
'xscr;': '\U0001d4cd', 'brvbar;': '\xa6', 'nles;': '\u2a7d\u0338', 'circeq;': '\u2257', 'boxVH;': '\u256c',
'lE;': '\u2266', 'zeta;': '\u03b6', 'congdot;': '\u2a6d', 'emsp13;': '\u2004', 'uogon;': '\u0173',
'xcap;': '\u22c2', 'eta;': '\u03b7', 'lAarr;': '\u21da', 'thicksim;': '\u223c', 'boxDl;': '\u2556',
'rmoustache;': '\u23b1', 'Sopf;': '\U0001d54a', 'uarr;': '\u2191', 'Otimes;': '\u2a37', 'boxvH;': '\u256a',
'lparlt;': '\u2993', 'nsime;': '\u2244', 'sqcaps;': '\u2293\ufe00', 'SquareUnion;': '\u2294',
'Rsh;': '\u21b1', 'Zcy;': '\u0417', 'ycirc;': '\u0177', 'rbrkslu;': '\u2990', 'Proportional;': '\u221d',
'Sup;': '\u22d1', 'curlyvee;': '\u22ce', 'rceil;': '\u2309', 'Xfr;': '\U0001d51b', 'minusd;': '\u2238',
'angmsdab;': '\u29a9', 'DiacriticalDoubleAcute;': '\u02dd', 'par;': '\u2225', 'lpar;': '(', 'lcy;': '\u043b',
'Nu;': '\u039d', 'euml;': '\xeb', 'CircleMinus;': '\u2296', 'lfloor;': '\u230a', 'Rightarrow;': '\u21d2',
'rect;': '\u25ad', 'dzigrarr;': '\u27ff', 'tcy;': '\u0442', 'vartheta;': '\u03d1', 'Idot;': '\u0130',
'Lleftarrow;': '\u21da', 'GT;': '>', 'emsp14;': '\u2005', 'vert;': '|', 'boxHu;': '\u2567',
'Rarrtl;': '\u2916', 'nprcue;': '\u22e0', 'para': '\xb6', 'nsucceq;': '\u2ab0\u0338', 'nhArr;': '\u21ce',
'ClockwiseContourIntegral;': '\u2232', 'Downarrow;': '\u21d3', 'Otilde': '\xd5', 'umacr;': '\u016b',
'varsubsetneq;': '\u228a\ufe00', 'cup;': '\u222a', 'longleftrightarrow;': '\u27f7', 'gg;': '\u226b',
'Barv;': '\u2ae7', 'Map;': '\u2905', 'Im;': '\u2111', 'ltcir;': '\u2a79', 'gdot;': '\u0121',
'Cayleys;': '\u212d', 'timesbar;': '\u2a31', 'Gdot;': '\u0120', 'Ucirc': '\xdb', 'bigvee;': '\u22c1',
'QUOT': '"', 'lang;': '\u27e8', 'Yfr;': '\U0001d51c', 'Larr;': '\u219e', 'leg;': '\u22da', 'cuesc;': '\u22df',
'rArr;': '\u21d2', 'mumap;': '\u22b8', 'RightVector;': '\u21c0', 'nisd;': '\u22fa', 'crarr;': '\u21b5',
'leftthreetimes;': '\u22cb', 'Fcy;': '\u0424', 'xotime;': '\u2a02', 'odash;': '\u229d', 'agrave;': '\xe0',
'LeftFloor;': '\u230a', 'scpolint;': '\u2a13', 'Pfr;': '\U0001d513', 'nvHarr;': '\u2904', 'quot;': '"',
'comp;': '\u2201', 'imagline;': '\u2110', 'telrec;': '\u2315', 'Sqrt;': '\u221a', 'supsub;': '\u2ad4',
'rarr;': '\u2192', 'gvertneqq;': '\u2269\ufe00', 'nbumpe;': '\u224f\u0338', 'Uacute': '\xda',
'gsim;': '\u2273', 'coprod;': '\u2210', 'ncongdot;': '\u2a6d\u0338', 'sscr;': '\U0001d4c8',
'lstrok;': '\u0142', 'TripleDot;': '\u20db', 'topfork;': '\u2ada', 'yacute': '\xfd', 'nrightarrow;': '\u219b',
'VerticalBar;': '\u2223', 'LeftDownVector;': '\u21c3', 'angzarr;': '\u237c', 'nsupset;': '\u2283\u20d2',
'rdldhar;': '\u2969', 'deg;': '\xb0', 'DoubleRightArrow;': '\u21d2', 'macr': '\xaf', 'ldca;': '\u2936',
'jcirc;': '\u0135', 'uml;': '\xa8', 'cupor;': '\u2a45', 'egrave': '\xe8', 'boxur;': '\u2514',
'Esim;': '\u2a73', 'hybull;': '\u2043', 'DownBreve;': '\u0311', 'order;': '\u2134', 'Vscr;': '\U0001d4b1',
'ApplyFunction;': '\u2061', 'Mellintrf;': '\u2133', 'ufisht;': '\u297e', 'Ycirc;': '\u0176',
'nedot;': '\u2250\u0338', 'Ugrave;': '\xd9', 'npar;': '\u2226', 'RightArrowLeftArrow;': '\u21c4',
'xnis;': '\u22fb', 'sharp;': '\u266f', 'twixt;': '\u226c', 'midcir;': '\u2af0', 'real;': '\u211c',
'npr;': '\u2280', 'oopf;': '\U0001d560', 'Ouml;': '\xd6', 'urtri;': '\u25f9', 'SucceedsTilde;': '\u227f',
'ngeqslant;': '\u2a7e\u0338', 'Eopf;': '\U0001d53c', 'LowerLeftArrow;': '\u2199', 'sqsubseteq;': '\u2291',
'preccurlyeq;': '\u227c', 'RightTriangle;': '\u22b3', 'ReverseUpEquilibrium;': '\u296f',
'simplus;': '\u2a24', 'Aogon;': '\u0104', 'NotGreater;': '\u226f', 'rpargt;': '\u2994', 'curarrm;': '\u293c',
'THORN;': '\xde', 'smtes;': '\u2aac\ufe00', 'Ntilde': '\xd1', 'Zscr;': '\U0001d4b5', 'Nscr;': '\U0001d4a9',
'sigma;': '\u03c3', 'Atilde': '\xc3', 'checkmark;': '\u2713', 'spades;': '\u2660', 'ycy;': '\u044b',
'shortmid;': '\u2223', 'NotLeftTriangleBar;': '\u29cf\u0338', 'SuchThat;': '\u220b', 'amacr;': '\u0101',
'bigcirc;': '\u25ef', 'Gt;': '\u226b', 'xopf;': '\U0001d569', 'puncsp;': '\u2008', 'Fscr;': '\u2131',
'gel;': '\u22db', 'sect;': '\xa7', 'cudarrl;': '\u2938', 'Iuml': '\xcf', 'squarf;': '\u25aa',
'seswar;': '\u2929', 'Eacute': '\xc9', 'scy;': '\u0441', 'subnE;': '\u2acb', 'Sacute;': '\u015a',
'doublebarwedge;': '\u2306', 'rnmid;': '\u2aee', 'djcy;': '\u0452', 'Odblac;': '\u0150', 'duhar;': '\u296f',
'nVDash;': '\u22af', 'NotPrecedes;': '\u2280', 'frac45;': '\u2158', 'ubrcy;': '\u045e', 'empty;': '\u2205',
'nbsp;': '\xa0', 'comma;': ',', 'RightArrow;': '\u2192', 'notnivb;': '\u22fe', 'nrarrw;': '\u219d\u0338',
'downdownarrows;': '\u21ca', 'ngE;': '\u2267\u0338', 'lcub;': '{', 'Kscr;': '\U0001d4a6', 'Utilde;': '\u0168',
'pertenk;': '\u2031', 'sstarf;': '\u22c6', 'bdquo;': '\u201e', 'psi;': '\u03c8', 'NotLeftTriangle;': '\u22ea',
'Jscr;': '\U0001d4a5', 'UpEquilibrium;': '\u296e', 'succneqq;': '\u2ab6', 'drcrop;': '\u230c',
'csube;': '\u2ad1', 'plusdu;': '\u2a25', 'nvlArr;': '\u2902', 'RightTeeArrow;': '\u21a6', 'apos;': "'",
'squf;': '\u25aa', 'blacktriangledown;': '\u25be', 'ShortDownArrow;': '\u2193', 'boxuL;': '\u255b',
'Lambda;': '\u039b', 'Darr;': '\u21a1', 'sup3;': '\xb3', 'xcirc;': '\u25ef', 'nscr;': '\U0001d4c3',
'UpArrowDownArrow;': '\u21c5', 'Auml': '\xc4', 'nrArr;': '\u21cf', 'nges;': '\u2a7e\u0338',
'parallel;': '\u2225', 'LeftUpTeeVector;': '\u2960', 'uwangle;': '\u29a7', 'napprox;': '\u2249',
'sol;': '/', 'nRightarrow;': '\u21cf', 'squ;': '\u25a1', 'natur;': '\u266e', 'Escr;': '\u2130',
'nLl;': '\u22d8\u0338', 'DD;': '\u2145', 'Chi;': '\u03a7', 'lBarr;': '\u290e', 'emptyset;': '\u2205',
'iexcl': '\xa1', 'rarrtl;': '\u21a3', 'gE;': '\u2267', 'LeftTeeVector;': '\u295a',
'DoubleUpDownArrow;': '\u21d5', 'Icirc;': '\xce', 'Racute;': '\u0154', 'vee;': '\u2228', 'bot;': '\u22a5',
'nleftrightarrow;': '\u21ae', 'atilde': '\xe3', 'frac35;': '\u2157', 'mDDot;': '\u223a', 'eqcolon;': '\u2255',
'bsolb;': '\u29c5', 'lltri;': '\u25fa', 'bsemi;': '\u204f', 'because;': '\u2235', 'Oslash': '\xd8',
'nu;': '\u03bd', 'rightarrow;': '\u2192', 'rangle;': '\u27e9', 'TRADE;': '\u2122', 'llhard;': '\u296b',
'LeftAngleBracket;': '\u27e8', 'scnsim;': '\u22e9', 'ccirc;': '\u0109', 'Jsercy;': '\u0408',
'nvsim;': '\u223c\u20d2', 'nleftarrow;': '\u219a', 'hopf;': '\U0001d559', 'Ccedil;': '\xc7',
'rrarr;': '\u21c9', 'twoheadleftarrow;': '\u219e', 'erDot;': '\u2253', 'epsiv;': '\u03f5', 'xi;': '\u03be',
'ring;': '\u02da', 'tscy;': '\u0446', 'mu;': '\u03bc', 'Uacute;': '\xda', 'Lang;': '\u27ea', 'ovbar;': '\u233d',
'nleq;': '\u2270', 'gbreve;': '\u011f', 'cedil;': '\xb8', 'gneq;': '\u2a88', 'wopf;': '\U0001d568',
'frac18;': '\u215b', 'Oscr;': '\U0001d4aa', 'Egrave': '\xc8', 'Igrave;': '\xcc', 'varnothing;': '\u2205',
'divideontimes;': '\u22c7', 'dot;': '\u02d9', 'EqualTilde;': '\u2242', 'NotTilde;': '\u2241', 'els;': '\u2a95',
'easter;': '\u2a6e', 'swarhk;': '\u2926', 'vnsup;': '\u2283\u20d2', 'ETH': '\xd0', 'blacksquare;': '\u25aa',
'bcong;': '\u224c', 'ocy;': '\u043e', 'rbrksld;': '\u298e', 'lhard;': '\u21bd', 'gtrarr;': '\u2978',
'nharr;': '\u21ae', 'rharu;': '\u21c0', 'Mfr;': '\U0001d510', 'npre;': '\u2aaf\u0338', 'oslash;': '\xf8',
'GreaterSlantEqual;': '\u2a7e', 'Ifr;': '\u2111', 'Pi;': '\u03a0', 'lrarr;': '\u21c6', 'sce;': '\u2ab0',
'NotSquareSubsetEqual;': '\u22e2', 'beta;': '\u03b2', 'tcedil;': '\u0163', 'Int;': '\u222c', 'Conint;': '\u222f',
'kappav;': '\u03f0', 'varphi;': '\u03d5', 'subsim;': '\u2ac7', 'nGt;': '\u226b\u20d2', 'blk14;': '\u2591',
'IJlig;': '\u0132', 'LeftUpVector;': '\u21bf', 'epsilon;': '\u03b5', 'ReverseElement;': '\u220b',
'angmsdaa;': '\u29a8', 'starf;': '\u2605', 'sung;': '\u266a', 'udarr;': '\u21c5',
'RightUpTeeVector;': '\u295c', 'gne;': '\u2a88', 'nlArr;': '\u21cd', 'Lcedil;': '\u013b', 'ccedil': '\xe7',
'dtri;': '\u25bf', 'nap;': '\u2249', 'neArr;': '\u21d7', 'boxVR;': '\u2560', 'verbar;': '|', 'omicron;': '\u03bf',
'precapprox;': '\u2ab7', 'Lcaron;': '\u013d', 'ugrave;': '\xf9', 'eDDot;': '\u2a77', 'NotTildeEqual;': '\u2244',
'pitchfork;': '\u22d4', 'top;': '\u22a4', 'quaternions;': '\u210d', 'imped;': '\u01b5', 'SquareSubset;': '\u228f',
'rarrbfs;': '\u2920', 'NotSquareSuperset;': '\u2290\u0338', 'boxvR;': '\u255e', 'ni;': '\u220b', 'gcirc;': '\u011d',
'ffr;': '\U0001d523', 'numsp;': '\u2007', 'notinvb;': '\u22f7', 'MinusPlus;': '\u2213', 'preceq;': '\u2aaf',
'boxH;': '\u2550', 'lsqb;': '[', 'lagran;': '\u2112', 'lnsim;': '\u22e6', 'triplus;': '\u2a39',
'angmsdah;': '\u29af', 'iff;': '\u21d4', 'LT;': '<', 'amp;': '&', 'rightrightarrows;': '\u21c9',
'operp;': '\u29b9', 'imacr;': '\u012b', 'frac38;': '\u215c', 'cent;': '\xa2', 'NotHumpEqual;': '\u224f\u0338',
'zeetrf;': '\u2128', 'DownTee;': '\u22a4', 'Scedil;': '\u015e', 'ShortLeftArrow;': '\u2190',
'Bernoullis;': '\u212c', 'prurel;': '\u22b0', 'gEl;': '\u2a8c', 'late;': '\u2aad', 'notniva;': '\u220c',
'robrk;': '\u27e7', 'alefsym;': '\u2135', 'eng;': '\u014b', 'sext;': '\u2736', 'roang;': '\u27ed',
'Tcedil;': '\u0162', 'NotLessLess;': '\u226a\u0338', 'efDot;': '\u2252', 'cscr;': '\U0001d4b8',
'dashv;': '\u22a3', 'cularr;': '\u21b6', 'numero;': '\u2116', 'caron;': '\u02c7', 'suphsub;': '\u2ad7',
'boxUr;': '\u2559', 'ncup;': '\u2a42', 'lozenge;': '\u25ca', 'lowast;': '\u2217', 'Ufr;': '\U0001d518',
'Gcedil;': '\u0122', 'curren;': '\xa4', 'Ycy;': '\u042b', 'NegativeThickSpace;': '\u200b',
'ulcorner;': '\u231c', 'sdotb;': '\u22a1', 'rdquor;': '\u201d', 'nvltrie;': '\u22b4\u20d2',
'LeftUpDownVector;': '\u2951', 'cap;': '\u2229', 'PrecedesEqual;': '\u2aaf', 'Ecirc;': '\xca',
'bscr;': '\U0001d4b7', 'UpArrow;': '\u2191', 'simg;': '\u2a9e', 'notin;': '\u2209',
'RightDownTeeVector;': '\u295d', 'disin;': '\u22f2', 'oacute;': '\xf3', 'nsube;': '\u2288',
'iquest': '\xbf', 'ltrif;': '\u25c2', 'ccups;': '\u2a4c', 'Because;': '\u2235', 'otimes;': '\u2297',
'Zopf;': '\u2124', 'supedot;': '\u2ac4', 'ee;': '\u2147', 'NotSucceedsSlantEqual;': '\u22e1', 'scap;': '\u2ab8',
'TildeEqual;': '\u2243', 'Colon;': '\u2237', 'rcaron;': '\u0159', 'GJcy;': '\u0403', 'curvearrowright;': '\u21b7',
'Barwed;': '\u2306', 'scirc;': '\u015d', 'Lopf;': '\U0001d543', 'hoarr;': '\u21ff', 'NotLess;': '\u226e',
'afr;': '\U0001d51e', 'homtht;': '\u223b', 'subsup;': '\u2ad3', 'NotRightTriangleEqual;': '\u22ed',
'raemptyv;': '\u29b3', 'ltrPar;': '\u2996', 'upsih;': '\u03d2', 'ccupssm;': '\u2a50', 'Longrightarrow;': '\u27f9',
'NotGreaterFullEqual;': '\u2267\u0338', 'bnequiv;': '\u2261\u20e5', 'lrtri;': '\u22bf', 'setminus;': '\u2216',
'supplus;': '\u2ac0', 'Rscr;': '\u211b', 'Popf;': '\u2119', 'NotSuperset;': '\u2283\u20d2',
'looparrowright;': '\u21ac', 'odot;': '\u2299', 'laquo': '\xab', 'sqcup;': '\u2294', 'hairsp;': '\u200a',
'Gamma;': '\u0393', 'lbrksld;': '\u298f', 'uplus;': '\u228e', 'equivDD;': '\u2a78', 'el;': '\u2a99',
'CHcy;': '\u0427', 'rarrsim;': '\u2974', 'ffilig;': '\ufb03', 'thorn;': '\xfe', 'ngtr;': '\u226f',
'qopf;': '\U0001d562', 'nvle;': '\u2264\u20d2', 'omid;': '\u29b6', 'vrtri;': '\u22b3', 'curvearrowleft;': '\u21b6',
'DownRightVector;': '\u21c1', 'frac58;': '\u215d', 'Uopf;': '\U0001d54c', 'AMP;': '&', 'equest;': '\u225f',
'succapprox;': '\u2ab8', 'intercal;': '\u22ba', 'rthree;': '\u22cc', 'gimel;': '\u2137', 'Uparrow;': '\u21d1',
'Ll;': '\u22d8', 'dzcy;': '\u045f', 'dfisht;': '\u297f', 'frac12': '\xbd', 'submult;': '\u2ac1', 'rang;': '\u27e9',
'Wscr;': '\U0001d4b2', 'Kcedil;': '\u0136', 'leqslant;': '\u2a7d', 'urcrop;': '\u230e', 'SOFTcy;': '\u042c',
'hamilt;': '\u210b', 'AMP': '&', 'pscr;': '\U0001d4c5', 'egs;': '\u2a96', 'supE;': '\u2ac6', 'searr;': '\u2198',
'varpi;': '\u03d6', 'nlarr;': '\u219a', 'nearrow;': '\u2197', 'ldsh;': '\u21b2', 'gesl;': '\u22db\ufe00',
'rarrfs;': '\u291e', 'LessTilde;': '\u2272', 'boxUL;': '\u255d', 'And;': '\u2a53', 'LeftDoubleBracket;': '\u27e6',
'rAtail;': '\u291c', 'eogon;': '\u0119', 'bepsi;': '\u03f6', 'vDash;': '\u22a8', 'Coproduct;': '\u2210',
'ngeqq;': '\u2267\u0338', 'supne;': '\u228b', 'REG;': '\xae', 'kopf;': '\U0001d55c', 'cire;': '\u2257',
'boxhD;': '\u2565', 'cir;': '\u25cb', 'awconint;': '\u2233', 'LowerRightArrow;': '\u2198', 'Wfr;': '\U0001d51a',
'ssmile;': '\u2323', 'ic;': '\u2063', 'boxHd;': '\u2564', 'Oopf;': '\U0001d546', 'trisb;': '\u29cd',
'longleftarrow;': '\u27f5', 'vprop;': '\u221d', 'qfr;': '\U0001d52e', 'frac34;': '\xbe',
'vsubnE;': '\u2acb\ufe00', 'odiv;': '\u2a38', 'nvinfin;': '\u29de', 'boxminus;': '\u229f', 'efr;': '\U0001d522',
'ForAll;': '\u2200', 'lsaquo;': '\u2039', 'yen': '\xa5', 'lAtail;': '\u291b', 'tint;': '\u222d', 'ltri;': '\u25c3',
'DownTeeArrow;': '\u21a7', 'Tilde;': '\u223c', 'nsce;': '\u2ab0\u0338', 'larr;': '\u2190', 'supsup;': '\u2ad6',
'frac16;': '\u2159', 'eth;': '\xf0', 'acirc;': '\xe2', 'ddarr;': '\u21ca', 'Iscr;': '\u2110',
'triangleright;': '\u25b9', 'capand;': '\u2a44', 'HARDcy;': '\u042a', 'sup;': '\u2283',
'NotSubset;': '\u2282\u20d2', 'searrow;': '\u2198', 'nsc;': '\u2281', 'sup1': '\xb9', 'sup2': '\xb2',
'Breve;': '\u02d8', 'epar;': '\u22d5', 'clubsuit;': '\u2663', 'approx;': '\u2248', 'NotGreaterLess;': '\u2279',
'mapsto;': '\u21a6', 'scsim;': '\u227f', 'notinE;': '\u22f9\u0338', 'hcirc;': '\u0125',
'rightthreetimes;': '\u22cc', 'geq;': '\u2265', 'Kappa;': '\u039a', 'vdash;': '\u22a2', 'Congruent;': '\u2261',
'boxdr;': '\u250c', 'DoubleContourIntegral;': '\u222f', 'upuparrows;': '\u21c8', 'csub;': '\u2acf',
'PrecedesSlantEqual;': '\u227c', 'boxbox;': '\u29c9', 'zdot;': '\u017c', 'sub;': '\u2282', 'andand;': '\u2a55',
'laemptyv;': '\u29b4', 'dstrok;': '\u0111', 'perp;': '\u22a5', 'HumpDownHump;': '\u224e', 'int;': '\u222b',
'RightUpDownVector;': '\u294f', 'LongRightArrow;': '\u27f6', 'hstrok;': '\u0127', 'ngt;': '\u226f',
'lbrke;': '\u298b', 'Ograve': '\xd2', 'nvrtrie;': '\u22b5\u20d2', 'leqq;': '\u2266', 'intprod;': '\u2a3c',
'centerdot;': '\xb7', 'emptyv;': '\u2205', 'infintie;': '\u29dd', 'lbbrk;': '\u2772', 'Cacute;': '\u0106',
'rscr;': '\U0001d4c7', 'otilde;': '\xf5', 'DiacriticalGrave;': '`', 'supe;': '\u2287', 'rotimes;': '\u2a35',
'die;': '\xa8', 'mapstodown;': '\u21a7', 'fjlig;': 'fj', 'SquareSuperset;': '\u2290', 'curren': '\xa4',
'GreaterLess;': '\u2277', 'smile;': '\u2323', 'NotHumpDownHump;': '\u224e\u0338', 'ucirc;': '\xfb',
'vArr;': '\u21d5', 'boxV;': '\u2551', 'Tcaron;': '\u0164', 'not;': '\xac', 'mho;': '\u2127', 'sfrown;': '\u2322',
'ZeroWidthSpace;': '\u200b', 'Acirc': '\xc2', 'gneqq;': '\u2269', 'Euml': '\xcb', 'Ccaron;': '\u010c',
'Iacute': '\xcd', 'Yopf;': '\U0001d550', 'aogon;': '\u0105', 'rationals;': '\u211a', 'Bopf;': '\U0001d539',
'uopf;': '\U0001d566', 'acE;': '\u223e\u0333', 'ETH;': '\xd0', 'intcal;': '\u22ba', 'clubs;': '\u2663',
'plussim;': '\u2a26', 'olt;': '\u29c0', 'tprime;': '\u2034', 'iogon;': '\u012f', 'diamondsuit;': '\u2666',
'ltlarr;': '\u2976', 'frac14': '\xbc', 'fscr;': '\U0001d4bb', 'aacute': '\xe1', 'dollar;': '$', 'xmap;': '\u27fc',
'vscr;': '\U0001d4cb', 'ShortRightArrow;': '\u2192', 'Square;': '\u25a1', 'blk12;': '\u2592', 'triangle;': '\u25b5',
'eacute': '\xe9', 'angrt;': '\u221f', 'circlearrowright;': '\u21bb', 'UpTee;': '\u22a5', 'copy;': '\xa9',
'scnE;': '\u2ab6', 'aelig;': '\xe6', 'doteq;': '\u2250', 'parsl;': '\u2afd', 'Ugrave': '\xd9',
'lfr;': '\U0001d529', 'gvnE;': '\u2269\ufe00', 'rarrc;': '\u2933', 'Acy;': '\u0410', 'rbrace;': '}',
'ccedil;': '\xe7', 'nwarrow;': '\u2196', 'njcy;': '\u045a', 'UpperRightArrow;': '\u2197', 'dHar;': '\u2965',
'gt': '>', 'jscr;': '\U0001d4bf', 'rarrpl;': '\u2945', 'varrho;': '\u03f1', 'Ocirc;': '\xd4', 'lowbar;': '_',
'Yacute': '\xdd', 'nsub;': '\u2284', 'lessdot;': '\u22d6', 'NotGreaterGreater;': '\u226b\u0338',
'darr;': '\u2193', 'mcomma;': '\u2a29', 'Cedilla;': '\xb8', 'vartriangleright;': '\u22b3', 'vfr;': '\U0001d533',
'rfisht;': '\u297d', 'PlusMinus;': '\xb1', 'planck;': '\u210f', 'NotPrecedesSlantEqual;': '\u22e0',
'Egrave;': '\xc8', 'rightarrowtail;': '\u21a3', 'Prime;': '\u2033', 'gtrless;': '\u2277', 'thetasym;': '\u03d1',
'bbrktbrk;': '\u23b6', 'nle;': '\u2270', 'mlcp;': '\u2adb', 'larrsim;': '\u2973', 'jcy;': '\u0439',
'drcorn;': '\u231f', 'harrw;': '\u21ad', 'updownarrow;': '\u2195', 'ubreve;': '\u016d', 'pluse;': '\u2a72',
'UpTeeArrow;': '\u21a5', 'prime;': '\u2032', 'COPY;': '\xa9', 'CirclePlus;': '\u2295', 'Longleftarrow;': '\u27f8',
'dArr;': '\u21d3', 'xcup;': '\u22c3', 'AElig': '\xc6', 'leftharpoonup;': '\u21bc', 'Uarr;': '\u219f',
'lsquo;': '\u2018', 'nVdash;': '\u22ae', 'nwnear;': '\u2927', 'gescc;': '\u2aa9', 'rdsh;': '\u21b3',
'grave;': '`', 'blk34;': '\u2593', 'LeftVector;': '\u21bc', 'uharr;': '\u21be', 'isins;': '\u22f4',
'lescc;': '\u2aa8', 'eplus;': '\u2a71', 'jmath;': '\u0237', 'kscr;': '\U0001d4c0', 'nsim;': '\u2241',
'Aacute;': '\xc1', 'NotLessEqual;': '\u2270', 'tshcy;': '\u045b', 'plusmn': '\xb1', 'ecir;': '\u2256',
'nsmid;': '\u2224', 'lesdoto;': '\u2a81', 'nvdash;': '\u22ac', 'Lt;': '\u226a', 'DownRightVectorBar;': '\u2957',
'asymp;': '\u2248', 'ggg;': '\u22d9', 'szlig;': '\xdf', 'lneqq;': '\u2268', 'loplus;': '\u2a2d',
'ExponentialE;': '\u2147', 'profline;': '\u2312', 'DDotrahd;': '\u2911', 'rarrlp;': '\u21ac', 'Scy;': '\u0421',
'le;': '\u2264', 'auml;': '\xe4', 'roarr;': '\u21fe', 'fltns;': '\u25b1', 'vellip;': '\u22ee', 'apacir;': '\u2a6f',
'circledS;': '\u24c8', 'rfloor;': '\u230b', 'Cross;': '\u2a2f', 'DoubleLeftTee;': '\u2ae4', 'subsetneqq;': '\u2acb',
'ordf': '\xaa', 'rightleftharpoons;': '\u21cc', 'fllig;': '\ufb02', 'ntilde': '\xf1', 'emsp;': '\u2003',
'iacute;': '\xed', 'xfr;': '\U0001d535', 'fflig;': '\ufb00', 'xlarr;': '\u27f5', 'leftarrow;': '\u2190',
'urcorner;': '\u231d', 'dharl;': '\u21c3', 'reals;': '\u211d', 'Re;': '\u211c', 'bemptyv;': '\u29b0',
'angrtvb;': '\u22be', 'mdash;': '\u2014', 'dotsquare;': '\u22a1', 'omacr;': '\u014d', 'Vvdash;': '\u22aa',
'pm;': '\xb1', 'OverBar;': '\u203e', 'nldr;': '\u2025', 'target;': '\u2316', 'hksearow;': '\u2925',
'ecirc': '\xea', 'swnwar;': '\u292a', 'nfr;': '\U0001d52b', 'Copf;': '\u2102', 'Rarr;': '\u21a0',
'raquo;': '\xbb', 'oline;': '\u203e', 'utilde;': '\u0169', 'hookrightarrow;': '\u21aa', 'Or;': '\u2a54',
'origof;': '\u22b6', 'Theta;': '\u0398', 'kfr;': '\U0001d528', 'Sfr;': '\U0001d516', 'aopf;': '\U0001d552',
'lArr;': '\u21d0', 'equiv;': '\u2261', 'ord;': '\u2a5d', 'Sigma;': '\u03a3', 'DScy;': '\u0405',
'PrecedesTilde;': '\u227e', 'gnsim;': '\u22e7', 'colone;': '\u2254', 'boxhU;': '\u2568', 'Ntilde;': '\xd1',
'NotNestedGreaterGreater;': '\u2aa2\u0338', 'NotSucceeds;': '\u2281', 'larrfs;': '\u291d', 'models;': '\u22a7',
'DifferentialD;': '\u2146', 'toea;': '\u2928', 'Zdot;': '\u017b', 'zscr;': '\U0001d4cf', 'gtlPar;': '\u2995',
'ii;': '\u2148', 'Zcaron;': '\u017d', 'Leftarrow;': '\u21d0', 'ohbar;': '\u29b5', 'orv;': '\u2a5b',
'OverParenthesis;': '\u23dc', 'Upsilon;': '\u03a5', 'plusdo;': '\u2214', 'nis;': '\u22fc',
'Poincareplane;': '\u210c', 'tfr;': '\U0001d531', 'DownArrow;': '\u2193', 'Sub;': '\u22d0', 'Ncedil;': '\u0145',
'Iota;': '\u0399', 'InvisibleComma;': '\u2063', 'Ucy;': '\u0423', 'lnap;': '\u2a89', 'angst;': '\xc5',
'sube;': '\u2286', 'Gopf;': '\U0001d53e', 'Succeeds;': '\u227b', 'ap;': '\u2248', 'andv;': '\u2a5a',
'eDot;': '\u2251', 'angsph;': '\u2222', 'Dscr;': '\U0001d49f', 'boxHD;': '\u2566', 'gamma;': '\u03b3',
'RightTeeVector;': '\u295b', 'straightphi;': '\u03d5', 'ohm;': '\u03a9', 'frac15;': '\u2155',
'itilde;': '\u0129', 'jfr;': '\U0001d527', 'NJcy;': '\u040a', 'notinva;': '\u2209', 'frac25;': '\u2156',
'Epsilon;': '\u0395', 'xoplus;': '\u2a01', 'zcy;': '\u0437', 'Union;': '\u22c3', 'lesssim;': '\u2272',
'trpezium;': '\u23e2', 'bcy;': '\u0431', 'succsim;': '\u227f', 'boxDr;': '\u2553', 'beth;': '\u2136',
'prap;': '\u2ab7', 'bumpeq;': '\u224f', 'NotSquareSubset;': '\u228f\u0338', 'nhpar;': '\u2af2',
'vBar;': '\u2ae8', 'rbrke;': '\u298c', 'Dot;': '\xa8', 'ENG;': '\u014a', 'and;': '\u2227',
'nsupseteqq;': '\u2ac6\u0338', 'blacklozenge;': '\u29eb', 'boxdL;': '\u2555', 'odsold;': '\u29bc',
'bigsqcup;': '\u2a06', 'trade;': '\u2122', 'half;': '\xbd', 'elsdot;': '\u2a97', 'iota;': '\u03b9',
'diam;': '\u22c4', 'block;': '\u2588', 'parsim;': '\u2af3', 'KHcy;': '\u0425', 'Lstrok;': '\u0141',
'lesseqgtr;': '\u22da', 'div;': '\xf7', 'planckh;': '\u210e', 'rfr;': '\U0001d52f', 'loang;': '\u27ec',
'lnapprox;': '\u2a89', 'triangleleft;': '\u25c3', 'nvDash;': '\u22ad', 'oint;': '\u222e', 'ecirc;': '\xea',
'Lfr;': '\U0001d50f', 'eqsim;': '\u2242', 'emacr;': '\u0113', 'DownLeftVector;': '\u21bd', 'succeq;': '\u2ab0',
'yucy;': '\u044e', 'biguplus;': '\u2a04', 'plusmn;': '\xb1', 'smashp;': '\u2a33', 'cuvee;': '\u22ce',
'prec;': '\u227a', 'chi;': '\u03c7', 'angmsdag;': '\u29ae', 'backprime;': '\u2035', 'nbump;': '\u224e\u0338',
'Mcy;': '\u041c', 'subseteq;': '\u2286', 'gtrapprox;': '\u2a86', 'lmoustache;': '\u23b0', 'circledR;': '\xae',
'gsiml;': '\u2a90', 'subseteqq;': '\u2ac5', 'rbbrk;': '\u2773', 'inodot;': '\u0131', 'fpartint;': '\u2a0d',
'barvee;': '\u22bd', 'egsdot;': '\u2a98', 'fcy;': '\u0444', 'qint;': '\u2a0c', 'Gammad;': '\u03dc',
'upharpoonright;': '\u21be', 'NotEqual;': '\u2260', 'boxVL;': '\u2563', 'dotminus;': '\u2238', 'esim;': '\u2242',
'lotimes;': '\u2a34', 'Xopf;': '\U0001d54f', 'divide;': '\xf7', 'RightTriangleEqual;': '\u22b5', 'af;': '\u2061',
'tridot;': '\u25ec', 'lvnE;': '\u2268\ufe00', 'multimap;': '\u22b8', 'rsh;': '\u21b1', 'Ascr;': '\U0001d49c',
'hkswarow;': '\u2926', 'suplarr;': '\u297b', 'VDash;': '\u22ab', 'uscr;': '\U0001d4ca', 'sccue;': '\u227d',
'SHcy;': '\u0428', 'ndash;': '\u2013', 'YUcy;': '\u042e', 'rppolint;': '\u2a12', 'Equilibrium;': '\u21cc',
'boxvL;': '\u2561', 'nlt;': '\u226e', 'Euml;': '\xcb', 'IOcy;': '\u0401', 'times;': '\xd7', 'mapstoup;': '\u21a5',
'epsi;': '\u03b5', 'xlArr;': '\u27f8', 'cacute;': '\u0107', 'capcap;': '\u2a4b', 'ntriangleleft;': '\u22ea',
'sqsupseteq;': '\u2292', 'NotCupCap;': '\u226d', 'RightUpVector;': '\u21be', 'rpar;': ')', 'Xi;': '\u039e',
'tilde;': '\u02dc', 'auml': '\xe4', 'esdot;': '\u2250', 'nleqslant;': '\u2a7d\u0338', 'rhard;': '\u21c1',
'Delta;': '\u0394', 'gsime;': '\u2a8e', 'lt': '<', 'SHCHcy;': '\u0429', 'varsupsetneq;': '\u228b\ufe00',
'LeftUpVectorBar;': '\u2958', 'simne;': '\u2246', 'lozf;': '\u29eb', 'LeftTeeArrow;': '\u21a4',
'spadesuit;': '\u2660', 'Pr;': '\u2abb', 'Eacute;': '\xc9', 'boxVh;': '\u256b', 'Dashv;': '\u2ae4',
'ccaron;': '\u010d', 'setmn;': '\u2216', 'Aring;': '\xc5', 'plustwo;': '\u2a27', 'Rcaron;': '\u0158',
'sdote;': '\u2a66', 'ifr;': '\U0001d526', 'roplus;': '\u2a2e', 'qscr;': '\U0001d4c6', 'bernou;': '\u212c',
'Dstrok;': '\u0110', 'not': '\xac', 'backepsilon;': '\u03f6', 'Otilde;': '\xd5', 'langd;': '\u2991',
'lopf;': '\U0001d55d', 'KJcy;': '\u040c', 'infin;': '\u221e', 'uacute': '\xfa', 'Fopf;': '\U0001d53d',
'backsim;': '\u223d', 'ape;': '\u224a', 'LeftArrowRightArrow;': '\u21c6', 'Wedge;': '\u22c0',
'DownLeftTeeVector;': '\u295e', 'Ffr;': '\U0001d509', 'rtrif;': '\u25b8', 'gjcy;': '\u0453', 'supmult;': '\u2ac2',
'gt;': '>', 'swarr;': '\u2199', 'amalg;': '\u2a3f', 'rho;': '\u03c1', 'triminus;': '\u2a3a', 'or;': '\u2228',
'nesim;': '\u2242\u0338', 'sime;': '\u2243', 'larrlp;': '\u21ab', 'Sum;': '\u2211', 'khcy;': '\u0445',
'wscr;': '\U0001d4cc', 'caret;': '\u2041', 'agrave': '\xe0', 'Ocirc': '\xd4', 'Iopf;': '\U0001d540',
'bump;': '\u224e', 'ratail;': '\u291a', 'simgE;': '\u2aa0', 'precneqq;': '\u2ab5', 'varpropto;': '\u221d',
'yuml;': '\xff', 'ntrianglelefteq;': '\u22ec', 'ouml': '\xf6', 'lt;': '<', 'alpha;': '\u03b1',
'gopf;': '\U0001d558', 'smt;': '\u2aaa', 'doteqdot;': '\u2251', 'LessSlantEqual;': '\u2a7d', 'mid;': '\u2223',
'simeq;': '\u2243', 'tstrok;': '\u0167', 'GreaterEqualLess;': '\u22db', 'escr;': '\u212f', 'Nfr;': '\U0001d511',
'nGg;': '\u22d9\u0338', 'simlE;': '\u2a9f', 'apid;': '\u224b', 'nvrArr;': '\u2903', 'dotplus;': '\u2214',
'cirscir;': '\u29c2', 'LeftTee;': '\u22a3', 'lnE;': '\u2268', 'topcir;': '\u2af1', 'egrave;': '\xe8',
'demptyv;': '\u29b1', 'copysr;': '\u2117', 'Vdashl;': '\u2ae6', 'yen;': '\xa5', 'gap;': '\u2a86',
'thetav;': '\u03d1', 'bumpE;': '\u2aae', 'Ncaron;': '\u0147', 'blacktriangleright;': '\u25b8',
'olcir;': '\u29be', 'UnderBracket;': '\u23b5', 'nsimeq;': '\u2244', 'downarrow;': '\u2193', 'Assign;': '\u2254',
'opar;': '\u29b7', 'diams;': '\u2666', 'jsercy;': '\u0458', 'SubsetEqual;': '\u2286', 'bkarow;': '\u290d',
'square;': '\u25a1', 'ntriangleright;': '\u22eb', 'nrarr;': '\u219b', 'Udblac;': '\u0170', 'sqsubset;': '\u228f',
'sup1;': '\xb9', 'ldrdhar;': '\u2967', 'erarr;': '\u2971', 'frown;': '\u2322', 'cemptyv;': '\u29b2',
'rtri;': '\u25b9', 'Hscr;': '\u210b', 'Cconint;': '\u2230', 'Edot;': '\u0116', 'hardcy;': '\u044a',
'there4;': '\u2234', 'frac56;': '\u215a', 'Gbreve;': '\u011e', 'ldquo;': '\u201c', 'wedgeq;': '\u2259',
'ncong;': '\u2247', 'prop;': '\u221d', 'isinsv;': '\u22f3', 'hbar;': '\u210f', 'supseteq;': '\u2287',
'Abreve;': '\u0102', 'swarrow;': '\u2199', 'lfisht;': '\u297c', 'siml;': '\u2a9d', 'equals;': '=',
'lesges;': '\u2a93', 'phiv;': '\u03d5', 'Proportion;': '\u2237', 'Dcy;': '\u0414', 'edot;': '\u0117',
'CounterClockwiseContourIntegral;': '\u2233', 'shortparallel;': '\u2225', 'frac34': '\xbe', 'solbar;': '\u233f',
'sbquo;': '\u201a', 'LessLess;': '\u2aa1', 'harrcir;': '\u2948', 'Jfr;': '\U0001d50d', 'Xscr;': '\U0001d4b3',
'NotNestedLessLess;': '\u2aa1\u0338', 'zcaron;': '\u017e', 'abreve;': '\u0103', 'nacute;': '\u0144',
'ultri;': '\u25f8', 'Bcy;': '\u0411', 'ThickSpace;': '\u205f\u200a', 'questeq;': '\u225f',
'DoubleLongLeftArrow;': '\u27f8', 'ccaps;': '\u2a4d', 'rHar;': '\u2964', 'upharpoonleft;': '\u21bf',
'iacute': '\xed', 'cong;': '\u2245', 'yopf;': '\U0001d56a', 'nvlt;': '<\u20d2', 'bopf;': '\U0001d553',
'Supset;': '\u22d1', 'Subset;': '\u22d0', 'varsubsetneqq;': '\u2acb\ufe00', 'Omega;': '\u03a9',
'lsh;': '\u21b0', 'iiiint;': '\u2a0c', 'copy': '\xa9', 'gscr;': '\u210a', 'Star;': '\u22c6', 'boxHU;': '\u2569',
'circ;': '\u02c6', 'lap;': '\u2a85', 'rlhar;': '\u21cc', 'percnt;': '%', 'NotLessSlantEqual;': '\u2a7d\u0338',
'maltese;': '\u2720', 'looparrowleft;': '\u21ab', 'LeftVectorBar;': '\u2952', 'nLeftrightarrow;': '\u21ce',
'bsolhsub;': '\u27c8', 'nsubseteqq;': '\u2ac5\u0338', 'Rfr;': '\u211c', 'lgE;': '\u2a91',
'RightTriangleBar;': '\u29d0', 'Superset;': '\u2283', 'reg;': '\xae', 'frac14;': '\xbc', 'RBarr;': '\u2910',
'realpart;': '\u211c', 'zwnj;': '\u200c', 'nrarrc;': '\u2933\u0338', 'pluscir;': '\u2a22', 'lharul;': '\u296a',
'thickapprox;': '\u2248', 'lscr;': '\U0001d4c1', 'caps;': '\u2229\ufe00', 'supsim;': '\u2ac8',
'cirfnint;': '\u2a10', 'boxvh;': '\u253c', 'therefore;': '\u2234', 'Verbar;': '\u2016', 'nsqsube;': '\u22e2',
'latail;': '\u2919', 'propto;': '\u221d', 'boxuR;': '\u2558', 'Omacr;': '\u014c', 'ges;': '\u2a7e',
'Scaron;': '\u0160', 'oslash': '\xf8', 'oast;': '\u229b', 'phi;': '\u03c6', 'cuwed;': '\u22cf',
'oplus;': '\u2295', 'ncedil;': '\u0146', 'scnap;': '\u2aba', 'Iogon;': '\u012e', 'bne;': '=\u20e5',
'Oslash;': '\xd8', 'xuplus;': '\u2a04', 'precnsim;': '\u22e8', 'bigtriangledown;': '\u25bd', 'iprod;': '\u2a3c',
'ange;': '\u29a4', 'RightTee;': '\u22a2', 'tosa;': '\u2929', 'Iukcy;': '\u0406', 'leftrightarrows;': '\u21c6',
'DoubleLeftArrow;': '\u21d0', 'COPY': '\xa9', 'frac13;': '\u2153', 'middot': '\xb7', 'pr;': '\u227a',
'rhov;': '\u03f1', 'Qopf;': '\u211a', 'weierp;': '\u2118', 'ofr;': '\U0001d52c', 'lrhard;': '\u296d',
'commat;': '@', 'nesear;': '\u2928', 'sopf;': '\U0001d564', 'raquo': '\xbb', 'malt;': '\u2720',
'OElig;': '\u0152', 'Uscr;': '\U0001d4b0', 'eqslantless;': '\u2a95', 'LeftTriangleEqual;': '\u22b4',
'oacute': '\xf3', 'andslope;': '\u2a58', 'yfr;': '\U0001d536', 'nsup;': '\u2285', 'NotElement;': '\u2209',
'angmsdaf;': '\u29ad', 'nsccue;': '\u22e1', 'ge;': '\u2265', 'fallingdotseq;': '\u2252', 'rbarr;': '\u290d',
'DoubleLongLeftRightArrow;': '\u27fa', 'uparrow;': '\u2191', 'orarr;': '\u21bb', 'Rcy;': '\u0420',
'acute;': '\xb4', 'NewLine;': '\n', 'lmoust;': '\u23b0', 'NegativeMediumSpace;': '\u200b', 'Nacute;': '\u0143',
'aelig': '\xe6', 'prcue;': '\u227c', 'ensp;': '\u2002', 'utdot;': '\u22f0', 'napos;': '\u0149',
'DoubleLongRightArrow;': '\u27f9', 'Vfr;': '\U0001d519', 'xutri;': '\u25b3', 'awint;': '\u2a11',
'leftrightsquigarrow;': '\u21ad', 'plusacir;': '\u2a23', 'FilledVerySmallSquare;': '\u25aa', 'Mscr;': '\u2133',
'leftrightharpoons;': '\u21cb', 'sqcups;': '\u2294\ufe00', 'LJcy;': '\u0409', 'circleddash;': '\u229d',
'NoBreak;': '\u2060', 'nlsim;': '\u2274', 'Uogon;': '\u0172', 'NotRightTriangleBar;': '\u29d0\u0338',
'Ecy;': '\u042d', 'sdot;': '\u22c5', 'smeparsl;': '\u29e4', 'niv;': '\u220b', 'kcedil;': '\u0137',
'xrarr;': '\u27f6', 'isindot;': '\u22f5', 'xodot;': '\u2a00', 'gtdot;': '\u22d7', 'natural;': '\u266e',
'eqvparsl;': '\u29e5', 'gnap;': '\u2a8a', 'Psi;': '\u03a8', 'Rho;': '\u03a1', 'micro;': '\xb5',
'cylcty;': '\u232d', 'gesles;': '\u2a94', 'uHar;': '\u2963', 'CircleTimes;': '\u2297', 'sqsub;': '\u228f',
'ldrushar;': '\u294b', 'bsol;': '\\', 'rcedil;': '\u0157', 'nprec;': '\u2280', 'vltri;': '\u22b2',
'atilde;': '\xe3', 'prsim;': '\u227e', 'primes;': '\u2119', 'Omicron;': '\u039f', 'ocirc;': '\xf4',
'iiint;': '\u222d', 'quest;': '?', 'daleth;': '\u2138', 'nbsp': '\xa0', 'nwArr;': '\u21d6', 'gammad;': '\u03dd',
'heartsuit;': '\u2665', 'wedbar;': '\u2a5f', 'OverBrace;': '\u23de', 'spar;': '\u2225', 'brvbar': '\xa6',
'blacktriangleleft;': '\u25c2', 'lopar;': '\u2985', 'xwedge;': '\u22c0', 'iexcl;': '\xa1', 'boxul;': '\u2518',
'Imacr;': '\u012a', 'ominus;': '\u2296', 'eopf;': '\U0001d556', 'DotDot;': '\u20dc', 'Scirc;': '\u015c',
'succnsim;': '\u22e9', 'sigmaf;': '\u03c2', 'ReverseEquilibrium;': '\u21cb', 'DiacriticalDot;': '\u02d9',
'AElig;': '\xc6', 'zigrarr;': '\u21dd', 'NegativeThinSpace;': '\u200b', 'approxeq;': '\u224a', 'Gcy;': '\u0413',
'Vert;': '\u2016', 'NotSquareSupersetEqual;': '\u22e3', 'srarr;': '\u2192', 'rtrie;': '\u22b5',
'VeryThinSpace;': '\u200a', 'RightDoubleBracket;': '\u27e7', 'dfr;': '\U0001d521', 'Eogon;': '\u0118',
'Cscr;': '\U0001d49e', 'gnE;': '\u2269', 'nparallel;': '\u2226', 'lsime;': '\u2a8d', 'lceil;': '\u2308',
'ijlig;': '\u0133', 'RightCeiling;': '\u2309', 'Icy;': '\u0418', 'yuml': '\xff', 'exist;': '\u2203',
'DiacriticalAcute;': '\xb4', 'boxVr;': '\u255f', 'mscr;': '\U0001d4c2', 'NotGreaterSlantEqual;': '\u2a7e\u0338',
'leftrightarrow;': '\u2194', 'Wopf;': '\U0001d54e', 'supset;': '\u2283', 'DownArrowUpArrow;': '\u21f5',
'glj;': '\u2aa4', 'Colone;': '\u2a74', 'prnsim;': '\u22e8', 'Zfr;': '\u2128', 'lbrkslu;': '\u298d',
'scedil;': '\u015f', 'Dcaron;': '\u010e', 'coloneq;': '\u2254', 'CapitalDifferentialD;': '\u2145',
'nshortmid;': '\u2224', 'trianglelefteq;': '\u22b4', 'rarrb;': '\u21e5', 'ssetmn;': '\u2216', 'ufr;': '\U0001d532',
'Acirc;': '\xc2', 'LeftRightArrow;': '\u2194', 'varr;': '\u2195', 'eth': '\xf0', 'varsupsetneqq;': '\u2acc\ufe00',
'HilbertSpace;': '\u210b', 'diamond;': '\u22c4', 'npart;': '\u2202\u0338', 'Cfr;': '\u212d', 'slarr;': '\u2190',
'cwconint;': '\u2232', 'ncaron;': '\u0148', 'theta;': '\u03b8', 'NotSupersetEqual;': '\u2289',
'nsubset;': '\u2282\u20d2', 'EmptySmallSquare;': '\u25fb', 'Tstrok;': '\u0166', 'lg;': '\u2276', 'urcorn;': '\u231d',
'acy;': '\u0430', 'DoubleVerticalBar;': '\u2225', 'Phi;': '\u03a6', 'imof;': '\u22b7', 'angle;': '\u2220',
'supdot;': '\u2abe', 'timesb;': '\u22a0', 'bfr;': '\U0001d51f', 'dcaron;': '\u010f', 'Aacute': '\xc1',
'cent': '\xa2', 'rdquo;': '\u201d', 'jopf;': '\U0001d55b', 'sup2;': '\xb2', 'triangledown;': '\u25bf',
'lHar;': '\u2962', 'leftarrowtail;': '\u21a2', 'HorizontalLine;': '\u2500', 'duarr;': '\u21f5', 'cupcap;': '\u2a46',
'euml': '\xeb', 'shy': '\xad', 'curarr;': '\u21b7', 'larrhk;': '\u21a9', 'Kfr;': '\U0001d50e', 'olarr;': '\u21ba',
'nsupE;': '\u2ac6\u0338', 'colon;': ':', 'Eta;': '\u0397', 'dsol;': '\u29f6', 'LessGreater;': '\u2276',
'dblac;': '\u02dd', 'vopf;': '\U0001d567', 'incare;': '\u2105', 'wreath;': '\u2240', 'NotSucceedsEqual;': '\u2ab0\u0338',
'lcaron;': '\u013e', 'conint;': '\u222e', 'napid;': '\u224b\u0338', 'Equal;': '\u2a75', 'dscr;': '\U0001d4b9',
'Itilde;': '\u0128', 'iiota;': '\u2129', 'UpDownArrow;': '\u2195', 'Vcy;': '\u0412', 'lobrk;': '\u27e6',
'thksim;': '\u223c', 'Ucirc;': '\xdb', 'Rcedil;': '\u0156', 'tritime;': '\u2a3b', 'boxh;': '\u2500',
'Fouriertrf;': '\u2131', 'realine;': '\u211b', 'rightleftarrows;': '\u21c4', 'wp;': '\u2118', 'thkap;': '\u2248',
'sqsupset;': '\u2290', 'CloseCurlyQuote;': '\u2019', 'SquareSubsetEqual;': '\u2291', 'Iuml;': '\xcf',
'sqsup;': '\u2290', 'NotDoubleVerticalBar;': '\u2226', 'ugrave': '\xf9', 'acd;': '\u223f', 'oscr;': '\u2134',
'Qfr;': '\U0001d514', 'ncap;': '\u2a43', 'Vdash;': '\u22a9', 'nrtrie;': '\u22ed', 'lesdot;': '\u2a7f',
'nltri;': '\u22ea', 'ncy;': '\u043d', 'Hacek;': '\u02c7', 'radic;': '\u221a', 'frac78;': '\u215e',
'NotReverseElement;': '\u220c', 'Therefore;': '\u2234', 'lates;': '\u2aad\ufe00', 'varepsilon;': '\u03f5',
'ruluhar;': '\u2968', 'rsaquo;': '\u203a', 'Tscr;': '\U0001d4af', 'subsetneq;': '\u228a', 'UnderBrace;': '\u23df',
'Uring;': '\u016e', 'acirc': '\xe2', 'check;': '\u2713', 'rsquor;': '\u2019', 'tbrk;': '\u23b4',
'NotLessTilde;': '\u2274', 'vsupne;': '\u228b\ufe00', 'wfr;': '\U0001d534', 'hellip;': '\u2026', 'nless;': '\u226e',
'Yuml;': '\u0178', 'FilledSmallSquare;': '\u25fc', 'SucceedsEqual;': '\u2ab0', 'frac23;': '\u2154',
'OverBracket;': '\u23b4', 'SupersetEqual;': '\u2287', 'gesdot;': '\u2a80', 'excl;': '!', 'UpArrowBar;': '\u2912',
'barwed;': '\u2305', 'barwedge;': '\u2305', 'notinvc;': '\u22f6', 'uArr;': '\u21d1', 'lthree;': '\u22cb',
'risingdotseq;': '\u2253', 'Mopf;': '\U0001d544', 'yacute;': '\xfd', 'otimesas;': '\u2a36', 'capcup;': '\u2a47',
'ofcir;': '\u29bf', 'Upsi;': '\u03d2', 'Ecaron;': '\u011a', 'Qscr;': '\U0001d4ac', 'hookleftarrow;': '\u21a9',
'Ograve;': '\xd2', 'precnapprox;': '\u2ab9', 'Uarrocir;': '\u2949', 'part;': '\u2202', 'subsub;': '\u2ad5',
'lmidot;': '\u0140', 'DJcy;': '\u0402', 'nexists;': '\u2204', 'NotEqualTilde;': '\u2242\u0338',
'profalar;': '\u232e', 'sum;': '\u2211', 'Precedes;': '\u227a', 'Ofr;': '\U0001d512', 'fopf;': '\U0001d557',
'iecy;': '\u0435', 'ShortUpArrow;': '\u2191', 'nparsl;': '\u2afd\u20e5', 'boxUR;': '\u255a',
'exponentiale;': '\u2147', 'upsilon;': '\u03c5', 'Jopf;': '\U0001d541', 'VerticalSeparator;': '\u2758',
'Dfr;': '\U0001d507', 'NonBreakingSpace;': '\xa0', 'bottom;': '\u22a5', 'orslope;': '\u2a57', 'boxDL;': '\u2557',
'bigcap;': '\u22c2', 'Vbar;': '\u2aeb', 'pound;': '\xa3', 'boxvr;': '\u251c', 'Cup;': '\u22d3',
'bigtriangleup;': '\u25b3', 'RightAngleBracket;': '\u27e9', 'lesg;': '\u22da\ufe00', 'RightDownVector;': '\u21c2',
'Gfr;': '\U0001d50a', 'shy;': '\xad', 'supnE;': '\u2acc', 'cirE;': '\u29c3', 'angmsdae;': '\u29ac',
'Bumpeq;': '\u224e', 'delta;': '\u03b4', 'thinsp;': '\u2009', 'EmptyVerySmallSquare;': '\u25ab',
'leftleftarrows;': '\u21c7', 'les;': '\u2a7d', 'ltcc;': '\u2aa6', 'TildeFullEqual;': '\u2245', 'iocy;': '\u0451',
'supsetneqq;': '\u2acc', 'rharul;': '\u296c', 'hArr;': '\u21d4', 'amp': '&', 'Cdot;': '\u010a', 'rbrack;': ']',
'nspar;': '\u2226', 'pcy;': '\u043f', 'NotSucceedsTilde;': '\u227f\u0338', 'acute': '\xb4', 'dlcrop;': '\u230d',
'subdot;': '\u2abd', 'UnionPlus;': '\u228e', 'mapstoleft;': '\u21a4', 'DoubleRightTee;': '\u22a8',
'sigmav;': '\u03c2', 'sfr;': '\U0001d530', 'Igrave': '\xcc', 'euro;': '\u20ac', 'complement;': '\u2201',
'profsurf;': '\u2313', 'nabla;': '\u2207', 'para;': '\xb6', 'Dopf;': '\U0001d53b', 'cdot;': '\u010b',
'sim;': '\u223c', 'popf;': '\U0001d561', 'ImaginaryI;': '\u2148', 'notni;': '\u220c', 'RightArrowBar;': '\u21e5',
'intlarhk;': '\u2a17', 'gtcir;': '\u2a7a', 'llcorner;': '\u231e', 'Bfr;': '\U0001d505', 'Rang;': '\u27eb',
'ddagger;': '\u2021', 'vBarv;': '\u2ae9', 'forkv;': '\u2ad9', 'angmsd;': '\u2221', 'ouml;': '\xf6',
'nvgt;': '>\u20d2', 'Dagger;': '\u2021', 'lharu;': '\u21bc', 'Exists;': '\u2203', 'LeftTriangleBar;': '\u29cf',
'ratio;': '\u2236', 'TildeTilde;': '\u2248', 'minusb;': '\u229f', 'race;': '\u223d\u0331', 'rAarr;': '\u21db',
'bigoplus;': '\u2a01', 'rangd;': '\u2992', 'micro': '\xb5', 'osol;': '\u2298', 'strns;': '\xaf',
'Longleftrightarrow;': '\u27fa', 'boxUl;': '\u255c', 'Sc;': '\u2abc', 'ocirc': '\xf4', 'ac;': '\u223e',
'nsubE;': '\u2ac5\u0338', 'DotEqual;': '\u2250', 'zopf;': '\U0001d56b', 'llarr;': '\u21c7', 'permil;': '\u2030',
'Topf;': '\U0001d54b', 'UpperLeftArrow;': '\u2196', 'ulcorn;': '\u231c', 'curlyeqsucc;': '\u22df',
'aleph;': '\u2135', 'image;': '\u2111', 'igrave': '\xec', 'NestedLessLess;': '\u226a', 'LongLeftRightArrow;': '\u27f7',
'sqsupe;': '\u2292', 'midast;': '*', 'dwangle;': '\u29a6', 'uring;': '\u016f', 'becaus;': '\u2235',
'GreaterFullEqual;': '\u2267', 'dd;': '\u2146', 'kcy;': '\u043a', 'Laplacetrf;': '\u2112', 'marker;': '\u25ae',
'simrarr;': '\u2972', 'Agrave;': '\xc0', 'bNot;': '\u2aed', 'ocir;': '\u229a', 'supsetneq;': '\u228b',
'fork;': '\u22d4', 'pi;': '\u03c0', 'topbot;': '\u2336', 'xharr;': '\u27f7', 'Jukcy;': '\u0404',
'naturals;': '\u2115', 'csup;': '\u2ad0', 'ltimes;': '\u22c9', 'mcy;': '\u043c', 'lessgtr;': '\u2276',
'uuml': '\xfc', 'iquest;': '\xbf', 'boxhd;': '\u252c', 'nsupe;': '\u2289', 'leftharpoondown;': '\u21bd',
'Lacute;': '\u0139', 'Emacr;': '\u0112', 'Vee;': '\u22c1', 'cupcup;': '\u2a4a', 'backsimeq;': '\u22cd',
'dlcorn;': '\u231e', 'bprime;': '\u2035', 'HumpEqual;': '\u224f', 'simdot;': '\u2a6a', 'oelig;': '\u0153',
'ntilde;': '\xf1', 'xdtri;': '\u25bd', 'hscr;': '\U0001d4bd', 'cups;': '\u222a\ufe00', 'pre;': '\u2aaf',
'yscr;': '\U0001d4ce', 'boxplus;': '\u229e', 'Jcirc;': '\u0134', 'suphsol;': '\u27c9', 'Nopf;': '\u2115',
'DZcy;': '\u040f', 'flat;': '\u266d', 'ldquor;': '\u201e', 'Leftrightarrow;': '\u21d4', 'veebar;': '\u22bb',
'Rrightarrow;': '\u21db', 'compfn;': '\u2218', 'succ;': '\u227b', 'NegativeVeryThinSpace;': '\u200b',
'cupbrcap;': '\u2a48', 'notindot;': '\u22f5\u0338', 'supseteqq;': '\u2ac6', 'plankv;': '\u210f', 'ordm': '\xba',
'nsupseteq;': '\u2289', 'sacute;': '\u015b', 'ordm;': '\xba', 'dtdot;': '\u22f1', 'NotSubsetEqual;': '\u2288',
'subedot;': '\u2ac3', 'curlywedge;': '\u22cf', 'GreaterGreater;': '\u2aa2', 'dbkarow;': '\u290f',
'quatint;': '\u2a16', 'ContourIntegral;': '\u222e', 'LeftTriangle;': '\u22b2', 'lrcorner;': '\u231f',
'RightVectorBar;': '\u2953', 'nequiv;': '\u2262', 'ltrie;': '\u22b4', 'divonx;': '\u22c7', 'topf;': '\U0001d565',
'cuepr;': '\u22de', 'LeftRightVector;': '\u294e', 'rtimes;': '\u22ca', 'LeftCeiling;': '\u2308', 'iukcy;': '\u0456',
'ordf;': '\xaa', 'OpenCurlyQuote;': '\u2018', 'fnof;': '\u0192', 'thorn': '\xfe', 'star;': '\u2606',
'lne;': '\u2a87', 'hearts;': '\u2665', 'dash;': '\u2010', 'vartriangleleft;': '\u22b2', 'shcy;': '\u0448',
'hfr;': '\U0001d525', 'uuarr;': '\u21c8', 'isin;': '\u2208', 'tcaron;': '\u0165', 'bigodot;': '\u2a00',
'lurdshar;': '\u294a', 'ucy;': '\u0443', 'nmid;': '\u2224', 'semi;': ';', 'laquo;': '\xab', 'bullet;': '\u2022',
'hslash;': '\u210f', 'gtrsim;': '\u2273', 'InvisibleTimes;': '\u2062', 'cfr;': '\U0001d520', 'tscr;': '\U0001d4c9',
'nltrie;': '\u22ec', 'succcurlyeq;': '\u227d', 'ogon;': '\u02db', 'NotExists;': '\u2204', 'kgreen;': '\u0138',
'seArr;': '\u21d8', 'Product;': '\u220f', 'sqcap;': '\u2293', 'rx;': '\u211e', 'nLeftarrow;': '\u21cd',
'Updownarrow;': '\u21d5', 'Ecirc': '\xca', 'Lcy;': '\u041b', 'icirc;': '\xee', 'bigstar;': '\u2605',
'gtcc;': '\u2aa7', 'olcross;': '\u29bb', 'in;': '\u2208', 'VerticalTilde;': '\u2240', 'filig;': '\ufb01',
'rightsquigarrow;': '\u219d', 'pfr;': '\U0001d52d', 'Intersection;': '\u22c2', 'Not;': '\u2aec', 'rsqb;': ']',
'Ncy;': '\u041d', 'period;': '.', 'xhArr;': '\u27fa', 'phmmat;': '\u2133', 'NotCongruent;': '\u2262',
'boxdR;': '\u2552', 'kjcy;': '\u045c', 'bigwedge;': '\u22c0', 'NotGreaterTilde;': '\u2275', 'nsqsupe;': '\u22e3',
'aring;': '\xe5', 'prnE;': '\u2ab5', 'LessFullEqual;': '\u2266', 'eqcirc;': '\u2256', 'downharpoonleft;': '\u21c3',
'rlarr;': '\u21c4', 'smallsetminus;': '\u2216', 'omega;': '\u03c9', 'mldr;': '\u2026', 'vzigzag;': '\u299a',
'nleqq;': '\u2266\u0338', 'ulcrop;': '\u230f', 'straightepsilon;': '\u03f5', 'Auml;': '\xc4', 'LongLeftArrow;': '\u27f5'}
def substitute_entity(match):
    ent = match.group(2) + match.group(3)
    res = ""
    while ent not in html5 and not ent.endswith(";") and match.group(1) != "#":
        # Handles '&' used as a plain argument separator in URLs embedded in
        # the data: strip trailing characters until a known entity name remains.
        try:
            res = ent[-1] + res
            ent = ent[:-1]
        except IndexError:
            break
    if match.group(1) == "#":
        # Numeric character reference, e.g. &#233;
        ent = unichr(int(ent.replace(";", "")))
        return ent.encode('utf-8')
    else:
        cp = html5.get(ent)
        if cp:
            # Table values are byte strings holding literal escapes (e.g. '\u2270'),
            # so decode them before re-encoding as UTF-8.
            return cp.decode("unicode-escape").encode('utf-8') + res
        else:
            return match.group()

# This return statement belongs to the enclosing decode helper, whose def
# precedes this excerpt; entity_re is the compiled entity pattern defined
# elsewhere in the file.
return entity_re.subn(substitute_entity, data)[0]
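# Usage sketch (hypothetical): entity_re is assumed to be compiled elsewhere
# with three groups -- an optional '#', the entity body, and an optional
# trailing run -- e.g.:
#   entity_re = re.compile(r"&(#?)(\w+)(;?)")
# With that pattern, entity_re.subn(substitute_entity, "4 &lt; 5")[0]
# would yield "4 < 5", since 'lt;' maps to '<' in the table above.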
| gpl-3.0 | -1,300,616,030,855,172,400 | 83.053549 | 134 | 0.492533 | false |
CyrilWaechter/pyRevitMEP | pyRevitMEP.tab/Create.panel/BatchCreation.pulldown/BatchDependentViewCreation.pushbutton/script.py | 1 | 2538 | # coding: utf8
import rpw
# noinspection PyUnresolvedReferences
from rpw import revit, DB
from pyrevit.forms import WPFWindow
from pyrevit import script
from pyrevitmep.workset import Workset
# noinspection PyUnresolvedReferences
from System.Collections.ObjectModel import ObservableCollection
__doc__ = "Batch create dependent views corresponding to existing Scope Boxes for selected views"
__title__ = "DependentViews"
__author__ = "Cyril Waechter"
__context__ = "selection"
doc = rpw.revit.doc
logger = script.get_logger()
class Gui(WPFWindow):
def __init__(self, xaml_file_name):
WPFWindow.__init__(self, xaml_file_name)
volume_of_interest = DB.FilteredElementCollector(doc).OfCategory(DB.BuiltInCategory.OST_VolumeOfInterest)
self.data_grid_content = ObservableCollection[object](volume_of_interest)
self.datagrid.ItemsSource = self.data_grid_content
image_dict = {
"plus_img": "icons8-plus-32.png",
"minus_img": "icons8-minus-32.png",
"import_img": "icons8-import-32.png",
"ok_img": "icons8-checkmark-32.png"
}
for k, v in image_dict.items():
self.set_image_source(getattr(self, k), v)
# noinspection PyUnusedLocal
def ok_click(self, sender, e):
for view_id in rpw.uidoc.Selection.GetElementIds():
view = doc.GetElement(view_id)
try:
with rpw.db.Transaction("BatchCreateDependentViews"):
for volume_of_interest in self.data_grid_content:
new_view_id = view.Duplicate(DB.ViewDuplicateOption.AsDependent)
new_view = doc.GetElement(new_view_id)
parameter = new_view.get_Parameter(DB.BuiltInParameter.VIEWER_VOLUME_OF_INTEREST_CROP)
parameter.Set(volume_of_interest.Id)
except AttributeError as e:
print("{} doesn't seem to be a view".format(view))
logger.debug("{}".format(e.message))
# noinspection PyUnusedLocal
def load_from_file_click(self, sender, e):
for workset in Workset.read_from_txt():
self.data_grid_content.Add(workset)
# noinspection PyUnusedLocal
def add(self, sender, e):
self.data_grid_content.Add(Workset(""))
# noinspection PyUnusedLocal
def remove(self, sender, e):
for item in list(self.datagrid.SelectedItems):
self.data_grid_content.Remove(item)
gui = Gui("WPFWindow.xaml")
gui.ShowDialog()
| gpl-3.0 | 2,052,743,929,366,040,600 | 35.782609 | 113 | 0.644208 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/topology_resource.py | 1 | 1570 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TopologyResource(Model):
"""The network resource topology information for the given resource group.
:param name: Name of the resource.
:type name: str
:param id: ID of the resource.
:type id: str
:param location: Resource location.
:type location: str
:param associations: Holds the associations the resource has with other
resources in the resource group.
:type associations:
list[~azure.mgmt.network.v2017_06_01.models.TopologyAssociation]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'associations': {'key': 'associations', 'type': '[TopologyAssociation]'},
}
def __init__(self, **kwargs):
super(TopologyResource, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.id = kwargs.get('id', None)
self.location = kwargs.get('location', None)
self.associations = kwargs.get('associations', None)
| mit | -4,338,701,136,379,454,000 | 36.380952 | 81 | 0.591083 | false |
eliben/code-for-blog | 2018/type-inference/parser.py | 1 | 7046 | # EBNF specification for micro-ML. { x } means zero or more repetitions of x.
#
# The top-level is decl.
#
# decl: ID { ID } '=' expr
#
# expr: INT
# | bool
# | ID
# | ID '(' { expr ',' } ')'
# | '(' expr ')'
# | expr op expr
# | 'if' expr 'then' expr 'else' expr
# | 'lambda' { ID } '->' expr
#
# op: + | * | - | == | > | >= | <= | < | !=
# bool: 'true' | 'false'
#
# ID: identifier
# INT: an integer
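#
# Example decl in this grammar (illustrative):
#   max a b = if a > b then a else b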
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
import ast
import lexer
class ParseError(Exception):
pass
class Parser:
"""Parser for micro-ML.
The only public method here is parse_decl that parses a 'decl' from a
string. Usage:
p = Parser()
decl = p.parse_decl(<some micro-ML code>)
# decl is now an ast.Decl node
parse_decl() can be called multiple times with the same parser to parse
multiple decls (state is wiped out between calls).
"""
def __init__(self):
lex_rules = (
('if', 'IF'),
('then', 'THEN'),
('else', 'ELSE'),
('true', 'TRUE'),
('false', 'FALSE'),
('lambda', 'LAMBDA'),
('\d+', 'INT'),
('->', 'ARROW'),
('!=', '!='),
('==', '=='),
('>=', '>='),
('<=', '<='),
('<', '<'),
('>', '>'),
('\+', '+'),
('\-', '-'),
('\*', '*'),
('\(', '('),
('\)', ')'),
('=', '='),
(',', ','),
('[a-zA-Z_]\w*', 'ID'),
)
self.lexer = lexer.Lexer(lex_rules, skip_whitespace=True)
self.cur_token = None
self.operators = {'!=', '==', '>=', '<=', '<', '>', '+', '-', '*'}
def parse_decl(self, text):
"""Parse declaration given in text and return an AST node for it."""
self.lexer.input(text)
self._get_next_token()
decl = self._decl()
        if self.cur_token.type is not None:
self._error('Unexpected token "{}" (at #{})'.format(
self.cur_token.val, self.cur_token.pos))
return decl
def _error(self, msg):
raise ParseError(msg)
def _get_next_token(self):
"""Advances the parser's internal lexer to the next token.
This method doesn't return anything; it assigns self.cur_token to the
next token in the input stream.
"""
try:
self.cur_token = self.lexer.token()
if self.cur_token is None:
self.cur_token = lexer.Token(None, None, None)
except lexer.LexerError as e:
self._error('Lexer error at position {}: {}'.format(e.pos, e))
def _match(self, type):
""" The 'match' primitive of RD parsers.
* Verifies that the current token is of the given type
* Returns the value of the current token
* Reads in the next token
"""
if self.cur_token.type == type:
val = self.cur_token.val
self._get_next_token()
return val
else:
self._error('Unmatched {} (found {})'.format(type,
self.cur_token.type))
def _decl(self):
name = self._match('ID')
argnames = []
# If we have arguments, collect them. Only IDs allowed here.
while self.cur_token.type == 'ID':
argnames.append(self.cur_token.val)
self._get_next_token()
self._match('=')
expr = self._expr()
if len(argnames) > 0:
return ast.Decl(name, ast.LambdaExpr(argnames, expr))
else:
return ast.Decl(name, expr)
def _expr(self):
"""Parse an expr of the form:
expr op expr
We only allow a single operator between expressions. Additional
operators should be nested using parens, e.g. x + (y * z)
"""
node = self._expr_component()
if self.cur_token.type in self.operators:
op = self.cur_token.type
self._get_next_token()
rhs = self._expr_component()
return ast.OpExpr(op, node, rhs)
else:
return node
def _expr_component(self):
"""Parse an expr component (components can be separated by an operator).
"""
curtok = self.cur_token
if self.cur_token.type == 'INT':
self._get_next_token()
return ast.IntConstant(curtok.val)
elif self.cur_token.type in ('FALSE', 'TRUE'):
self._get_next_token()
return ast.BoolConstant(curtok.val)
elif self.cur_token.type == 'ID':
self._get_next_token()
if self.cur_token.type == '(':
# ID followed by '(' is function application
return self._app(curtok.val)
else:
return ast.Identifier(curtok.val)
elif self.cur_token.type == '(':
self._get_next_token()
expr = self._expr()
self._match(')')
return expr
elif self.cur_token.type == 'IF':
return self._ifexpr()
elif self.cur_token.type == 'LAMBDA':
return self._lambda()
else:
self._error("Don't support {} yet".format(curtok.type))
def _ifexpr(self):
self._match('IF')
ifexpr = self._expr()
self._match('THEN')
thenexpr = self._expr()
self._match('ELSE')
elseexpr = self._expr()
return ast.IfExpr(ifexpr, thenexpr, elseexpr)
def _lambda(self):
self._match('LAMBDA')
argnames = []
while self.cur_token.type == 'ID':
argnames.append(self.cur_token.val)
self._get_next_token()
if len(argnames) < 1:
self._error('Expected non-empty argument list for lambda')
self._match('ARROW')
expr = self._expr()
return ast.LambdaExpr(argnames, expr)
def _app(self, name):
self._match('(')
args = []
while self.cur_token.type != ')':
args.append(self._expr())
if self.cur_token.type == ',':
self._get_next_token()
elif self.cur_token.type == ')':
pass # the loop will break
else:
self._error("Unexpected {} in application".format(
self.cur_token.val))
self._match(')')
return ast.AppExpr(ast.Identifier(name), args)
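# Example usage (a minimal sketch; Decl and LambdaExpr are the node classes
# from the ast module imported above):
#   p = Parser()
#   decl = p.parse_decl('square x = x * x')
#   # decl is an ast.Decl whose expr is an ast.LambdaExpr over ['x']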
| unlicense | -3,593,171,190,825,320,400 | 31.925234 | 80 | 0.453591 | false |
nsi-iff/nsi_site | apps/news/migrations/0002_auto.py | 1 | 6169 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field project on 'New'
db.delete_table('news_new_project')
# Adding M2M table for field projects_relateds on 'New'
db.create_table('news_new_projects_relateds', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('new', models.ForeignKey(orm['news.new'], null=False)),
('project', models.ForeignKey(orm['projects.project'], null=False))
))
db.create_unique('news_new_projects_relateds', ['new_id', 'project_id'])
def backwards(self, orm):
# Adding M2M table for field project on 'New'
db.create_table('news_new_project', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('new', models.ForeignKey(orm['news.new'], null=False)),
('project', models.ForeignKey(orm['projects.project'], null=False))
))
db.create_unique('news_new_project', ['new_id', 'project_id'])
# Removing M2M table for field projects_relateds on 'New'
db.delete_table('news_new_projects_relateds')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'news.new': {
'Meta': {'object_name': 'New'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'projects_relateds': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['projects.Project']", 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'projects.project': {
'Meta': {'object_name': 'Project'},
'description': ('django.db.models.fields.TextField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sponsor': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['news']
| mit | 7,173,289,495,667,929,000 | 61.94898 | 182 | 0.556006 | false |
fifengine/fifengine | tests/fife_test/scripts/test.py | 1 | 3991 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2019 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from __future__ import print_function
from builtins import object
import os
class TestManager(object):
def __init__(self, engine, application, settings):
self._engine = engine
self._application = application
self._settings = settings
self._running = None
self._testdir = "tests"
self._tests = []
files = []
for f in os.listdir(self._testdir):
path = os.path.join(self._testdir, f)
if os.path.isfile(path) and os.path.splitext(f)[1] == ".py" and f != "__init__.py":
files.append(os.path.splitext(f)[0])
for f in files:
importtest = self._settings.get("Tests", f, False)
if importtest:
try:
print("Importing test plugin: ", f)
exec("import " + self._testdir + "." + f)
test = eval(self._testdir + "." + f + "." + f + "()")
if isinstance(test, Test) is False:
print(f + " is not an instance of Test!")
else:
self._tests.append(test)
except BaseException as error:
print("Error: ", error)
print("Invalid test: ", f)
else:
print("Not importing test: ", f)
self._settings.set("Tests", f, importtest)
def _getRunningTest(self):
return self._running
def runTest(self, test):
if test in self._tests and not self._running:
self._running = test
self._running.create(self._engine, self._application)
self._running.run()
def stopTest(self):
if self._running:
if self._running.isRunning():
self._running.stop()
self._running.destroy()
self._running = None
def resetTest(self):
if self._running:
if self._running.isRunning():
self._running.stop()
self._running.destroy()
self._running.create(self._engine, self._application)
self._running.run()
def _getTests(self):
return self._tests
def _getTestNameList(self):
namelist = []
for t in self._tests:
namelist.append(t.getName())
return namelist
tests = property(_getTests)
testnames = property(_getTestNameList)
runningtest = property(_getRunningTest)
class Test(object):
""" The base calss for all tests. All tests must override these functions! """
def create(self, engine, application):
raise NotImplementedError("Test has not implemented the init() function!")
def destroy(self):
raise NotImplementedError("Test has not implemented the destroy() function!")
def run(self):
raise NotImplementedError("Test has not implemented the run() function!")
def stop(self):
raise NotImplementedError("Test has not implemented the stop() function!")
def isRunning(self):
raise NotImplementedError("Test has not implemented the isRunning() function!")
def getName(self):
raise NotImplementedError("Test has not implemented the getName() function!")
def getAuthor(self):
return "unknown"
def getDescription(self):
return "none"
def getHelp(self):
return "You're on your own for this one!"
def onConsoleCommand(self, cmd):
return cmd[0] + ": not found."
def pump(self):
pass
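# A minimal concrete test might look like this (hypothetical sketch; the
# manager calls create(), run(), stop() and destroy() in that order):
#
# class ExampleTest(Test):
#     def create(self, engine, application):
#         self._running = False
#     def destroy(self):
#         pass
#     def run(self):
#         self._running = True
#     def stop(self):
#         self._running = False
#     def isRunning(self):
#         return self._running
#     def getName(self):
#         return "ExampleTest"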
| lgpl-2.1 | 1,703,994,664,542,917,000 | 27.105634 | 86 | 0.656978 | false |
Patrick-Cole/pygmi | pygmi/clust/graphtool.py | 1 | 26477 | # -----------------------------------------------------------------------------
# Name: graph_tool.py (part of PyGMI)
#
# Author: Patrick Cole
# E-Mail: pcole@geoscience.org.za
#
# Copyright: (c) 2013 Council for Geoscience
# Licence: GPL-3.0
#
# This file is part of PyGMI
#
# PyGMI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyGMI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Multi-function graphing tool for use with cluster analysis."""
import numpy as np
from PyQt5 import QtWidgets, QtCore
from matplotlib.figure import Figure
from matplotlib import cm
from matplotlib.artist import Artist
from matplotlib.patches import Polygon
from matplotlib.lines import Line2D
from matplotlib.path import Path
from matplotlib.ticker import NullFormatter
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
class GraphHist(FigureCanvasQTAgg):
"""Graph Hist."""
def __init__(self, parent=None):
self.figure = Figure()
super().__init__(self.figure)
self.setParent(parent)
self.nullfmt = NullFormatter()
self.pntxy = None
self.polyi = None
self.axhistx = None
self.axhisty = None
self.axscatter = None
self.histx = None
self.histy = None
self.xcoord = None
self.ycoord = None
self.data = []
self.cindx = [0, 1, 0]
self.cdata = []
self.csp = None
def get_hist(self, bins):
"""
Routine to get the scattergram with histogram overlay.
Parameters
----------
bins : int
Number of bins.
Returns
-------
xymahist : numpy array
Output data.
"""
xyhist = np.zeros((bins + 1, bins + 1))
xxx = self.xcoord.compressed()
yyy = self.ycoord.compressed()
xyhist = np.histogram2d(xxx, yyy, bins + 1)
xymahist = np.ma.masked_equal(xyhist[0], 0)
return xymahist
def get_clust_scat(self, bins, dattmp, ctmp):
"""
Routine to get the scattergram with cluster overlay.
Parameters
----------
bins : int
Number of bins.
dattmp : list
Data.
ctmp : list
Cluster indices.
Returns
-------
xymahist : numpy array
Output data.
"""
clust = np.ma.array(dattmp[ctmp[2] - 1].data.flatten())
clust.mask = np.ma.getmaskarray(self.xcoord)
clust = clust.compressed()
xxx = self.xcoord.compressed()
yyy = self.ycoord.compressed()
xyhist = np.zeros((bins + 1, bins + 1))
xyhist[xxx, yyy] = (clust + 1)
xymahist = np.ma.masked_equal(xyhist, 0)
return xymahist
def init_graph(self):
"""
Initialize the Graph.
Returns
-------
None.
"""
self.figure.clf()
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = bottom + height + 0.02
left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
self.axscatter = self.figure.add_axes(rect_scatter, label='s')
self.axhistx = self.figure.add_axes(rect_histx, label='x')
self.axhisty = self.figure.add_axes(rect_histy, label='y')
# Setup the coordinates
self.setup_coords()
# setup 1d histograms
self.setup_hist()
        # compressed() removes the masked values so the histogram is built
        # from valid data only.
xymahist = self.get_hist(50)
self.axscatter.get_xaxis().set_visible(False)
self.axscatter.get_yaxis().set_visible(False)
self.csp = self.axscatter.imshow(xymahist.T, interpolation='nearest',
cmap=cm.get_cmap('jet'),
aspect='auto')
self.csp.set_clim(xymahist.min(), xymahist.max())
self.csp.changed()
self.figure.canvas.draw()
def polyint(self):
"""
Polygon Interactor routine.
Returns
-------
None.
"""
pntxy = np.transpose([self.xcoord, self.ycoord])
self.polyi = PolygonInteractor(self.axscatter, pntxy)
self.polyi.ishist = True
def setup_coords(self):
"""
Routine to setup the coordinates for the scattergram.
Returns
-------
None.
"""
self.xcoord = self.data[self.cindx[0]].data.flatten()
self.ycoord = self.data[self.cindx[1]].data.flatten()
self.xcoord -= self.xcoord.min()
self.ycoord -= self.ycoord.min()
xptp = self.xcoord.ptp()
yptp = self.ycoord.ptp()
xstep = xptp / 50
ystep = yptp / 50
self.xcoord /= xstep
self.ycoord /= ystep
self.xcoord = self.xcoord.astype(int)
self.ycoord = self.ycoord.astype(int)
def setup_hist(self):
"""
Routine to setup the 1D histograms.
Returns
-------
None.
"""
self.axhistx.xaxis.set_major_formatter(self.nullfmt)
self.axhisty.yaxis.set_major_formatter(self.nullfmt)
self.axhistx.yaxis.set_major_formatter(self.nullfmt)
self.axhisty.xaxis.set_major_formatter(self.nullfmt)
xrng = [self.xcoord.min(), self.xcoord.max()]
yrng = [self.ycoord.min(), self.ycoord.max()]
self.histx = self.axhistx.hist(self.xcoord.compressed(), 50)
self.histy = self.axhisty.hist(self.ycoord.compressed(), 50,
orientation='horizontal')
self.axhistx.set_xlim(xrng)
self.axhisty.set_ylim(yrng[::-1])
def update_graph(self, clearaxis=False):
"""
Draw Routine.
Parameters
----------
clearaxis : bool, optional
True to clear the axis. The default is False.
Returns
-------
None.
"""
if clearaxis is True:
self.axhistx.cla()
self.axhisty.cla()
self.setup_coords()
self.polyi.pntxy = np.array([self.xcoord, self.ycoord]).T
self.setup_hist()
if self.cindx[2] > 0:
xymahist = self.get_clust_scat(50, self.cdata, self.cindx)
else:
xymahist = self.get_hist(50)
if self.csp is None:
return
self.csp.set_data(xymahist.T)
self.csp.set_clim(xymahist.min(), xymahist.max())
self.csp.changed()
self.figure.canvas.draw()
self.polyi.draw_callback()
class GraphMap(FigureCanvasQTAgg):
"""
Graph Map.
Attributes
----------
parent : parent
reference to the parent routine
"""
def __init__(self, parent=None):
self.figure = Figure()
super().__init__(self.figure)
self.setParent(parent)
self.parent = parent
self.polyi = None
self.data = []
self.cdata = []
self.mindx = [0, 0]
self.csp = None
self.subplot = None
def init_graph(self):
"""
Initialize the Graph.
Returns
-------
None.
"""
mtmp = self.mindx
dat = self.data[mtmp[0]]
self.figure.clf()
self.subplot = self.figure.add_subplot(111)
self.subplot.get_xaxis().set_visible(False)
self.subplot.get_yaxis().set_visible(False)
self.csp = self.subplot.imshow(dat.data, cmap=cm.get_cmap('jet'))
self.subplot.figure.colorbar(self.csp)
self.figure.canvas.draw()
def polyint(self):
"""
        Polygon Interactor.
Returns
-------
None.
"""
mtmp = self.mindx
dat = self.data[mtmp[0]].data
xtmp = np.arange(dat.shape[1])
ytmp = np.arange(dat.shape[0])
xmesh, ymesh = np.meshgrid(xtmp, ytmp)
xmesh = np.ma.array(xmesh, dtype=float, mask=dat.mask)
ymesh = np.ma.array(ymesh, dtype=float, mask=dat.mask)
xmesh = xmesh.flatten()
ymesh = ymesh.flatten()
xmesh = xmesh.filled(np.nan)
ymesh = ymesh.filled(np.nan)
pntxy = np.transpose([xmesh, ymesh])
self.polyi = PolygonInteractor(self.subplot, pntxy)
self.polyi.ishist = False
def update_graph(self):
"""
Draw routine.
Returns
-------
None.
"""
mtmp = self.mindx
dat = self.data[mtmp[0]]
if mtmp[1] > 0:
cdat = self.cdata[mtmp[1] - 1].data
self.csp.set_data(cdat)
self.csp.set_clim(cdat.min(), cdat.max())
else:
self.csp.set_data(dat.data)
self.csp.set_clim(dat.data.min(), dat.data.max())
self.csp.changed()
self.figure.canvas.draw()
self.polyi.draw_callback()
class PolygonInteractor(QtCore.QObject):
"""Polygon Interactor."""
showverts = True
epsilon = 5 # max pixel distance to count as a vertex hit
polyi_changed = QtCore.pyqtSignal(list)
def __init__(self, axtmp, pntxy):
super().__init__()
self.ax = axtmp
self.poly = Polygon([(1, 1)], animated=True)
self.ax.add_patch(self.poly)
self.canvas = self.poly.figure.canvas
self.poly.set_alpha(0.5)
self.pntxy = pntxy
self.ishist = True
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
xtmp, ytmp = list(zip(*self.poly.xy))
self.line = Line2D(xtmp, ytmp, marker='o', markerfacecolor='r',
color='y', animated=True)
self.ax.add_line(self.line)
self.poly.add_callback(self.poly_changed)
self._ind = None # the active vert
self.canvas.mpl_connect('button_press_event',
self.button_press_callback)
self.canvas.mpl_connect('button_release_event',
self.button_release_callback)
self.canvas.mpl_connect('motion_notify_event',
self.motion_notify_callback)
def draw_callback(self):
"""
Draw callback.
Returns
-------
None.
"""
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
QtWidgets.QApplication.processEvents()
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.poly)
self.ax.draw_artist(self.line)
self.canvas.update()
def new_poly(self, npoly):
"""
Create new Polygon.
Parameters
----------
npoly : list
New polygon coordinates.
Returns
-------
None.
"""
self.poly.set_xy(npoly)
self.line.set_data(list(zip(*self.poly.xy)))
self.canvas.draw()
self.update_plots()
def poly_changed(self, poly):
"""
Polygon changed.
Parameters
----------
        poly : matplotlib.patches.Polygon
            Polygon whose artist properties changed.
Returns
-------
None.
"""
        # this method is called whenever the polygon artist changes
# only copy the artist props to the line (except visibility)
vis = self.line.get_visible()
Artist.update_from(self.line, poly)
self.line.set_visible(vis) # don't use the poly visibility state
def get_ind_under_point(self, event):
"""
Get the index of vertex under point if within epsilon tolerance.
Parameters
----------
        event : matplotlib.backend_bases.MouseEvent
            Mouse event carrying display coordinates.
Returns
-------
ind : int or None
Index of vertex under point.
"""
# display coords
xytmp = np.asarray(self.poly.xy)
xyt = self.poly.get_transform().transform(xytmp)
xtt, ytt = xyt[:, 0], xyt[:, 1]
dtt = np.sqrt((xtt - event.x) ** 2 + (ytt - event.y) ** 2)
indseq = np.nonzero(np.equal(dtt, np.amin(dtt)))[0]
ind = indseq[0]
if dtt[ind] >= self.epsilon:
ind = None
return ind
def button_press_callback(self, event):
"""
Button press callback.
Parameters
----------
        event : matplotlib.backend_bases.MouseEvent
            Mouse button press event.
Returns
-------
None.
"""
if event.inaxes is None:
return
if event.button != 1:
return
if self.ax.get_navigate_mode() is not None:
return
self._ind = self.get_ind_under_point(event)
if self._ind is None:
xys = self.poly.get_transform().transform(self.poly.xy)
ptmp = self.poly.get_transform().transform([event.xdata,
event.ydata])
# ptmp = event.x, event.y # display coords
if len(xys) == 1:
self.poly.xy = np.array(
[(event.xdata, event.ydata)] +
[(event.xdata, event.ydata)])
self.line.set_data(list(zip(*self.poly.xy)))
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.poly)
self.ax.draw_artist(self.line)
self.canvas.update()
return
dmin = -1
imin = -1
for i in range(len(xys) - 1):
s0tmp = xys[i]
s1tmp = xys[i + 1]
dtmp = dist_point_to_segment(ptmp, s0tmp, s1tmp)
if dmin == -1:
dmin = dtmp
imin = i
elif dtmp < dmin:
dmin = dtmp
imin = i
i = imin
if np.array_equal(self.poly.xy, np.ones((2, 2))):
self.poly.set_xy([[event.xdata, event.ydata]])
else:
self.poly.xy = np.array(list(self.poly.xy[:i + 1]) +
[(event.xdata, event.ydata)] +
list(self.poly.xy[i + 1:]))
self.line.set_data(list(zip(*self.poly.xy)))
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.poly)
self.ax.draw_artist(self.line)
self.canvas.update()
def button_release_callback(self, event):
"""
Button release callback.
Parameters
----------
        event : matplotlib.backend_bases.MouseEvent
            Mouse button release event.
Returns
-------
None.
"""
if event.button != 1:
return
self._ind = None
self.update_plots()
def update_plots(self):
"""
Update plots.
Returns
-------
None.
"""
polymask = Path(self.poly.xy).contains_points(self.pntxy)
self.polyi_changed.emit(polymask.tolist())
def motion_notify_callback(self, event):
"""
Mouse notify callback.
Parameters
----------
        event : matplotlib.backend_bases.MouseEvent
            Mouse motion event.
Returns
-------
None.
"""
if self._ind is None:
return
if event.inaxes is None:
return
if event.button != 1:
return
xtmp, ytmp = event.xdata, event.ydata
self.poly.xy[self._ind] = xtmp, ytmp
if self._ind == 0:
self.poly.xy[-1] = xtmp, ytmp
self.line.set_data(list(zip(*self.poly.xy)))
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.poly)
self.ax.draw_artist(self.line)
self.canvas.update()
class ScatterPlot(QtWidgets.QDialog):
"""
Main Graph Tool Routine.
Attributes
----------
parent : parent
reference to the parent routine
indata : dictionary
dictionary of input datasets
outdata : dictionary
dictionary of output datasets
"""
def __init__(self, parent=None):
super().__init__(parent)
self.indata = {}
self.outdata = {}
self.parent = parent
self.m1 = 0
self.c = [0, 1, 0]
self.m = [0, 0]
self.dat_tmp = None
if parent is None:
self.showprocesslog = print
else:
self.showprocesslog = parent.showprocesslog
self.map = GraphMap(self)
self.hist = GraphHist(self)
self.cp_dpoly = QtWidgets.QPushButton('Delete Polygon')
self.cp_combo = QtWidgets.QComboBox()
self.cp_combo2 = QtWidgets.QComboBox()
self.cp_combo3 = QtWidgets.QComboBox()
self.map_dpoly = QtWidgets.QPushButton('Delete Polygon')
self.map_combo = QtWidgets.QComboBox()
self.map_combo2 = QtWidgets.QComboBox()
self.setupui()
self.hist.cindx = self.c
self.map.mindx = self.m
def setupui(self):
"""
Set up UI.
Returns
-------
None.
"""
grid_main = QtWidgets.QGridLayout(self)
group_cp = QtWidgets.QGroupBox('Cross Plot Settings')
grid_left = QtWidgets.QGridLayout(group_cp)
group_map = QtWidgets.QGroupBox('Map Settings')
grid_right = QtWidgets.QGridLayout(group_map)
self.setWindowTitle('Graph Window')
lbl_combo_left = QtWidgets.QLabel('X Data Band:')
lbl_combo2_left = QtWidgets.QLabel('Y Data Band:')
lbl_combo3_left = QtWidgets.QLabel('Cluster Overlay:')
lbl_combo_right = QtWidgets.QLabel('Data Band:')
lbl_combo2_right = QtWidgets.QLabel('Cluster Overlay:')
grid_left.addWidget(lbl_combo_left, 0, 0, 1, 1)
grid_left.addWidget(lbl_combo2_left, 1, 0, 1, 1)
grid_left.addWidget(lbl_combo3_left, 2, 0, 1, 1)
grid_left.addWidget(self.cp_dpoly, 0, 2, 1, 1)
grid_left.addWidget(self.cp_combo, 0, 1, 1, 1)
grid_left.addWidget(self.cp_combo2, 1, 1, 1, 1)
grid_left.addWidget(self.cp_combo3, 2, 1, 1, 1)
grid_right.addWidget(lbl_combo_right, 0, 0, 1, 1)
grid_right.addWidget(lbl_combo2_right, 1, 0, 1, 1)
grid_right.addWidget(self.map_dpoly, 0, 2, 1, 1)
grid_right.addWidget(self.map_combo, 0, 1, 1, 1)
grid_right.addWidget(self.map_combo2, 1, 1, 1, 1)
grid_main.addWidget(self.hist, 0, 0, 1, 1)
grid_main.addWidget(self.map, 0, 1, 1, 1)
grid_main.addWidget(group_cp, 1, 0, 1, 1)
grid_main.addWidget(group_map, 1, 1, 1, 1)
self.cp_dpoly.clicked.connect(self.on_cp_dpoly)
self.map_dpoly.clicked.connect(self.on_map_dpoly)
def on_cp_dpoly(self):
"""
On cp dpoly.
Returns
-------
None.
"""
self.hist.polyi.new_poly([[10, 10]])
mtmp = self.map_combo.currentIndex()
mask = self.indata['Raster'][mtmp].data.mask
dattmp = self.map.csp.get_array()
dattmp.mask = mask
self.map.csp.changed()
self.map.figure.canvas.draw()
def on_map_dpoly(self):
"""
On map dpoly.
Returns
-------
None.
"""
self.map.polyi.new_poly([[10, 10]])
dattmp = self.hist.csp.get_array()
dattmp.mask = np.ma.getmaskarray(np.ma.masked_equal(dattmp.data, 0.))
self.hist.csp.changed()
self.hist.figure.canvas.draw()
def on_cp_combo(self):
"""
On cp combo.
Returns
-------
None.
"""
gstmp = self.cp_combo.currentIndex()
if gstmp != self.c[0]:
self.c[0] = gstmp
self.hist.update_graph(clearaxis=True)
self.map.polyi.update_plots()
def on_cp_combo2(self):
"""
On cp combo 2.
Returns
-------
None.
"""
gstmp = self.cp_combo2.currentIndex()
if gstmp != self.c[1]:
self.c[1] = gstmp
self.hist.update_graph(clearaxis=True)
self.map.polyi.update_plots()
def on_cp_combo3(self):
"""
On cp combo 3.
Returns
-------
None.
"""
self.c[2] = self.cp_combo3.currentIndex()
self.hist.update_graph()
self.map.polyi.update_plots()
def on_map_combo(self):
"""
On map combo.
Returns
-------
None.
"""
self.m[0] = self.map_combo.currentIndex()
self.map.update_graph()
self.hist.polyi.update_plots()
def on_map_combo2(self):
"""
On map combo 2.
Returns
-------
None.
"""
self.m[1] = self.map_combo2.currentIndex()
self.map.update_graph()
self.hist.polyi.update_plots()
def settings(self, nodialog=False):
"""
Run.
Returns
-------
bool
True if successful, False otherwise.
"""
if 'Raster' not in self.indata:
self.showprocesslog('Error: You must have a multi-band raster '
'dataset in addition to your cluster analysis'
' results')
return False
self.dat_tmp = self.indata['Raster']
self.map.data = self.indata['Raster']
self.hist.data = self.indata['Raster']
bands = [i.dataid for i in self.indata['Raster']]
self.cp_combo.clear()
self.cp_combo2.clear()
self.map_combo.clear()
self.cp_combo.addItems(bands)
self.cp_combo2.addItems(bands)
self.map_combo.addItems(bands)
self.cp_combo2.setCurrentIndex(1)
self.cp_combo.currentIndexChanged.connect(self.on_cp_combo)
self.cp_combo2.currentIndexChanged.connect(self.on_cp_combo2)
self.map_combo.currentIndexChanged.connect(self.on_map_combo)
cbands = ['Scatter Amplitudes']
mbands = ['None']
if 'Cluster' in self.indata:
self.hist.cdata = self.indata['Cluster']
self.map.cdata = self.indata['Cluster']
cbands += [i.dataid for i in self.indata['Cluster']]
mbands += [i.dataid for i in self.indata['Cluster']]
self.cp_combo3.clear()
self.map_combo2.clear()
self.cp_combo3.addItems(cbands)
self.map_combo2.addItems(mbands)
self.cp_combo3.currentIndexChanged.connect(self.on_cp_combo3)
self.map_combo2.currentIndexChanged.connect(self.on_map_combo2)
self.hist.init_graph()
self.map.init_graph()
self.show()
self.hist.polyint()
self.map.polyint()
self.hist.polyi.polyi_changed.connect(self.update_map)
self.map.polyi.polyi_changed.connect(self.update_hist)
self.hist.update_graph(clearaxis=True)
self.map.update_graph()
return True
def loadproj(self, projdata):
"""
Load project data into class.
Parameters
----------
projdata : dictionary
Project data loaded from JSON project file.
Returns
-------
chk : bool
A check to see if settings was successfully run.
"""
return False
def saveproj(self):
"""
Save project data from class.
Returns
-------
projdata : dictionary
Project data to be saved to JSON project file.
"""
projdata = {}
# projdata['ftype'] = '2D Mean'
return projdata
def update_map(self, polymask):
"""
Update map.
Parameters
----------
polymask : numpy array
Polygon mask.
Returns
-------
None.
"""
        if not any(polymask):
return
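        # Combine the polygon selection with the raster band's own nodata mask
        # and push the result onto the image currently shown on the map canvas.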
mtmp = self.map_combo.currentIndex()
mask = self.indata['Raster'][mtmp].data.mask
polymask = np.array(polymask)
polymask.shape = mask.shape
polymask = np.logical_or(~polymask, mask)
dattmp = self.map.csp.get_array()
dattmp.mask = polymask
self.map.csp.changed()
self.map.figure.canvas.draw()
def update_hist(self, polymask):
"""
Update histogram.
Parameters
----------
polymask : numpy array
Polygon mask.
Returns
-------
None.
"""
        if not any(polymask):
return
polymask = np.array(polymask)
dattmp = self.hist.csp.get_array()
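        # polymask is flat; xcoord/ycoord map each selected map pixel back to
        # its 2-D histogram bin, so only those bins are unmasked below.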
atmp = np.array([self.hist.xcoord[polymask],
self.hist.ycoord[polymask]]).T
dattmp.mask = np.ones_like(np.ma.getmaskarray(dattmp))
for i in atmp:
dattmp.mask[i[1], i[0]] = False
self.hist.csp.changed()
self.hist.figure.canvas.draw()
def dist_point_to_segment(p, s0, s1):
"""
Dist point to segment.
    Reimplementation of Matplotlib's dist_point_to_segment, after it was
    deprecated. Follows http://geomalgorithms.com/a02-_lines.html
Parameters
----------
p : numpy array
Point.
s0 : numpy array
Start of segment.
s1 : numpy array
End of segment.
Returns
-------
    float
        Distance from the point to the segment.
"""
p = np.array(p)
s0 = np.array(s0)
s1 = np.array(s1)
v = s1 - s0
w = p - s0
c1 = np.dot(w, v)
if c1 <= 0:
return np.linalg.norm(p - s0)
c2 = np.dot(v, v)
if c2 <= c1:
return np.linalg.norm(p - s1)
b = c1/c2
pb = s0 + b*v
return np.linalg.norm(p - pb)
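# A minimal usage sketch for dist_point_to_segment (illustrative values only):
#
#   >>> dist_point_to_segment([1, 1], [0, 0], [2, 0])
#   1.0
#
# Here (1, 1) projects onto the interior of the segment at (1, 0), so the
# result is the perpendicular distance 1.0.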
| gpl-3.0 | 3,424,058,039,443,024,000 | 25.583333 | 79 | 0.526721 | false |
aspose-pdf/Aspose.Pdf-for-Java | Plugins/Aspose_Pdf_Java_for_Python/WorkingWithDocumentObject/__init__.py | 1 | 9774 | __author__ = 'fahadadeel'
import jpype
import re
import datetime
class AddJavascript:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
self.JavascriptAction=jpype.JClass("com.aspose.pdf.JavascriptAction")
def main(self):
        # Open a pdf document.
        doc = self.Document(self.dataDir + 'Template.pdf')
# Adding JavaScript at Document Level
# Instantiate JavascriptAction with desried JavaScript statement
javaScript = self.JavascriptAction("this.print({bUI:true,bSilent:false,bShrinkToFit:true});");
# Assign JavascriptAction object to desired action of Document
doc.setOpenAction(javaScript)
js=self.JavascriptAction("app.alert('page 2 is opened')")
# Adding JavaScript at Page Level
        doc.getPages().get_Item(2).getActions().setOnOpen(js)
doc.getPages().get_Item(2).getActions().setOnClose(self.JavascriptAction("app.alert('page 2 is closed')"))
# Save PDF Document
doc.save(self.dataDir + "JavaScript-Added.pdf")
print "Added JavaScript Successfully, please check the output file."
class AddToc:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
self.TocInfo=jpype.JClass("com.aspose.pdf.TocInfo")
self.TextFragment=jpype.JClass("com.aspose.pdf.TextFragment")
self.TextSegment=jpype.JClass("com.aspose.pdf.TextSegment")
self.Heading=jpype.JClass("com.aspose.pdf.Heading")
def main(self):
        # Open a pdf document.
        doc = self.Document(self.dataDir + 'input1.pdf')
        # Insert a new page at the beginning of the PDF file to hold the TOC
toc_page = doc.getPages().insert(1)
# Create object to represent TOC information
toc_info = self.TocInfo()
title = self.TextFragment("Table Of Contents")
title.getTextState().setFontSize(20)
# Set the title for TOC
toc_info.setTitle(title)
toc_page.setTocInfo(toc_info)
# Create string objects which will be used as TOC elements
titles = ["First page", "Second page"]
i = 0;
while (i < 2):
# Create Heading object
            heading2 = self.Heading(1)
            segment2 = self.TextSegment()
heading2.setTocPage(toc_page)
heading2.getSegments().add(segment2)
# Specify the destination page for heading object
heading2.setDestinationPage(doc.getPages().get_Item(i + 2))
# Destination page
heading2.setTop(doc.getPages().get_Item(i + 2).getRect().getHeight())
# Destination coordinate
segment2.setText(titles[i])
# Add heading to page containing TOC
toc_page.getParagraphs().add(heading2)
i +=1;
# Save PDF Document
doc.save(self.dataDir + "TOC.pdf")
print "Added TOC Successfully, please check the output file."
class GetDocumentWindow:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
def main(self):
        doc = self.Document(self.dataDir + 'input1.pdf')
# Get different document properties
# Position of document's window - Default: false
print "CenterWindow :- " + str(doc.getCenterWindow())
# Predominant reading order; determine the position of page
# when displayed side by side - Default: L2R
print "Direction :- " + str(doc.getDirection())
# Whether window's title bar should display document title.
# If false, title bar displays PDF file name - Default: false
print "DisplayDocTitle :- " + str(doc.getDisplayDocTitle())
#Whether to resize the document's window to fit the size of
#first displayed page - Default: false
print "FitWindow :- " + str(doc.getFitWindow())
# Whether to hide menu bar of the viewer application - Default: false
print "HideMenuBar :-" + str(doc.getHideMenubar())
# Whether to hide tool bar of the viewer application - Default: false
print "HideToolBar :-" + str(doc.getHideToolBar())
# Whether to hide UI elements like scroll bars
# and leaving only the page contents displayed - Default: false
print "HideWindowUI :-" + str(doc.getHideWindowUI())
# The document's page mode. How to display document on exiting full-screen mode.
print "NonFullScreenPageMode :-" + str(doc.getNonFullScreenPageMode())
# The page layout i.e. single page, one column
print "PageLayout :-" + str(doc.getPageLayout())
#How the document should display when opened.
print "pageMode :-" + str(doc.getPageMode())
class GetPdfFileInfo:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
def main(self):
        doc = self.Document(self.dataDir + 'input1.pdf')
# Get document information
doc_info = doc.getInfo();
# Show document information
print "Author:-" + str(doc_info.getAuthor())
print "Creation Date:-" + str(doc_info.getCreationDate())
print "Keywords:-" + str(doc_info.getKeywords())
print "Modify Date:-" + str(doc_info.getModDate())
print "Subject:-" + str(doc_info.getSubject())
print "Title:-" + str(doc_info.getTitle())
class GetXMPMetadata:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
def main(self):
        doc = self.Document(self.dataDir + 'input1.pdf')
# Get properties
print "xmp:CreateDate: " + str(doc.getMetadata().get_Item("xmp:CreateDate"))
print "xmp:Nickname: " + str(doc.getMetadata().get_Item("xmp:Nickname"))
print "xmp:CustomProperty: " + str(doc.getMetadata().get_Item("xmp:CustomProperty"))
class Optimize:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
# self.OptimizationOptions=jpype.JClass("com.aspose.pdf.Document.OptimizationOptions")
def main(self):
        doc = self.Document(self.dataDir + 'input1.pdf')
# Optimize for web
doc.optimize();
#Save output document
doc.save(self.dataDir + "Optimized_Web.pdf")
print "Optimized PDF for the Web, please check output file."
class RemoveMetadata:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
def main(self):
        doc = self.Document(self.dataDir + 'input1.pdf')
        # Search the metadata's string form for the keys before removing them.
        # (Assumption: toString() of the Metadata object exposes the key names;
        # the original passed the Metadata object itself to re.findall, which
        # requires a string, and used Perl-style /.../ delimiters.)
        if re.findall('pdfaid:part', str(doc.getMetadata())):
            doc.getMetadata().removeItem("pdfaid:part")
        if re.findall('dc:format', str(doc.getMetadata())):
            doc.getMetadata().removeItem("dc:format")
# save update document with new information
doc.save(self.dataDir + "Remove_Metadata.pdf")
print "Removed metadata successfully, please check output file."
class SetExpiration:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
self.JavascriptAction=jpype.JClass("com.aspose.pdf.JavascriptAction")
def main(self):
        doc = self.Document(self.dataDir + 'input1.pdf')
javascript = self.JavascriptAction(
"var year=2014; var month=4;today = new Date();today = new Date(today.getFullYear(), today.getMonth());expiry = new Date(year, month);if (today.getTime() > expiry.getTime())app.alert('The file is expired. You need a new one.');");
doc.setOpenAction(javascript);
# save update document with new information
doc.save(self.dataDir + "set_expiration.pdf");
print "Update document information, please check output file."
class SetPdfFileInfo:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
def main(self):
        doc = self.Document(self.dataDir + 'input1.pdf')
# Get document information
doc_info = doc.getInfo();
doc_info.setAuthor("Aspose.Pdf for java");
        doc_info.setCreationDate(datetime.datetime.today().strftime("%m/%d/%Y"))
doc_info.setKeywords("Aspose.Pdf, DOM, API");
        doc_info.setModDate(datetime.datetime.today().strftime("%m/%d/%Y"))
doc_info.setSubject("PDF Information");
doc_info.setTitle("Setting PDF Document Information");
# save update document with new information
doc.save(self.dataDir + "Updated_Information.pdf")
print "Update document information, please check output file."
| mit | -4,455,166,005,678,652,000 | 32.907143 | 246 | 0.593513 | false |
andr3wmac/metaTower | mt/EventManager.py | 1 | 1710 | """
* metaTower v0.4.5
* http://www.metatower.com
*
* Copyright 2012, Andrew Mac
* http://www.andrewmac.ca
* Licensed under GPL v3.
* See license.txt
* or http://www.metatower.com/license.txt
"""
import mt, inspect
class EventManager:
class EventItem:
def __init__(self, event, function, source):
self.event = event
self.function = function
self.source = source
def __init__(self):
self.events = []
def register(self, event, function):
source = mt.utils.getSource()
newEvent = self.EventItem(event, function, source)
self.events.append(newEvent)
def clear(self, function = None):
if ( function == None ): self.events = []
else:
new_list = []
for e in self.events:
if e.function != function: new_list.append(e)
self.events = new_list
def clearSource(self, source):
new_list = []
for e in self.events:
if e.source != source: new_list.append(e)
self.events = new_list
def trigger(self, event, *args):
#print "Triggering event: " + event + " with " + str(len(args)) + " arg(s)"
result = None
for e in self.events:
if e.event == event:
arg_count = len(inspect.getargspec(e.function).args)
if ( arg_count == 0 ) : result = e.function()
if ( arg_count > 0 ):
if ( arg_count == len(args) ):
result = e.function(*args)
if ( arg_count < len(args) ):
result = e.function(*args[:(arg_count-len(args))])
return result
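# Minimal usage sketch (hypothetical handler; register() records the caller's
# source via mt.utils.getSource() so clearSource() can drop it later):
#
#   events = EventManager()
#   events.register("user.login", lambda name: "hello " + name)
#   events.trigger("user.login", "alice")   # -> "hello alice"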
| gpl-3.0 | -5,570,807,352,983,404,000 | 30.090909 | 83 | 0.523392 | false |
andreagrandi/workshopvenues | workshopvenues/venues/migrations/0007_auto__add_country__chg_field_address_country__add_index_address_countr.py | 1 | 3797 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Country'
db.create_table(u'venues_country', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=30)),
))
db.send_create_signal(u'venues', ['Country'])
# Deleting field 'Address.country'
db.delete_column(u'venues_address', 'country')
def backwards(self, orm):
# Deleting model 'Country'
db.delete_table(u'venues_country')
# Adding field 'Address.country'
db.add_column(u'venues_address', 'country',
self.gf('django.db.models.fields.CharField')(default='', max_length=30, blank=True),
keep_default=False)
models = {
u'venues.address': {
'Meta': {'object_name': 'Address'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.country': {
'Meta': {'object_name': 'Country'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.facility': {
'Meta': {'object_name': 'Facility'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.image': {
'Meta': {'object_name': 'Image'},
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Venue']"})
},
u'venues.venue': {
'Meta': {'object_name': 'Venue'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Address']"}),
'capacity': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'contact': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contact_twitter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'cost': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'facilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['venues.Facility']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['venues'] | bsd-3-clause | -1,636,155,754,172,928,000 | 50.324324 | 141 | 0.54148 | false |
bswartz/cinder | cinder/volume/drivers/netapp/dataontap/fc_cmode.py | 1 | 5282 | # Copyright (c) - 2014, Clinton Knight. All rights reserved.
# Copyright (c) - 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp Data ONTAP (C-mode) FibreChannel storage systems.
"""
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.zonemanager import utils as fczm_utils
@interface.volumedriver
class NetAppCmodeFibreChannelDriver(driver.BaseVD,
driver.ConsistencyGroupVD,
driver.ManageableVD,
driver.ExtendVD,
driver.TransferVD,
driver.SnapshotVD):
"""NetApp C-mode FibreChannel volume driver."""
DRIVER_NAME = 'NetApp_FibreChannel_Cluster_direct'
def __init__(self, *args, **kwargs):
super(NetAppCmodeFibreChannelDriver, self).__init__(*args, **kwargs)
self.library = block_cmode.NetAppBlockStorageCmodeLibrary(
self.DRIVER_NAME, 'FC', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
def check_for_setup_error(self):
self.library.check_for_setup_error()
def create_volume(self, volume):
self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)
def create_snapshot(self, snapshot):
self.library.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
self.library.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
return self.library.get_volume_stats(refresh,
self.get_filter_function(),
self.get_goodness_function())
def get_default_filter_function(self):
return self.library.get_default_filter_function()
def get_default_goodness_function(self):
return self.library.get_default_goodness_function()
def extend_volume(self, volume, new_size):
self.library.extend_volume(volume, new_size)
def ensure_export(self, context, volume):
return self.library.ensure_export(context, volume)
def create_export(self, context, volume, connector):
return self.library.create_export(context, volume)
def remove_export(self, context, volume):
self.library.remove_export(context, volume)
def manage_existing(self, volume, existing_ref):
return self.library.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
return self.library.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
return self.library.unmanage(volume)
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
return self.library.initialize_connection_fc(volume, connector)
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
return self.library.terminate_connection_fc(volume, connector,
**kwargs)
def get_pool(self, volume):
return self.library.get_pool(volume)
def create_consistencygroup(self, context, group):
return self.library.create_consistencygroup(group)
def delete_consistencygroup(self, context, group, volumes):
return self.library.delete_consistencygroup(group, volumes)
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
        return self.library.update_consistencygroup(
            group, add_volumes=add_volumes, remove_volumes=remove_volumes)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.create_cgsnapshot(cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.delete_cgsnapshot(cgsnapshot, snapshots)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
return self.library.create_consistencygroup_from_src(
group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
source_cg=source_cg, source_vols=source_vols)
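# Design note: this driver is a thin facade. Every method delegates to
# NetAppBlockStorageCmodeLibrary so the FC and iSCSI front ends can share a
# single block-storage implementation; only the connection methods differ.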
| apache-2.0 | -1,633,660,446,188,067,300 | 39.320611 | 78 | 0.65373 | false |
cloudbrain/cloudbrain_examples | sandbox/print_data.py | 1 | 1223 | import time
from cloudbrain.subscribers.rabbitmq import PikaSubscriber
from cloudbrain_examples.settings import (base_routing_key, metric_name, num_channels, buffer_size,
rabbitmq_address, rabbitmq_user, rabbitmq_pwd)
def _print_callback(unused_ch, unused_method, unused_properties, body):
print "==> %s" % body
def main():
# Setup the subscriber
subscriber = PikaSubscriber(base_routing_key=base_routing_key,
rabbitmq_address=rabbitmq_address,
rabbitmq_user=rabbitmq_user,
rabbitmq_pwd=rabbitmq_pwd)
subscriber.connect()
subscriber.register(metric_name, num_channels)
time.sleep(1) # Leave it some time to register
# Get one message at a time
one_message = subscriber.get_one_message(metric_name)
print "\n==> Got one message: %s\n" % one_message
time.sleep(2) # Give people time to read the message
# Get message continuously
print "==> Subscribing ..."
try:
subscriber.subscribe(metric_name, _print_callback)
except KeyboardInterrupt:
subscriber.disconnect()
if __name__ == '__main__':
main()
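# Behavior sketch (assumes a publisher is already streaming to the RabbitMQ
# exchange named in settings): the script prints one buffered message via
# get_one_message(), then streams messages through _print_callback until
# Ctrl-C, which triggers subscriber.disconnect().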
| agpl-3.0 | -5,175,418,729,232,379,000 | 28.829268 | 99 | 0.623876 | false |
dazzzl/transwhat | transwhat.py | 1 | 3061 | #!/usr/bin/python
__author__ = "Steffen Vogel"
__copyright__ = "Copyright 2015, Steffen Vogel"
__license__ = "GPLv3"
__maintainer__ = "Steffen Vogel"
__email__ = "post@steffenvogel.de"
"""
This file is part of transWhat
transWhat is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
transwhat is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with transWhat. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import traceback
import logging
import asyncore
import sys, os
import e4u
import Queue
import threadutils
sys.path.insert(0, os.getcwd())
from Spectrum2.iochannel import IOChannel
from config import SpectrumConfig
from whatsappbackend import WhatsAppBackend
from yowsup.common import YowConstants
from yowsup.stacks import YowStack
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
parser.add_argument('--log', type=str)
parser.add_argument('--host', type=str, required=True)
parser.add_argument('--port', type=int, required=True)
parser.add_argument('--service.backend_id', metavar="ID", type=int, required=True)
parser.add_argument('config', type=str)
parser.add_argument('-j', type=str, required=True)
args, unknown = parser.parse_known_args()
YowConstants.PATH_STORAGE='/var/lib/spectrum2/' + args.j
if args.log is None:
args.log = '/var/log/spectrum2/' + args.j + '/backends/backend.log'
# Logging
logging.basicConfig(
filename=args.log,
format = "%(asctime)-15s %(levelname)s %(name)s: %(message)s",
level = logging.DEBUG if args.debug else logging.INFO
)
if args.config is not None:
specConf = SpectrumConfig(args.config)
else:
specConf = None
# Handler
def handleTransportData(data):
try:
plugin.handleDataRead(data)
except SystemExit as e:
raise e
except:
logger = logging.getLogger('transwhat')
logger.error(traceback.format_exc())
e4u.load()
closed = False
def connectionClosed():
global closed
closed = True
# Main
io = IOChannel(args.host, args.port, handleTransportData, connectionClosed)
plugin = WhatsAppBackend(io, args.j, specConf)
plugin.handleBackendConfig({
'features': [
('send_buddies_on_login', 1),
('muc', 'true'),
],
})
while True:
try:
asyncore.loop(timeout=1.0, count=10, use_poll = True)
		try:
			callback = YowStack._YowStack__detachedQueue.get(False) # doesn't block
		except Queue.Empty:
			pass
		else:
			callback()
if closed:
break
while True:
try:
callback = threadutils.eventQueue.get_nowait()
except Queue.Empty:
break
else:
callback()
except SystemExit:
break
except:
logger = logging.getLogger('transwhat')
logger.error(traceback.format_exc())
| gpl-3.0 | 6,710,417,840,655,057,000 | 23.488 | 82 | 0.733747 | false |
GoogleCloudPlatform/Solutions-Using-ETL-tool-on-Google-Compute-Engine | gce_api_test.py | 1 | 12371 | #!/usr/bin/python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests of gce_api.py."""
import unittest
import apiclient.discovery
import apiclient.errors
import mock
from mock import MagicMock
from mock import PropertyMock
import oauth2client.client
import oauth2client.file
import oauth2client.tools
from gce_api import GceApi
class GceApiTest(unittest.TestCase):
"""Unit test class of GceApi."""
def setUp(self):
self.gce_api = GceApi('gce_api_test', 'CLIENT_ID', 'CLIENT_SECRET',
'project-name', 'zone-name')
def tearDown(self):
mock.patch.stopall()
def _MockGoogleClientApi(self, credentials_validity=True):
"""Sets up mocks for Google Client API library.
Args:
credentials_validity: Type/validity of locally cached credentials.
None for no local credentials, False for invalid local credentials,
True for valid local credentials.
Returns:
Dictionary that holds mocks created.
"""
mock_local_credentials = MagicMock(
spec=oauth2client.client.Credentials, name='Mock Credentials')
mock_http_local = MagicMock(name='HTTP authorized by local credentials')
mock_local_credentials.authorize.return_value = mock_http_local
mock_new_credentials = MagicMock(
spec=oauth2client.client.Credentials, name='Mock Credentials')
mock_http_new = MagicMock(name='HTTP authorized by new credentials')
mock_new_credentials.authorize.return_value = mock_http_new
mock_api = MagicMock(name='Google Client API')
mock_storage_class = mock.patch('oauth2client.file.Storage').start()
mock_flow_class = mock.patch('gce_api.OAuth2WebServerFlow').start()
mock.patch('oauth2client.tools.run',
return_value=mock_new_credentials).start()
mock.patch('apiclient.discovery.build', return_value=mock_api).start()
mock.patch('httplib2.Http').start()
mock_storage = mock_storage_class.return_value
if credentials_validity is None:
mock_storage.get.return_value = None
else:
mock_storage.get.return_value = mock_local_credentials
mock_local_credentials.invalid = not credentials_validity
mock_flow = mock_flow_class.return_value
apiclient.discovery.build = MagicMock(return_value=mock_api)
return {'api': mock_api,
'storage_class': mock_storage_class,
'storage': mock_storage,
'flow_class': mock_flow_class,
'flow': mock_flow,
'local_credentials': mock_local_credentials,
'http_authorized_by_local_credentials': mock_http_local,
'new_credentials': mock_new_credentials,
'http_authorized_by_new_credentials': mock_http_new}
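  # The three credential states exercised by the GetApi tests map onto this
  # helper: a valid cached credential skips the OAuth2 flow entirely, while an
  # invalid or missing cache forces oauth2client.tools.run and authorizes the
  # HTTP channel with the refreshed credentials.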
def testGetApi_CachedCredentials(self):
"""Unit test of GetApi(). Local credentials are valid."""
my_mocks = self._MockGoogleClientApi()
api = self.gce_api.GetApi()
self.assertEqual(my_mocks['api'], api)
self.assertEqual(1, my_mocks['storage_class'].call_count)
# When cached credentials are valid, OAuth2 dance won't happen.
self.assertFalse(my_mocks['flow_class'].called)
self.assertFalse(oauth2client.tools.run.called)
self.assertEqual(1, my_mocks['local_credentials'].authorize.call_count)
apiclient.discovery.build.assert_called_once_with(
'compute', mock.ANY,
http=my_mocks['http_authorized_by_local_credentials'])
self.assertRegexpMatches(
apiclient.discovery.build.call_args[0][1], '^v\\d')
def testGetApi_InvalidCachedCredentials(self):
"""Unit test of GetApi(). Local credentials are invalid."""
my_mocks = self._MockGoogleClientApi(False)
api = self.gce_api.GetApi()
self.assertEqual(my_mocks['api'], api)
self.assertEqual(1, my_mocks['storage_class'].call_count)
self.assertTrue(my_mocks['flow_class'].called)
oauth2client.tools.run.assert_called_once_with(
my_mocks['flow'], my_mocks['storage'])
# New credentials are used.
self.assertEqual(1, my_mocks['new_credentials'].authorize.call_count)
apiclient.discovery.build.assert_called_once_with(
'compute', mock.ANY,
http=my_mocks['http_authorized_by_new_credentials'])
self.assertRegexpMatches(
apiclient.discovery.build.call_args[0][1], '^v\\d')
def testGetApi_NoCachedCredentials(self):
"""Unit test of GetApi(). Local credentials are invalid."""
my_mocks = self._MockGoogleClientApi(None)
api = self.gce_api.GetApi()
self.assertEqual(my_mocks['api'], api)
self.assertEqual(1, my_mocks['storage_class'].call_count)
self.assertTrue(my_mocks['flow_class'].called)
oauth2client.tools.run.assert_called_once_with(
my_mocks['flow'], my_mocks['storage'])
# New credentials are used.
self.assertEqual(1, my_mocks['new_credentials'].authorize.call_count)
apiclient.discovery.build.assert_called_once_with(
'compute', mock.ANY,
http=my_mocks['http_authorized_by_new_credentials'])
self.assertRegexpMatches(
apiclient.discovery.build.call_args[0][1], '^v\\d')
def testGetInstance(self):
"""Unit test of GetInstance()."""
mock_api = MagicMock(name='Mock Google Client API')
self.gce_api.GetApi = MagicMock(return_value=mock_api)
instance_info = self.gce_api.GetInstance('instance-name')
self.assertEqual(1, self.gce_api.GetApi.call_count)
mock_api.instances.return_value.get.assert_called_once_with(
project='project-name', zone='zone-name', instance='instance-name')
(mock_api.instances.return_value.get.return_value.execute.
assert_called_once_with())
self.assertEqual(mock_api.instances.return_value.get.return_value.
execute.return_value,
instance_info)
def testListInstance_NoFilter(self):
"""Unit test of ListInstance() without filter string."""
mock_api = MagicMock(name='Mock Google Client API')
self.gce_api.GetApi = MagicMock(return_value=mock_api)
mock_api.instances.return_value.list.return_value.execute.return_value = {
'items': ['dummy', 'list']
}
instance_list = self.gce_api.ListInstances()
self.assertEqual(1, self.gce_api.GetApi.call_count)
mock_api.instances.return_value.list.assert_called_once_with(
project='project-name', zone='zone-name', filter=None)
(mock_api.instances.return_value.list.return_value.execute.
assert_called_once_with())
self.assertEqual(['dummy', 'list'], instance_list)
def testListInstance_Filter(self):
"""Unit test of ListInstance() with filter string."""
mock_api = MagicMock(name='Mock Google Client API')
self.gce_api.GetApi = MagicMock(return_value=mock_api)
mock_api.instances.return_value.list.return_value.execute.return_value = {
'items': ['dummy', 'list']
}
instance_list = self.gce_api.ListInstances('filter condition')
self.assertEqual(1, self.gce_api.GetApi.call_count)
mock_api.instances.return_value.list.assert_called_once_with(
project='project-name', zone='zone-name', filter='filter condition')
(mock_api.instances.return_value.list.return_value.execute.
assert_called_once_with())
self.assertEqual(['dummy', 'list'], instance_list)
def testCreateInstance_Success(self):
"""Unit test of CreateInstance() with success result."""
mock_api = MagicMock(name='Mock Google Client API')
self.gce_api.GetApi = MagicMock(return_value=mock_api)
mock_api.instances.return_value.insert.return_value.execute.return_value = {
'name': 'instance-name'
}
self.assertTrue(self.gce_api.CreateInstance(
'instance-name', 'machine-type', 'image-name'))
self.assertEqual(1, self.gce_api.GetApi.call_count)
mock_api.instances.return_value.insert.assert_called_once_with(
project='project-name', zone='zone-name', body=mock.ANY)
(mock_api.instances.return_value.insert.return_value.execute.
assert_called_once_with())
def testCreateInstance_SuccessWithWarning(self):
"""Unit test of CreateInstance() with warning."""
mock_api = MagicMock(name='Mock Google Client API')
self.gce_api.GetApi = MagicMock(return_value=mock_api)
mock_api.instances.return_value.insert.return_value.execute.return_value = {
'name': 'instance-name',
'warnings': [
{
'code': 'some warning code',
'message': 'some warning message'
}
]
}
self.assertTrue(self.gce_api.CreateInstance(
'instance-name', 'machine-type', 'image-name'))
self.assertEqual(1, self.gce_api.GetApi.call_count)
mock_api.instances.return_value.insert.assert_called_once_with(
project='project-name', zone='zone-name', body=mock.ANY)
(mock_api.instances.return_value.insert.return_value.execute.
assert_called_once_with())
def testCreateInstance_Error(self):
"""Unit test of CreateInstance() with error."""
mock_api = MagicMock(name='Mock Google Client API')
self.gce_api.GetApi = MagicMock(return_value=mock_api)
mock_api.instances.return_value.insert.return_value.execute.return_value = {
'name': 'instance-name',
'error': {
'errors': [
{
'code': 'some error code',
'message': 'some error message'
}
]
}
}
# CreateInstance() returns False.
self.assertFalse(self.gce_api.CreateInstance(
'instance-name', 'machine-type', 'image-name'))
self.assertEqual(1, self.gce_api.GetApi.call_count)
mock_api.instances.return_value.insert.assert_called_once_with(
project='project-name', zone='zone-name', body=mock.ANY)
(mock_api.instances.return_value.insert.return_value.execute.
assert_called_once_with())
def testDeleteInstance(self):
"""Unit test of DeleteInstance()."""
mock_api = MagicMock(name='Mock Google Client API')
self.gce_api.GetApi = MagicMock(return_value=mock_api)
mock_api.instances.return_value.insert.return_value.execute.return_value = {
'name': 'instance-name'
}
self.assertTrue(self.gce_api.DeleteInstance('instance-name'))
self.assertEqual(1, self.gce_api.GetApi.call_count)
mock_api.instances.return_value.delete.assert_called_once_with(
project='project-name', zone='zone-name', instance='instance-name')
(mock_api.instances.return_value.delete.return_value.execute.
assert_called_once_with())
def testCreateFirewall_created(self):
"""Unit test of CreateFirewall() where a firewall is created."""
mock_api = MagicMock(name='Mock Google Client API')
self.gce_api.GetApi = MagicMock(return_value=mock_api)
property_mock = PropertyMock(side_effect=apiclient.errors.HttpError
('resp', 'content'))
mock_api.firewalls.return_value.get = property_mock
self.assertTrue(self.gce_api.CreateFirewall(
'firewall-name', [{'IPProtocol': 'tcp'}]))
mock_api.firewalls.return_value.insert.assert_called_once_with(
project='project-name', body=mock.ANY)
(mock_api.firewalls.return_value.insert.return_value.execute.
assert_called_once_with())
def testCreateFirewall_notCreated(self):
"""Unit test of CreateFirewall() with no firewall created."""
mock_api = MagicMock(name='Mock Google Client API')
self.gce_api.GetApi = MagicMock(return_value=mock_api)
mock_api.firewalls.return_value.get.return_value.execute.return_value = {
'name': 'firewall-name'
}
self.assertTrue(self.gce_api.CreateFirewall(
'firewall-name', [{'IPProtocol': 'tcp'}]))
# Make sure firewall insert is not called
self.assertFalse(mock_api.firewalls.return_value.insert.called)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -1,525,027,034,134,814,000 | 38.273016 | 80 | 0.681432 | false |
Programie/Capture2Net | webinterface/generate.py | 1 | 4933 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2008 - 2012 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# LGPL: http://www.gnu.org/licenses/lgpl.html
# EPL: http://www.eclipse.org/org/documents/epl-v10.php
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Thomas Herchenroeder (thron7)
#
################################################################################
##
# This is a stub proxy for the real generator.py
##
import sys, os, re, subprocess, codecs, optparse
CMD_PYTHON = sys.executable
QOOXDOO_PATH = '../../..'
QX_PYLIB = "tool/pylib"
##
# A derived OptionParser class that ignores unknown options (The parent
# class raises in those cases, and stops further processing).
# We need this, as we are only interested in -c/--config on this level, and
# want to ignore potential other options.
#
class MyOptionParser(optparse.OptionParser):
##
# <rargs> is the raw argument list. The original _process_args mutates
# rargs, processing options into <values> and copying interspersed args
# into <largs>. This overridden version ignores unknown or ambiguous
# options.
def _process_args(self, largs, rargs, values):
while rargs:
try:
optparse.OptionParser._process_args(self, largs, rargs, values)
except (optparse.BadOptionError, optparse.AmbiguousOptionError):
pass
def parseArgs():
parser = MyOptionParser()
parser.add_option(
"-c", "--config", dest="config", metavar="CFGFILE",
default="config.json", help="path to configuration file"
)
parser.add_option(
"-v", "--verbose", dest="verbose", action="store_true",
default=False, help="run in verbose mode"
)
(options, args) = parser.parse_args(sys.argv[1:])
return options, args
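# Behavior sketch (hypothetical argv): unknown or ambiguous flags are skipped
# rather than raising, so the call below only consumes the options declared
# above, e.g. with argv ['-c', 'cfg.json', '--bogus'], ShellOptions.config is
# 'cfg.json' and '--bogus' is silently dropped.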
ShellOptions, ShellArgs = parseArgs()
# this is from misc.json, duplicated for decoupling
_eolComment = re.compile(r'(?<![a-zA-Z]:)//.*$', re.M) # double $ for string.Template
_mulComment = re.compile(r'/\*.*?\*/', re.S)
def stripComments(s):
b = _eolComment.sub('',s)
b = _mulComment.sub('',b)
return b
def getQxPath():
path = QOOXDOO_PATH
# OS env takes precedence
if os.environ.has_key("QOOXDOO_PATH"):
path = os.environ["QOOXDOO_PATH"]
# else use QOOXDOO_PATH from config.json
else:
config_file = ShellOptions.config
if os.path.exists(config_file):
# try json parsing with qx json
if not path.startswith('${'): # template macro has been resolved
sys.path.insert(0, os.path.join(path, QX_PYLIB))
try:
from misc import json
got_json = True
except:
got_json = False
got_path = False
if got_json:
config_str = codecs.open(config_file, "r", "utf-8").read()
#config_str = stripComments(config_str) # not necessary under demjson
config = json.loads(config_str)
p = config.get("let")
if p:
p = p.get("QOOXDOO_PATH")
if p:
path = p
got_path = True
# regex parsing - error prone
if not got_path:
qpathr=re.compile(r'"QOOXDOO_PATH"\s*:\s*"([^"]*)"\s*,?')
conffile = codecs.open(config_file, "r", "utf-8")
aconffile = conffile.readlines()
for line in aconffile:
mo = qpathr.search(line)
if mo:
path = mo.group(1)
break # assume first occurrence is ok
path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), path))
return path
os.chdir(os.path.dirname(os.path.abspath(sys.argv[0]))) # switch to skeleton dir
qxpath = getQxPath()
REAL_GENERATOR = os.path.join(qxpath, 'tool', 'bin', 'generator.py')
if not os.path.exists(REAL_GENERATOR):
print "Cannot find real generator script under: \"%s\"; aborting" % REAL_GENERATOR
sys.exit(1)
elif ShellOptions.verbose:
print "\nInvoking real generator under %s ..." % REAL_GENERATOR
argList = []
argList.append(CMD_PYTHON)
argList.append(REAL_GENERATOR)
argList.extend(sys.argv[1:])
if sys.platform == "win32":
argList1=[]
for arg in argList:
if arg.find(' ')>-1:
argList1.append('"%s"' % arg)
else:
argList1.append(arg)
argList = argList1
else:
argList = ['"%s"' % x for x in argList] # quote argv elements
cmd = " ".join(argList)
retval = subprocess.call(cmd, shell=True)
sys.exit(retval)
| mit | 4,677,451,899,272,812,000 | 32.107383 | 94 | 0.569836 | false |
rokuz/pygeom | vec2.py | 1 | 4197 | import math
import copy
import geom_exceptions
import functions
import vec2_gen
class Vec2(vec2_gen.GenVec2):
"""2D Vector."""
def __init__(self, x=0.0, y=0.0):
vec2_gen.GenVec2.__init__(self, x, y)
def __getitem__(self, key):
if key == 0:
return self.x
elif key == 1:
return self.y
else:
raise ValueError("Integer key in the range [0;1] required")
def __setitem__(self, key, value):
if key == 0:
self.x = value
elif key == 1:
self.y = value
else:
raise ValueError("Integer key in the range [0;1] required")
def __len__(self):
return 2
def __str__(self):
return 'Vec2({}; {})'.format(self.x, self.y)
def __copy__(self):
return Vec2(self.x, self.y)
def __deepcopy__(self, memodict={}):
return Vec2(self.x, self.y)
def __add__(self, other):
return Vec2(self.x + other[0], self.y + other[1])
def __iadd__(self, other):
self.x += other[0]
self.y += other[1]
return self
def __sub__(self, other):
return Vec2(self.x - other[0], self.y - other[1])
def __isub__(self, other):
self.x -= other[0]
self.y -= other[1]
return self
def __mul__(self, scalar):
return Vec2(self.x * scalar, self.y * scalar)
def __imul__(self, scalar):
self.x *= scalar
self.y *= scalar
return self
def __div__(self, scalar):
return Vec2(self.x / scalar, self.y / scalar)
def __truediv__(self, scalar):
return Vec2(self.x / scalar, self.y / scalar)
def __idiv__(self, scalar):
self.x /= scalar
self.y /= scalar
return self
def __itruediv__(self, scalar):
self.x /= scalar
self.y /= scalar
return self
def __neg__(self):
return Vec2(-self.x, -self.y)
def __eq__(self, other):
return functions.almost_equal(self.x, other[0]) and functions.almost_equal(self.y, other[1])
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if functions.almost_equal(self.x, other[0]):
return self.y < other[1]
return self.x < other[0]
def __gt__(self, other):
if functions.almost_equal(self.x, other[0]):
return self.y > other[1]
return self.x > other[0]
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
def length_squared(self):
"""Calculates squared length of a vector."""
return self.x * self.x + self.y * self.y
def length(self):
"""Calculates length of a vector."""
return math.sqrt(self.length_squared())
def normalize(self):
"""Performs vector normalization. Raises VectorException in case of zero length."""
ls = self.length_squared()
if ls == 0.0:
raise geom_exceptions.VectorException("Zero-length normalization")
l = math.sqrt(ls)
self.x /= l
self.y /= l
def get_normalized(self):
"""Returns normalized copy of a vector. Raises VectorException in case of zero length."""
c = copy.copy(self)
c.normalize()
return c
def dot(self, v2):
"""Calculated dot product of current vector and vector v2."""
return self.x * v2[0] + self.y * v2[1]
def cross(self, v2):
"""Calculates cross product. It's a scalar which absolute value equals to
square of a parallelogram constructed on the current vector and vector v2.
The sign tells either v2 is on the left side (positive value) of the current
vector or on the right side (negative value)."""
return self.x * v2[1] - self.y * v2[0]
@property
def left_normal(self):
"""Calculates left normal vector to the current vector."""
return Vec2(-self.y, self.x)
@property
def right_normal(self):
"""Calculates right normal vector to the current vector."""
return Vec2(self.y, -self.x)
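# Minimal usage sketch (illustrative values):
#
#   >>> v = Vec2(3.0, 4.0)
#   >>> v.length()
#   5.0
#   >>> Vec2(1.0, 0.0).cross(Vec2(0.0, 1.0))   # +1.0: second vector is on the left
#   1.0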
| mit | 6,090,678,753,408,154,000 | 27.358108 | 100 | 0.554205 | false |
HaebinShin/tensorflow | tensorflow/python/kernel_tests/seq2seq_test.py | 1 | 31311 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for functional style sequence-to-sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
import numpy as np
import tensorflow as tf
class Seq2SeqTest(tf.test.TestCase):
def testRNNDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
_, enc_state = tf.nn.rnn(
tf.nn.rnn_cell.GRUCell(2), inp, dtype=tf.float32)
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.rnn_decoder(dec_inp, enc_state, cell)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testBasicRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.basic_rnn_seq2seq(inp, dec_inp, cell)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.tied_rnn_seq2seq(inp, dec_inp, cell)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingRNNDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
_, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec, mem = tf.nn.seq2seq.embedding_rnn_decoder(
dec_inp, enc_state, cell, num_symbols=4, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
def testEmbeddingRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with tf.variable_scope("no_tuple"):
cell1 = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell1, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = tf.get_variable("proj_w", [2, 5])
b = tf.get_variable("proj_b", [5])
with tf.variable_scope("proj_seq2seq"):
dec, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, output_projection=(w, b))
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
with tf.variable_scope("other"):
d3, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2,
feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testEmbeddingTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test when num_decoder_symbols is provided, the size of decoder output
# is num_decoder_symbols.
with tf.variable_scope("decoder_symbols_seq2seq"):
dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, num_decoder_symbols=3,
embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
# Test externally provided output projection.
w = tf.get_variable("proj_w", [2, 5])
b = tf.get_variable("proj_b", [5])
with tf.variable_scope("proj_seq2seq"):
dec, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
output_projection=(w, b))
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2])] * 3
with tf.variable_scope("other"):
d3, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testAttentionDecoder1(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.GRUCell(2)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoder2(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.GRUCell(2)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4,
num_heads=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingAttentionDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
cell = tf.nn.rnn_cell.GRUCell(2)
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec, mem = tf.nn.seq2seq.embedding_attention_decoder(
dec_inp, enc_state, attn_states, cell, num_symbols=4,
embedding_size=2, output_size=3)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingAttentionSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with tf.variable_scope("no_tuple"):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
dec, mem = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = tf.get_variable("proj_w", [2, 5])
b = tf.get_variable("proj_b", [5])
with tf.variable_scope("proj_seq2seq"):
dec, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, output_projection=(w, b))
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
with tf.variable_scope("other"):
d3, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2,
feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testOne2ManyRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp_dict = {}
dec_inp_dict["0"] = [
tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec_inp_dict["1"] = [
tf.constant(i, tf.int32, shape=[2]) for i in range(4)]
dec_symbols_dict = {"0": 5, "1": 6}
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
outputs_dict, state_dict = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(outputs_dict["0"])
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run(outputs_dict["1"])
self.assertEqual(4, len(res))
self.assertEqual((2, 6), res[0].shape)
res = sess.run([state_dict["0"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
res = sess.run([state_dict["1"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test that previous-feeding model ignores inputs after the first, i.e.
# dec_inp_dict2 has different inputs from dec_inp_dict after the first
# time-step.
dec_inp_dict2 = {}
dec_inp_dict2["0"] = [
tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
dec_inp_dict2["1"] = [
tf.constant(0, tf.int32, shape=[2]) for _ in range(4)]
with tf.variable_scope("other"):
outputs_dict3, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
outputs_dict1, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=True)
outputs_dict2, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=True)
res1 = sess.run(outputs_dict1["0"])
res2 = sess.run(outputs_dict2["0"])
res3 = sess.run(outputs_dict3["0"])
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testSequenceLoss(self):
with self.test_session() as sess:
logits = [tf.constant(i + 0.5, shape=[2, 5]) for i in range(3)]
targets = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
weights = [tf.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = tf.nn.seq2seq.sequence_loss(
logits, targets, weights,
average_across_timesteps=True,
average_across_batch=True)
res = sess.run(average_loss_per_example)
self.assertAllClose(1.60944, res)
average_loss_per_sequence = tf.nn.seq2seq.sequence_loss(
logits, targets, weights,
average_across_timesteps=False,
average_across_batch=True)
res = sess.run(average_loss_per_sequence)
self.assertAllClose(4.828314, res)
total_loss = tf.nn.seq2seq.sequence_loss(
logits, targets, weights,
average_across_timesteps=False,
average_across_batch=False)
res = sess.run(total_loss)
self.assertAllClose(9.656628, res)
def testSequenceLossByExample(self):
with self.test_session() as sess:
output_classes = 5
logits = [tf.constant(i + 0.5, shape=[2, output_classes])
for i in range(3)]
targets = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
weights = [tf.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = tf.nn.seq2seq.sequence_loss_by_example(
logits, targets, weights,
average_across_timesteps=True)
res = sess.run(average_loss_per_example)
self.assertAllClose(np.asarray([1.609438, 1.609438]), res)
loss_per_sequence = tf.nn.seq2seq.sequence_loss_by_example(
logits, targets, weights,
average_across_timesteps=False)
res = sess.run(loss_per_sequence)
self.assertAllClose(np.asarray([4.828314, 4.828314]), res)
def testModelWithBucketsScopeAndLoss(self):
"""Test that variable scope reuse is not reset after model_with_buckets."""
classes = 10
buckets = [(4, 4), (8, 8)]
with self.test_session():
# Here comes a sample Seq2Seq model using GRU cells.
def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss):
"""Example sequence-to-sequence model that uses GRU cells."""
def GRUSeq2Seq(enc_inp, dec_inp):
cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2,
state_is_tuple=True)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=classes,
num_decoder_symbols=classes, embedding_size=24)
targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
return tf.nn.seq2seq.model_with_buckets(
enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
per_example_loss=per_example_loss)
# Now we construct the copy model.
inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
with tf.variable_scope("root"):
_, losses1 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=False)
# Now check that we did not accidentally set reuse.
self.assertEqual(False, tf.get_variable_scope().reuse)
# Construct one more model with per-example loss.
tf.get_variable_scope().reuse_variables()
_, losses2 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=True)
# First loss is scalar, the second one is a 1-dimensional tensor.
self.assertEqual([], losses1[0].get_shape().as_list())
self.assertEqual([None], losses2[0].get_shape().as_list())
def testModelWithBuckets(self):
"""Larger tests that does full sequence-to-sequence model training."""
# We learn to copy 10 symbols in 2 buckets: length 4 and length 8.
classes = 10
buckets = [(4, 4), (8, 8)]
perplexities = [[], []] # Results for each bucket.
tf.set_random_seed(111)
random.seed(111)
np.random.seed(111)
with self.test_session() as sess:
# We use sampled softmax so we keep output projection separate.
w = tf.get_variable("proj_w", [24, classes])
w_t = tf.transpose(w)
b = tf.get_variable("proj_b", [classes])
# Here comes a sample Seq2Seq model using GRU cells.
def SampleGRUSeq2Seq(enc_inp, dec_inp, weights):
"""Example sequence-to-sequence model that uses GRU cells."""
def GRUSeq2Seq(enc_inp, dec_inp):
cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2,
state_is_tuple=True)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=classes,
num_decoder_symbols=classes, embedding_size=24,
output_projection=(w, b))
targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
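# Sampled softmax: approximate the full softmax over 'classes' outputs
# by sampling 8 negative classes per example.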
def SampledLoss(inputs, labels):
labels = tf.reshape(labels, [-1, 1])
return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, 8, classes)
return tf.nn.seq2seq.model_with_buckets(
enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
softmax_loss_function=SampledLoss)
# Now we construct the copy model.
batch_size = 8
inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
with tf.variable_scope("root"):
_, losses = SampleGRUSeq2Seq(inp, out, weights)
updates = []
params = tf.all_variables()
optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5)
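# Build one clipped-gradient update op per bucket; model_with_buckets
# returns one loss per bucket.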
for i in range(len(buckets)):
full_grads = tf.gradients(losses[i], params)
grads, _ = tf.clip_by_global_norm(full_grads, 30.0)
update = optimizer.apply_gradients(zip(grads, params))
updates.append(update)
sess.run([tf.initialize_all_variables()])
steps = 6
for _ in range(steps):
bucket = random.choice(np.arange(len(buckets)))
length = buckets[bucket][0]
i = [np.array([np.random.randint(9) + 1 for _ in range(batch_size)],
dtype=np.int32) for _ in range(length)]
# 0 is our "GO" symbol here.
o = [np.array([0] * batch_size, dtype=np.int32)] + i
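# Decoder inputs: the GO symbol followed by the input sequence (copy task).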
feed = {}
for i1, i2, o1, o2 in zip(inp[:length], i[:length],
out[:length], o[:length]):
feed[i1.name] = i2
feed[o1.name] = o2
if length < 8: # For the 4-bucket, we need the 5th as target.
feed[out[length].name] = o[length]
res = sess.run([updates[bucket], losses[bucket]], feed)
perplexities[bucket].append(math.exp(float(res[1])))
for bucket in range(len(buckets)):
if len(perplexities[bucket]) > 1: # Assert that perplexity went down.
self.assertLess(perplexities[bucket][-1], perplexities[bucket][0])
def testModelWithBooleanFeedPrevious(self):
"""Test the model behavior when feed_previous is True.
For example, the following two cases have the same effect:
- Train `embedding_rnn_seq2seq` with `feed_previous=True`, which contains
an `embedding_rnn_decoder` with `feed_previous=True` and
`update_embedding_for_previous=True`. The decoder is fed with "<Go>"
and outputs "A, B, C".
- Train `embedding_rnn_seq2seq` with `feed_previous=False`. The decoder
is fed with "<Go>, A, B".
"""
num_encoder_symbols = 3
num_decoder_symbols = 5
batch_size = 2
num_enc_timesteps = 2
num_dec_timesteps = 3
def TestModel(seq2seq):
with self.test_session(graph=tf.Graph()) as sess:
tf.set_random_seed(111)
random.seed(111)
np.random.seed(111)
enc_inp = [tf.constant(i + 1, tf.int32, shape=[batch_size])
for i in range(num_enc_timesteps)]
dec_inp_fp_true = [tf.constant(i, tf.int32, shape=[batch_size])
for i in range(num_dec_timesteps)]
dec_inp_holder_fp_false = [tf.placeholder(tf.int32, shape=[batch_size])
for _ in range(num_dec_timesteps)]
targets = [tf.constant(i + 1, tf.int32, shape=[batch_size])
for i in range(num_dec_timesteps)]
weights = [tf.constant(1.0, shape=[batch_size])
for i in range(num_dec_timesteps)]
def ForwardBackward(enc_inp, dec_inp, feed_previous):
scope_name = "fp_{}".format(feed_previous)
with tf.variable_scope(scope_name):
dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
net_variables = tf.get_collection(tf.GraphKeys.VARIABLES,
scope_name)
optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5)
update_op = optimizer.minimize(
tf.nn.seq2seq.sequence_loss(dec_op, targets, weights),
var_list=net_variables)
return dec_op, update_op, net_variables
dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward(
enc_inp, dec_inp_fp_true, feed_previous=True)
dec_op_fp_false, update_fp_false, variables_fp_false = ForwardBackward(
enc_inp, dec_inp_holder_fp_false, feed_previous=False)
sess.run(tf.initialize_all_variables())
# We only check consistencies between the variables existing in both
# the models with True and False feed_previous. Variables created by
# the loop_function in the model with True feed_previous are ignored.
v_false_name_dict = {v.name.split('/', 1)[-1]: v
for v in variables_fp_false}
matched_variables = [(v, v_false_name_dict[v.name.split('/', 1)[-1]])
for v in variables_fp_true]
for v_true, v_false in matched_variables:
sess.run(tf.assign(v_false, v_true))
# Take the symbols generated by the decoder with feed_previous=True as
# the true input symbols for the decoder with feed_previous=False.
dec_fp_true = sess.run(dec_op_fp_true)
output_symbols_fp_true = np.argmax(dec_fp_true, axis=2)
dec_inp_fp_false = np.vstack((dec_inp_fp_true[0].eval(),
output_symbols_fp_true[:-1]))
sess.run(update_fp_true)
sess.run(update_fp_false,
{holder: inp for holder, inp in zip(dec_inp_holder_fp_false,
dec_inp_fp_false)})
for v_true, v_false in matched_variables:
self.assertAllClose(v_true.eval(), v_false.eval())
def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
feed_previous=feed_previous)
def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
for model in (EmbeddingRNNSeq2SeqF, EmbeddingRNNSeq2SeqNoTupleF,
EmbeddingTiedRNNSeq2Seq, EmbeddingTiedRNNSeq2SeqNoTuple,
EmbeddingAttentionSeq2Seq, EmbeddingAttentionSeq2SeqNoTuple):
TestModel(model)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -1,968,389,051,978,527,700 | 45.249631 | 80 | 0.601578 | false |
j00zek/PolishTranslations | TranslationsUpdater/myComponents.py | 1 | 15911 | # -*- coding: utf-8 -*-
# @j00zek 2015
from __init__ import *
from Components.ActionMap import ActionMap
from Components.config import *
from Components.MenuList import MenuList
from Components.ScrollLabel import ScrollLabel
from Components.Sources.StaticText import StaticText
from enigma import eConsoleAppContainer, eTimer
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
#
from os import system as os_system, popen as os_popen, path as os_path
config.plugins.TranslationsUpdater = ConfigSubsection()
config.plugins.TranslationsUpdater.SortowaniePoDacie = ConfigYesNo(default = False)
config.plugins.TranslationsUpdater.UkrywanieNiezainstalowanych = ConfigYesNo(default = False)
config.plugins.TranslationsUpdater.AutoUpdate = ConfigYesNo(default = False)
config.plugins.TranslationsUpdater.UsunPlikiTMP = ConfigYesNo(default = True)
def substring_2_translate(text):
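"""Return the contents of the first _( ... ) marker found in text."""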
to_translate = text.split('_(', 2)
text = to_translate[1]
to_translate = text.split(')', 2)
text = to_translate[0]
return text
def lastChance(text):
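"""Fallback translation: map English month abbreviations to Polish month names."""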
NonStandardTranslations=[('Jan','Styczeń '),('Feb','Luty'),('Mar','Marzec'),('Apr','Kwiecień'),('May','Maj'), \
('Jun','Czerwiec'),('Jul','Lipiec'),('Aug','Sierpień'),('Sep','Wrzesień'),('Oct','Październik'),('Nov','Listopad'),('Dec','Grudzień')]
for tr in NonStandardTranslations:
text=text.replace(tr[0],tr[1])
return text
def __(txt):
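"""Translate txt; each _( ... ) substring is translated individually,
with lastChance() as a fallback (at most 10 substitutions)."""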
if txt.find('_(') == -1:
txt = _(txt)
else:
index = 0
while txt.find('_(') != -1:
tmptxt = substring_2_translate(txt)
translated_tmptxt = _(tmptxt)
if translated_tmptxt == tmptxt:
translated_tmptxt = lastChance(tmptxt)
txt = txt.replace('_(' + tmptxt + ')', translated_tmptxt)
index += 1
if index == 10:
break
return txt
class translatedConsole(Screen):
#TODO move this to skin.xml
skin = """
<screen position="center,center" size="550,450" title="Instalacja..." >
<widget name="text" position="0,0" size="550,450" font="Console;14" />
</screen>"""
def __init__(self, session, title = "translatedConsole", cmdlist = None, finishedCallback = None, closeOnSuccess = False):
Screen.__init__(self, session)
self.finishedCallback = finishedCallback
self.closeOnSuccess = closeOnSuccess
self.errorOcurred = False
self["text"] = ScrollLabel("")
self["actions"] = ActionMap(["WizardActions", "DirectionActions"],
{
"ok": self.cancel,
"back": self.cancel,
"up": self["text"].pageUp,
"down": self["text"].pageDown
}, -1)
self.cmdlist = cmdlist
self.newtitle = title.replace('\t',' ').replace('  ',' ').strip()
self.onShown.append(self.updateTitle)
self.container = eConsoleAppContainer()
self.run = 0
self.container.appClosed.append(self.runFinished)
self.container.dataAvail.append(self.dataAvail)
self.onLayoutFinish.append(self.startRun) # don't start before the GUI is finished
def updateTitle(self):
self.setTitle(self.newtitle)
def startRun(self):
self["text"].setText("" + "\n\n")
print "TranslatedConsole: executing in run", self.run, " the command:", self.cmdlist[self.run]
if self.container.execute(self.cmdlist[self.run]): #start of container application failed...
self.runFinished(-1) # so we must call runFinished manually
def runFinished(self, retval):
if retval:
self.errorOcurred = True
self.run += 1
if self.run != len(self.cmdlist):
if self.container.execute(self.cmdlist[self.run]): #start of container application failed...
self.runFinished(-1) # so we must call runFinished manually
else:
#lastpage = self["text"].isAtLastPage()
#str = self["text"].getText()
#str += _("\nUse up/down arrows to scroll text. OK closes window");
#self["text"].setText(str)
#if lastpage:
self["text"].lastPage()
if self.finishedCallback is not None:
self.finishedCallback()
if not self.errorOcurred and self.closeOnSuccess:
self.cancel()
def cancel(self):
from Screens.MessageBox import MessageBox
def rebootQuestionAnswered(ret):
if ret:
from enigma import quitMainloop
quitMainloop(3)
try: self.close()
except: pass
return
def doReboot(ret):
self.session.openWithCallback(rebootQuestionAnswered, MessageBox,"Restart GUI now?", type = MessageBox.TYPE_YESNO, timeout = 10, default = False)
if self.run == len(self.cmdlist):
self.container.appClosed.remove(self.runFinished)
self.container.dataAvail.remove(self.dataAvail)
if os_path.exists("/tmp/.rebootGUI"):
self.session.openWithCallback(doReboot,MessageBox, 'LICENCJA: Wszystkie tłumaczenia są autorstwem kolegów Mariusz1970P, Century, Kos i innych.\n\nMożesz z nich korzystać jedynie za pośrednictwem wtyczki "Aktualizator tłumaczeń".\nUszanuj pracę autorów i poświęcony czas i nie wykorzystuj ich bezpośrednio w swoich wtyczkach, czy paczkach.', MessageBox.TYPE_INFO, timeout=15)
else:
self.close()
def dataAvail(self, str):
#lastpage = self["text"].isAtLastPage()
self["text"].setText(self["text"].getText() + __(str))
#if lastpage:
self["text"].lastPage()
############################################
class j00zekTUMenu(Screen,):
def __init__(self, session, MenuFolder = "" , MenuFile = '_MenuItems', MenuTitle = 'j00zekTUMenu'):
self.myList = []
self.list = []
self.myPath = MenuFolder
self.MenuFile = "/tmp/%s" % (MenuFile)
self.SkryptOpcji = ""
self.PIC = ""
picHeight = 0
self.MenuTitle = MenuTitle
skin = """
<screen name="j00zekTUMenu" position="center,center" size="520,520" title="j00zekTUMenu" >
<widget name="list" position="5,30" font="Regular;20" size="510,350" scrollbarMode="showOnDemand" />
<eLabel text="Tłumaczenia: Mariusz1970, Century" position="0,390" size="520,30" font="Regular;22" foregroundColor="yellow" valign="center" halign="center" />
<eLabel text="Wtyczka: (c)2015,2016 j00zek" position="0,420" size="520,30" font="Regular;22" foregroundColor="yellow" valign="center" halign="center" />
<eLabel position=" 5,455" size="253, 30" zPosition="-10" backgroundColor="#20b81c46" />
<eLabel position="262,455" size="253, 30" zPosition="-10" backgroundColor="#20009f3c" />
<eLabel position=" 5,490" size="253, 30" zPosition="-10" backgroundColor="#209ca81b" />
<widget source="key_red" render="Label" position=" 5,455" size="253,30" zPosition="1" font="Regular;20" valign="center" halign="center" transparent="1" />
<widget source="key_green" render="Label" position="262,455" size="253,30" zPosition="1" font="Regular;20" valign="center" halign="center" transparent="1" />
<widget source="key_yellow" render="Label" position=" 5,490" size="253,30" zPosition="1" font="Regular;20" valign="center" halign="center" transparent="1" />
<widget source="Header1" render="Label" position=" 10,0" size="150,30" font="Regular;18" foregroundColor="#6DABBF" valign="center" halign="center" />
<widget source="Header2" render="Label" position="145,0" size="460,30" font="Regular;18" foregroundColor="#6DABBF" valign="center" halign="center" />
</screen>"""
self.skin = skin
self.session = session
Screen.__init__(self, session)
self["list"] = MenuList(self.list)
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{"ok": self.run,
"cancel": self.close,
"red": self.ZmienSortowanie,
"green": self.ZmienUkrywanieNiezainstalowanych,
"yellow": self.ZmienAutoUpdate,
}, -1)
self.onLayoutFinish.append(self.onStart)
self.visible = True
self.setTitle("Pobieranie danych...")
self["key_red"] = StaticText("")
self["key_green"] = StaticText("")
self["key_yellow"] = StaticText("")
self["Header1"] = StaticText("")
self["Header2"] = StaticText("")
def onStart(self):
self.system( "rm -f /tmp/PolishTranslations.list" )
self.updateDataTimer = eTimer()
self.updateDataTimer.callback.append(self.updateData)
self.updateDataTimer.start(500, True) # singleshot
def updateData(self):
self.setButtons(czysc=True)
self.setTitle("Pobieranie danych...")
self.system( "%s/_MenuGenerator.sh %s" % (self.myPath, self.myPath) )
self.setTitle(self.MenuTitle)
self.clearLIST()
self.reloadLIST()
self.setButtons()
def ZmienAutoUpdate(self):
config.plugins.TranslationsUpdater.AutoUpdate.value = not config.plugins.TranslationsUpdater.AutoUpdate.value
config.plugins.TranslationsUpdater.AutoUpdate.save()
configfile.save()
self.setButtons()
def ZmienSortowanie(self):
config.plugins.TranslationsUpdater.SortowaniePoDacie.value = not config.plugins.TranslationsUpdater.SortowaniePoDacie.value
config.plugins.TranslationsUpdater.SortowaniePoDacie.save()
configfile.save()
self.setButtons(czysc=True)
self.clearLIST()
self.updateDataTimer.start(100, True) # singleshot
def ZmienUkrywanieNiezainstalowanych(self):
config.plugins.TranslationsUpdater.UkrywanieNiezainstalowanych.value = not config.plugins.TranslationsUpdater.UkrywanieNiezainstalowanych.value
config.plugins.TranslationsUpdater.UkrywanieNiezainstalowanych.save()
configfile.save()
self.setButtons(czysc=True)
self.clearLIST()
self.updateDataTimer.start(100, True) # singleshot
def setButtons(self, czysc=False):
if czysc == True:
self["key_red"].setText("")
self["key_green"].setText("")
self["Header1"].setText("")
self["Header2"].setText("")
return
if config.plugins.TranslationsUpdater.SortowaniePoDacie.value == True:
self["key_red"].setText("Posortuj po nazwie")
self["Header1"].setText("Z dnia")
self["Header2"].setText("Plik")
else:
self["key_red"].setText("Posortuj po dacie")
self["Header1"].setText("Plik")
self["Header2"].setText("z dnia")
if config.plugins.TranslationsUpdater.UkrywanieNiezainstalowanych.value == True:
self["key_green"].setText("Pokaż wszystkie")
else:
self["key_green"].setText("Ukryj niezainstalowane")
if config.plugins.TranslationsUpdater.AutoUpdate.value == True:
self["key_yellow"].setText("Wył. AutoAktualizację")
else:
self["key_yellow"].setText("Wł. AutoAktualizację")
def YESNO(self, decyzja):
if decyzja is False:
return
self.system("%s" % self.SkryptOpcji)
def system(self,komenda):
with open("/proc/sys/vm/drop_caches", "w") as f: f.write("1\n")
os_system(komenda)
def run(self):
selecteditem = self["list"].getCurrent()
if selecteditem is not None:
for opcja in self.myList:
if opcja[0] == selecteditem:
self.SkryptOpcji = opcja[2]
if opcja[1] == "CONSOLE":
self.session.openWithCallback(self.endrun, translatedConsole, title = "%s" % selecteditem, cmdlist = [ ('chmod 775 %s 2>/dev/null' % self.SkryptOpcji), ('%s' % self.SkryptOpcji) ])
if opcja[1] == "YESNO":
self.session.openWithCallback(self.YESNO ,MessageBox,_("Execute %s?") % selecteditem, MessageBox.TYPE_YESNO)
if opcja[1] == "SILENT":
self.system("%s" % self.SkryptOpcji)
self.endrun()
elif opcja[1] == "RUN":
self.system("%s" % self.SkryptOpcji)
self.session.openWithCallback(self.endrun,MessageBox,_("%s executed!") %( selecteditem ), MessageBox.TYPE_INFO, timeout=5)
elif opcja[1] == "MSG":
msgline = ""
popenret = os_popen( self.SkryptOpcji)
for readline in popenret.readlines():
msgline += readline
self.session.openWithCallback(self.endrun,MessageBox, "%s" %( msgline ), MessageBox.TYPE_INFO, timeout=15)
def endConsole(self, ret =0, wymusUpdate=False):
self.session.openWithCallback(self.endrun,MessageBox, 'LICENCJA: Wszystkie tłumaczenia są autorstwem kolegi Mariusz1970P.\nMożesz z nich korzystać jedynie za pośrednictwem wtyczki "Aktualizator tłumaczeń".\nUszanuj jego pracę i poświęcony czas i nie wykorzystuj ich bezpośrednio w swoich wtyczkach, czy paczkach.', MessageBox.TYPE_INFO, timeout=15)
def endrun(self, ret =0, wymusUpdate=False):
# refresh the menu
if not os_path.exists(self.MenuFile) or wymusUpdate == True:
self.system( "%s/_MenuGenerator.sh %s" % (self.myPath, self.myPath) )
self.clearLIST()
self.reloadLIST()
def SkryptOpcjiWithFullPAth(self, txt):
if txt.startswith('/'):
return txt
elif txt.split(' ')[0] in ('opkg',):
return txt
else:
return ('%s/%s') %(self.myPath,txt)
def clearLIST(self):
# clear the list this odd way so the GUI keeps working, since we must mutate the existing list objects rather than replace them ;)
while len(self.list) > 0:
del self.myList[-1]
del self.list[-1]
self["list"].hide()
self["list"].show()
def reloadLIST(self):
if os_path.exists(self.MenuFile) is True:
self["list"].hide()
with open (self.MenuFile, "r") as myMenufile:
for MenuItem in myMenufile:
MenuItem = MenuItem.rstrip('\n')
if not MenuItem or MenuItem[0] == '#': # skip comment lines
continue
# we only care about menu entries
if MenuItem[0:5] == "ITEM|":
# only consider lines that contain the right number of | separators
#print MenuItem
skladniki = MenuItem.replace("ITEM|","").split('|')
if len(skladniki) == 3:
(NazwaOpcji, TypOpcji, SkryptOpcji) = skladniki
if NazwaOpcji != "":
NazwaOpcji = __(NazwaOpcji)
#NazwaOpcji = NazwaOpcji.replace(NazwaOpcji[:3],_(NazwaOpcji[:3]))
self.myList.append( (NazwaOpcji, TypOpcji, self.SkryptOpcjiWithFullPAth(SkryptOpcji)) )
self.list.append( NazwaOpcji )
myMenufile.close()
myIdx = self["list"].getSelectionIndex()
if myIdx > len(self.list) -1:
self["list"].moveToIndex(len(self.list) -1)
self["list"].show()
| gpl-2.0 | 976,306,449,887,808,500 | 44.583333 | 390 | 0.592889 | false |
a25kk/osm | src/osm.sitetheme/osm/sitetheme/tests/test_setup.py | 1 | 1144 | # -*- coding: utf-8 -*-
"""Setup/installation tests for this package."""
from osm.buildout.testing import IntegrationTestCase
from plone import api
class TestInstall(IntegrationTestCase):
"""Test installation of osm.buildout into Plone."""
def setUp(self):
"""Custom shared utility setup for tests."""
self.portal = self.layer['portal']
self.installer = api.portal.get_tool('portal_quickinstaller')
def test_product_installed(self):
"""Test if osm.buildout is installed with portal_quickinstaller."""
self.assertTrue(self.installer.isProductInstalled('osm.buildout'))
def test_uninstall(self):
"""Test if osm.buildout is cleanly uninstalled."""
self.installer.uninstallProducts(['osm.buildout'])
self.assertFalse(self.installer.isProductInstalled('osm.buildout'))
# browserlayer.xml
def test_browserlayer(self):
"""Test that IosmBuildoutLayer is registered."""
from osm.buildout.interfaces import IosmBuildoutLayer
from plone.browserlayer import utils
self.failUnless(IosmBuildoutLayer in utils.registered_layers())
| mit | -2,264,357,882,213,975,300 | 37.133333 | 75 | 0.701049 | false |
borysiasty/inasafe | safe/gis/test/test_reclassify.py | 1 | 1890 | # coding=utf-8
"""Tests for reclassify implementation."""
import unittest
from collections import OrderedDict
from qgis.core import QgsVectorLayer, QgsFeatureRequest
from safe.gis.reclassify_gdal import reclassify_polygonize
from safe.test.utilities import test_data_path, get_qgis_app
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
class ReclassifyTest(unittest.TestCase):
"""Tests for reclassify a raster."""
def setUp(self):
pass
def tearDown(self):
pass
def test_reclassify_polygonize(self):
"""Test if we can reclassify a raster according to some thresholds."""
raster_path = test_data_path('hazard', 'continuous_flood_20_20.asc')
ranges = OrderedDict()
# value <= 0.2
ranges[1] = [None, 0.2]
# 0.2 < value <= 1
ranges[2] = [0.2, 1]
# 1 < value <= 1.3 (note the deliberate gap in output class values: 2 -> 10)
ranges[10] = [1, 1.3]
# value > 1.3
ranges[11] = [1.3, None]
output = reclassify_polygonize(raster_path, ranges)
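# Load the polygonized result back as an OGR vector layer for the assertions.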
layer = QgsVectorLayer(output, 'test layer', 'ogr')
self.assertEqual(layer.featureCount(), 61)
expression = '"DN" = \'%s\'' % 1
request = QgsFeatureRequest().setFilterExpression(expression)
self.assertEqual(sum(1 for _ in layer.getFeatures(request)), 20)
expression = '"DN" = \'%s\'' % 2
request = QgsFeatureRequest().setFilterExpression(expression)
self.assertEqual(sum(1 for _ in layer.getFeatures(request)), 1)
expression = '"DN" = \'%s\'' % 10
request = QgsFeatureRequest().setFilterExpression(expression)
self.assertEqual(sum(1 for _ in layer.getFeatures(request)), 20)
expression = '"DN" = \'%s\'' % 11
request = QgsFeatureRequest().setFilterExpression(expression)
self.assertEqual(sum(1 for _ in layer.getFeatures(request)), 20)
| gpl-3.0 | -3,354,358,767,574,145,500 | 32.157895 | 78 | 0.631746 | false |
eddienigma/rpi-rht | GraphIndexTH.py | 1 | 4632 | #
# pull data from sql, plot using matplotlib
# see http://stackoverflow.com/questions/18663746/matplotlib-multiple-lines-with-common-date-on-x-axis-solved
#
# rev 1.0 12/02/2013 WPNS built from GraphAirmuxSD.py V1.1
# rev 1.1 12/02/2013 WPNS remove large delta values
# rev 1.2 12/02/2013 WPNS remove -0.1 values (failed to read)
# rev 1.3 12/02/2013 WPNS show count of anomalies
# rev 1.4 12/03/2013 WPNS cleanup, release
# rev 1.5 12/03/2013 WPNS better label
# rev 1.6 12/03/2013 WPNS bugfix, release
# rev 1.69 12/04/2013 WPNS release to Instructables
# rev 2.0-JAS 1/11/2014 JAS adjusted graph ranges for current conditions and to use SQLite3 instead of MySQL
import sys
import os
import time
import math
import datetime
import numpy
import sqlite3 as lite
# so matplotlib has to have some of the setup parameters _before_ pyplot
import matplotlib
matplotlib.use('agg')
#matplotlib.rcParams['figure.dpi'] = 100
#matplotlib.rcParams['figure.figsize'] = [10.24, 7.68]
matplotlib.rcParams['lines.linewidth'] = 1
matplotlib.rcParams['axes.color_cycle'] = ['r','g','b','k']
matplotlib.rcParams['axes.labelsize'] = 'large'
matplotlib.rcParams['font.size'] = 8
matplotlib.rcParams['grid.linestyle']='-'
import matplotlib.pyplot as plt
anomalies = 0
print "GraphTH.py V1.69 12/04/2013 WPNS",time.asctime(),
print "GraphTH.py V1.0-JAS 12/22/2013 JAS"
# open the database connection, read the last <many> seconds of data, put them in a Numpy array called Raw
DBconn = lite.connect('/var/rht/db/rht.db')
cursor = DBconn.cursor()
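# Pull the last 24 hours of readings (60*60*24 seconds back from 'now').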
sql = "select ComputerTime,TempF,Humidity from rht where ComputerTime >= (strftime('%s','now')-(60*60*24))"
cursor.execute(sql)
cursor2 = DBconn.cursor()
sql2 = "SELECT datetime(ComputerTime,'unixepoch','localtime'),TempF,Humidity FROM rht WHERE ComputerTime = (select max(ComputerTime) from rht)"
cursor2.execute(sql2)
lastRow = cursor2.fetchone()
Raw = numpy.fromiter(cursor.fetchall(), count=-1, dtype=[('', numpy.float)]*3)
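# View the flat record array as a (samples, 3) matrix: epoch, TempF, Humidity.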
Raw = Raw.view(numpy.float).reshape(-1, 3)
(samples,ports)=Raw.shape
print 'Samples: {}, DataPoints: {}'.format(samples,ports),
plotme=numpy.zeros((samples,ports-1)) # make an array the same shape minus the epoch numbers
for y in range(ports-1):
# print y
for x in range(samples-1): # can't do last one, there's no (time) delta from previous sample
seconds = Raw[x+1,0]-Raw[x,0]
# if the number didn't overflow the counter
plotme[x,y] = Raw[x,y+1]
plotme[samples-1,y] = None # set last sample to "do not plot"
for x in range(samples-1): # go thru the dataset again
if (Raw[x+1,1] == -0.1): # if values are "reading failed" flag
plotme[x+1,0] = plotme[x,0] # copy current sample over it
plotme[x+1,1] = plotme[x,1] # for temperature and humidity both
anomalies += 1
if (abs(Raw[x+1,1]-Raw[x,1]) > 10): # if temperature jumps more than 10 degrees in a minute
plotme[x+1,0] = plotme[x,0] # copy current sample over it
plotme[x+1,1] = plotme[x,1] # for temperature and humidity both
anomalies += 1
print "Anomalies: ",anomalies,
#print plotme
# get an array of datetime objects (askewchan from stackoverflow, above)
dts = map(datetime.datetime.fromtimestamp, Raw[:,0])
# set up the plot details we want
plt.grid(True)
plt.ylabel('Temp F, RH %')
plt.axis(ymax=100,ymin=10)
plt.xlabel(time.asctime())
plt.title("Outdoor: Temperature (Red), Humidity (Green)")
plt.hold(True)
# and some fiddly bits around formatting the X (date) axis
plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%m/%d %H:%M'))
plt.gca().xaxis.set_major_locator(matplotlib.dates.HourLocator())
lines = plt.plot(dts,plotme)
plt.gcf().autofmt_xdate()
FileName = '/var/rht/images/TH.png'
plt.savefig(FileName)
web = open('/var/www/index.html', 'w')
web.write('<HTML>\n')
web.write('<HEAD>\n')
web.write('<meta http-equiv=\"refresh\" content=\"60\">\n')
web.write('<TITLE>Raspberry Pi Temperature and Humidity Readings</TITLE>\n')
web.write('</HEAD>\n')
web.write('\n')
web.write('<BODY BGCOLOR="#FFFFFF">\n')
web.write('<CENTER>\n')
web.write('<IMG SRC="/images/TH.png">\n')
web.write('<BR><BR>\n')
web.write('<FONT COLOR=\"#FF0000\" SIZE=+2>Temp: ' + str(lastRow[1]) + 'F </FONT> <FONT COLOR=\"#00FF00\" SIZE=+2>Humidity: ' + str(lastRow[2]) + '% </FONT><BR>\n')
web.write('<FONT SIZE=+2>Time: ' + str(lastRow[0]) + '</FONT><BR>\n')
web.write('</CENTER>\n')
web.write('</BODY>\n')
web.write('\n')
web.write('</HTML>\n')
print 'Done at',time.asctime()
| gpl-3.0 | -4,583,286,232,077,759,000 | 36.354839 | 185 | 0.676166 | false |
chagaz/SamSpecCoEN | code/setupCV_computeNetworks.py | 1 | 4177 | # @Author
# Chloe-Agathe Azencott
# chloe-agathe.azencott@mines-paristech.fr
# April 2016
import argparse
import h5py
import numpy as np
import os
import sys
import CoExpressionNetwork
def main():
""" Create sample-specific co-expression networks for one fold and one repeat
of a cross-validation for which fold indices have already been computed.
The data will be stored under
<data_dir>/repeat<repeat idx>
with the following structure:
edges.gz:
Gzipped file containing the list of edges of the co-expression networks.
Each line is an undirected edge, formatted as:
<index of gene 1> <index of gene 2>
By convention, the index of gene 1 is smaller than that of gene 2.
For k=0..(numFolds-1):
<k>/lioness/edge_weights.gz:
gzipped file containing the (self.numSamples, numEdges) array
describing the edge weights of the LIONESS co-expression networks
for the training samples.
<k>/lioness/edge_weights_te.gz:
gzipped file containing the (self.numSamples, numEdges) array
describing the edge weights of the LIONESS co-expression networks
for the test samples.
<k>/regline/edge_weights.gz:
gzipped file containing the (self.numSamples, numEdges) array
describing the edge weights of the Regline co-expression networks
for the training samples.
<k>/regline/edge_weights_te.gz:
gzipped file containing the (self.numSamples, numEdges) array
describing the edge weights of the Regline co-expression networks
for the test samples.
Parameters
----------
aces_dir: path
Path to the ACES folder.
data_dir: path
Path to the folder containing fold indices (under <data_dir>/repeat<repeat_idx>/fold<fold_idx>).
fold: int
Fold index.
repeat: int
Repeat index.
Example
-------
$ python setUpSubTypeStratifiedCV_computeNetworks.py ACES outputs/U133A_combat_RFS/subtype_stratified 0 0
Reference
---------
Allahyar, A., and Ridder, J. de (2015).
FERAL: network-based classifier with application to breast cancer outcome prediction.
Bioinformatics 31, i311--i319.
"""
parser = argparse.ArgumentParser(description="Build sample-specific co-expression networks " + \
"for a 10-fold sub-type stratified CV on the RFS data",
add_help=True)
parser.add_argument("aces_dir", help="Path to ACES data")
parser.add_argument("data_dir", help="Path to the fold indices")
parser.add_argument("fold", help="Index of the fold", type=int)
parser.add_argument("repeat", help="Index of the repeat", type=int)
args = parser.parse_args()
outDir = '%s/repeat%d' % (args.data_dir, args.repeat)
# Get expression data, sample labels.
# Do not normalize the data while loading it (so as not to use test data for normalization).
f = h5py.File("%s/experiments/data/U133A_combat.h5" % args.aces_dir)
expressionData = np.array(f['U133A_combat_RFS']['ExpressionData'])
sampleLabels = np.array(f['U133A_combat_RFS']['PatientClassLabels'])
f.close()
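# expressionData: gene-expression matrix; sampleLabels: RFS class label per patient.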
foldNr = args.fold
# Output directory
foldDir = "%s/fold%d" % (outDir, foldNr)
# Read train indices from file
trIndicesF = '%s/train.indices' % foldDir
trIndices = np.loadtxt(trIndicesF, dtype=int)
sys.stdout.write("Read training indices for fold %d from %s\n" % (foldNr, trIndicesF))
# Read test indices from file
teIndicesF = '%s/test.indices' % foldDir
teIndices = np.loadtxt(teIndicesF, dtype=int)
sys.stdout.write("Read training indices for fold %d from %s\n" % (foldNr, teIndicesF))
print teIndices
print teIndices.shape
# Create networks
CoExpressionNetwork.run_whole_data(expressionData, sampleLabels, foldDir,
trIndices=trIndices, teIndices=teIndices)
if __name__ == "__main__":
main()
| mit | -4,337,070,075,003,851,000 | 38.037383 | 113 | 0.639454 | false |
anaran/olympia | services/update.py | 1 | 14315 | import smtplib
import sys
import traceback
from email.Utils import formatdate
from email.mime.text import MIMEText
from time import time
from urlparse import parse_qsl
from django.utils.http import urlencode
import settings_local as settings
# This has to be imported after the settings so statsd knows where to log to.
from django_statsd.clients import statsd
import commonware.log
import MySQLdb as mysql
import sqlalchemy.pool as pool
try:
from compare import version_int
except ImportError:
from apps.versions.compare import version_int
from constants import applications, base
from utils import (APP_GUIDS, get_mirror, log_configure, PLATFORMS,
STATUSES_PUBLIC)
# Go configure the log.
log_configure()
good_rdf = """<?xml version="1.0"?>
<RDF:RDF xmlns:RDF="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<RDF:Description about="urn:mozilla:%(type)s:%(guid)s">
<em:updates>
<RDF:Seq>
<RDF:li resource="urn:mozilla:%(type)s:%(guid)s:%(version)s"/>
</RDF:Seq>
</em:updates>
</RDF:Description>
<RDF:Description about="urn:mozilla:%(type)s:%(guid)s:%(version)s">
<em:version>%(version)s</em:version>
<em:targetApplication>
<RDF:Description>
<em:id>%(appguid)s</em:id>
<em:minVersion>%(min)s</em:minVersion>
<em:maxVersion>%(max)s</em:maxVersion>
<em:updateLink>%(url)s</em:updateLink>
%(if_update)s
%(if_hash)s
</RDF:Description>
</em:targetApplication>
</RDF:Description>
</RDF:RDF>"""
bad_rdf = """<?xml version="1.0"?>
<RDF:RDF xmlns:RDF="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
</RDF:RDF>"""
no_updates_rdf = """<?xml version="1.0"?>
<RDF:RDF xmlns:RDF="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<RDF:Description about="urn:mozilla:%(type)s:%(guid)s">
<em:updates>
<RDF:Seq>
</RDF:Seq>
</em:updates>
</RDF:Description>
</RDF:RDF>"""
timing_log = commonware.log.getLogger('z.timer')
error_log = commonware.log.getLogger('z.services')
def getconn():
db = settings.SERVICES_DATABASE
return mysql.connect(host=db['HOST'], user=db['USER'],
passwd=db['PASSWORD'], db=db['NAME'])
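# Pooled MySQL connections: 5 in the pool, up to 10 overflow, recycled after 300s.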
mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5, recycle=300)
class Update(object):
def __init__(self, data, compat_mode='strict'):
self.conn, self.cursor = None, None
self.data = data.copy()
self.data['row'] = {}
self.version_int = 0
self.compat_mode = compat_mode
def is_valid(self):
# If you are accessing this from unit tests, then before calling
# is_valid, you can assign your own cursor.
if not self.cursor:
self.conn = mypool.connect()
self.cursor = self.conn.cursor()
data = self.data
# Version can be blank.
data['version'] = data.get('version', '')
for field in ['reqVersion', 'id', 'appID', 'appVersion']:
if field not in data:
return False
data['app_id'] = APP_GUIDS.get(data['appID'])
if not data['app_id']:
return False
sql = """SELECT id, status, addontype_id, guid FROM addons
WHERE guid = %(guid)s AND
inactive = 0 AND
status != %(STATUS_DELETED)s
LIMIT 1;"""
self.cursor.execute(sql, {'guid': self.data['id'],
'STATUS_DELETED': base.STATUS_DELETED})
result = self.cursor.fetchone()
if result is None:
return False
data['id'], data['addon_status'], data['type'], data['guid'] = result
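# version_int() converts a dotted version string into a comparable integer.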
data['version_int'] = version_int(data['appVersion'])
if 'appOS' in data:
for k, v in PLATFORMS.items():
if k in data['appOS']:
data['appOS'] = v
break
else:
data['appOS'] = None
return True
def get_update(self):
data = self.data
data.update(STATUSES_PUBLIC)
data['STATUS_BETA'] = base.STATUS_BETA
sql = ["""
SELECT
addons.guid as guid, addons.addontype_id as type,
addons.inactive as disabled_by_user,
applications.guid as appguid, appmin.version as min,
appmax.version as max, files.id as file_id,
files.status as file_status, files.hash,
files.filename, versions.id as version_id,
files.datestatuschanged as datestatuschanged,
files.strict_compatibility as strict_compat,
versions.releasenotes, versions.version as version,
addons.premium_type
FROM versions
INNER JOIN addons
ON addons.id = versions.addon_id AND addons.id = %(id)s
INNER JOIN applications_versions
ON applications_versions.version_id = versions.id
INNER JOIN applications
ON applications_versions.application_id = applications.id
AND applications.id = %(app_id)s
INNER JOIN appversions appmin
ON appmin.id = applications_versions.min
INNER JOIN appversions appmax
ON appmax.id = applications_versions.max
INNER JOIN files
ON files.version_id = versions.id AND (files.platform_id = 1
"""]
if data.get('appOS'):
sql.append(' OR files.platform_id = %(appOS)s')
sql.append("""
)
-- Find a reference to the user's current version, if it exists.
-- These should never be inner joins. We need results even if we
-- can't find the current version.
LEFT JOIN versions curver
ON curver.addon_id = addons.id AND curver.version = %(version)s
LEFT JOIN files curfile
ON curfile.version_id = curver.id
WHERE
-- Note that the WHEN clauses here will evaluate to the same
-- thing for each row we examine. The JOINs above narrow the
-- rows matched by the WHERE clause to versions of a specific
-- add-on, and the ORDER BY and LIMIT 1 clauses below make it
-- unlikely that we'll be examining a large number of rows,
-- so this is fairly cheap.
CASE
WHEN curfile.status = %(STATUS_BETA)s
THEN
-- User's current version is a known beta version.
--
-- Serve only beta updates. Serving a full version here
-- will forever kick users out of the beta update channel.
--
-- If the add-on does not have full review, serve no
-- updates.
addons.status = %(STATUS_PUBLIC)s AND
files.status = %(STATUS_BETA)s
WHEN addons.status IN (%(STATUS_LITE)s,
%(STATUS_LITE_AND_NOMINATED)s)
AND (curfile.id IS NULL OR curfile.status = %(STATUS_LITE)s)
THEN
-- Add-on is prelim, and user's current version is either a
-- known prelim, or an unknown version.
--
-- Serve only prelim versions. Serving a full version here
-- will prevent users from receiving further updates until
-- the add-on achieves full review.
files.status = %(STATUS_LITE)s
ELSE
-- Anything else, including:
--
-- * Add-on has full review
-- * User's current version has full review, regardless
-- of add-on status
--
-- Serve only full-reviewed updates.
files.status = %(STATUS_PUBLIC)s
END
""")
sql.append('AND appmin.version_int <= %(version_int)s ')
if self.compat_mode == 'ignore':
pass # no further SQL modification required.
elif self.compat_mode == 'normal':
# When file has strict_compatibility enabled, or file has binary
# components, default to compatible is disabled.
sql.append("""AND
CASE WHEN files.strict_compatibility = 1 OR
files.binary_components = 1
THEN appmax.version_int >= %(version_int)s ELSE 1 END
""")
# Filter out versions that don't have the minimum maxVersion
# requirement to qualify for default-to-compatible.
d2c_max = applications.D2C_MAX_VERSIONS.get(data['app_id'])
if d2c_max:
data['d2c_max_version'] = version_int(d2c_max)
sql.append("AND appmax.version_int >= %(d2c_max_version)s ")
# Filter out versions found in compat overrides
sql.append("""AND
NOT versions.id IN (
SELECT version_id FROM incompatible_versions
WHERE app_id=%(app_id)s AND
(min_app_version='0' AND
max_app_version_int >= %(version_int)s) OR
(min_app_version_int <= %(version_int)s AND
max_app_version='*') OR
(min_app_version_int <= %(version_int)s AND
max_app_version_int >= %(version_int)s)) """)
else: # Not defined or 'strict'.
sql.append('AND appmax.version_int >= %(version_int)s ')
# Special case for bug 1031516.
if data['guid'] == 'firefox-hotfix@mozilla.org':
app_version = data['version_int']
hotfix_version = data['version']
if version_int('10') <= app_version <= version_int('16.0.1'):
if hotfix_version < '20121019.01':
sql.append("AND versions.version = '20121019.01' ")
elif hotfix_version < '20130826.01':
sql.append("AND versions.version = '20130826.01' ")
elif version_int('16.0.2') <= app_version <= version_int('24.*'):
if hotfix_version < '20130826.01':
sql.append("AND versions.version = '20130826.01' ")
sql.append('ORDER BY versions.id DESC LIMIT 1;')
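# Highest version id first, so the newest matching version wins.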
self.cursor.execute(''.join(sql), data)
result = self.cursor.fetchone()
if result:
row = dict(zip([
'guid', 'type', 'disabled_by_user', 'appguid', 'min', 'max',
'file_id', 'file_status', 'hash', 'filename', 'version_id',
'datestatuschanged', 'strict_compat', 'releasenotes',
'version', 'premium_type'],
list(result)))
row['type'] = base.ADDON_SLUGS_UPDATE[row['type']]
row['url'] = get_mirror(data['addon_status'],
data['id'], row)
data['row'] = row
return True
return False
def get_bad_rdf(self):
return bad_rdf
def get_rdf(self):
if self.is_valid():
if self.get_update():
rdf = self.get_good_rdf()
else:
rdf = self.get_no_updates_rdf()
else:
rdf = self.get_bad_rdf()
self.cursor.close()
if self.conn:
self.conn.close()
return rdf
def get_no_updates_rdf(self):
name = base.ADDON_SLUGS_UPDATE[self.data['type']]
return no_updates_rdf % ({'guid': self.data['guid'], 'type': name})
def get_good_rdf(self):
data = self.data['row']
data['if_hash'] = ''
if data['hash']:
data['if_hash'] = ('<em:updateHash>%s</em:updateHash>' %
data['hash'])
data['if_update'] = ''
if data['releasenotes']:
data['if_update'] = ('<em:updateInfoURL>%s%s%s/%%APP_LOCALE%%/'
'</em:updateInfoURL>' %
(settings.SITE_URL, '/versions/updateInfo/',
data['version_id']))
return good_rdf % data
def format_date(self, secs):
return '%s GMT' % formatdate(time() + secs)[:25]
def get_headers(self, length):
return [('Content-Type', 'text/xml'),
('Cache-Control', 'public, max-age=3600'),
('Last-Modified', self.format_date(0)),
('Expires', self.format_date(3600)),
('Content-Length', str(length))]
def mail_exception(data):
if settings.EMAIL_BACKEND != 'django.core.mail.backends.smtp.EmailBackend':
return
msg = MIMEText('%s\n\n%s' % (
'\n'.join(traceback.format_exception(*sys.exc_info())), data))
msg['Subject'] = '[Update] ERROR at /services/update'
msg['To'] = ','.join([a[1] for a in settings.ADMINS])
msg['From'] = settings.DEFAULT_FROM_EMAIL
conn = smtplib.SMTP(getattr(settings, 'EMAIL_HOST', 'localhost'),
getattr(settings, 'EMAIL_PORT', '25'))
conn.sendmail(settings.DEFAULT_FROM_EMAIL, msg['To'], msg.as_string())
conn.close()
def log_exception(data):
(typ, value, traceback) = sys.exc_info()
error_log.error(u'Type: %s, %s. Query: %s' % (typ, value, data))
def application(environ, start_response):
status = '200 OK'
with statsd.timer('services.update'):
data = dict(parse_qsl(environ['QUERY_STRING']))
compat_mode = data.pop('compatMode', 'strict')
try:
update = Update(data, compat_mode)
output = update.get_rdf()
start_response(status, update.get_headers(len(output)))
except:
#mail_exception(data)
log_exception(data)
raise
return [output]
| bsd-3-clause | -7,104,861,223,561,318,000 | 36.473822 | 79 | 0.533147 | false |
pLeBlanc93/ArcREST | src/arcrest/manageorg/_portals.py | 1 | 82353 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from ..security import PortalServerSecurityHandler
from ..manageags import AGSAdministration
from ..hostedservice import Services
from ..common.general import local_time_to_online
from .._abstract.abstract import BaseAGOLClass
import os
from ..packages.six.moves import urllib_parse as urlparse
from . import _parameters as parameters
import json
########################################################################
class Portals(BaseAGOLClass):
"""
A multitenant portal contains multiple portals, each one of which is
owned by and represents an organization. Each user in the multitenant
portal belongs to one of these organizational portals or to a default
portal that includes all users who do not belong to an organization.
The Portals Root resource is a root placeholder resource that covers
all the portals contained in the multitenant portal.
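
Example (sketch; assumes ``sh`` is a valid security handler from
``arcrest.security``, e.g. an AGOLTokenSecurityHandler):
    portals = Portals(url="https://www.arcgis.com/sharing/rest",
                      securityHandler=sh)
    org = portals.portalSelf   # Portal object for the caller's organization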
"""
_url = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
_culture = None
_region = None
#----------------------------------------------------------------------
def __init__(self,
url,
securityHandler=None,
proxy_url=None,
proxy_port=None):
"""Constructor"""
if url.lower().endswith("/portals"):
self._url = url
else:
self._url = "%s/portals" % url
self._securityHandler = securityHandler
self._proxy_port = proxy_port
self._proxy_url = proxy_url
#----------------------------------------------------------------------
@property
def root(self):
"""gets the classes url"""
return self._url
#----------------------------------------------------------------------
@property
def regions(self):
"""gets the regions value"""
url = "%s/regions" % self.root
params = {"f": "json"}
return self._get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def languages(self):
"""returns the site's languages"""
url = "%s/languages" % self.root
params = {'f': "json"}
return self._get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def info(self):
"""gets the sharing api information"""
url = "%s/info" % self.root
params = {"f": "json"}
return self._get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def portalSelf(self):
"""The portal to which the current user belongs. This is an
organizational portal if the user belongs to an organization or the
default portal if the user does not belong to one"""
url = "%s/self" % self.root
return Portal(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
)
#----------------------------------------------------------------------
def portal(self, portalID=None):
"""returns a specific reference to a portal"""
if portalID is None:
portalID = self.portalSelf.id
url = "%s/%s" % (self.root, portalID)
return Portal(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initalize=True)
#----------------------------------------------------------------------
@property
def portalId(self):
"""gets the portal Id"""
return self.portalSelf.id
########################################################################
class Portal(BaseAGOLClass):
"""
Portal returns information on your organization and is accessible to
administrators. Publishers and information workers can view users and
resources of the organization.
"""
_bingKey = None
_authorizedCrossOriginDomains = None
_url = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
_json = None
_json_dict = None
_canSharePublic = None
_defaultExtent = None
_supportsHostedServices = None
_homePageFeaturedContentCount = None
_supportsOAuth = None
_portalName = None
_databaseUsage = None
_culture = None
_helpBase = None
_galleryTemplatesGroupQuery = None
_commentsEnabled = None
_databaseQuota = None
_id = None
_canSearchPublic = None
_customBaseUrl = None
_allSSL = None
_httpPort = None
_featuredGroupsId = None
_defaultBasemap = None
_created = None
_access = None
_platform = None
_isPortal = None
_canSignInArcGIS = None
_disableSignup = None
_httpsPort = None
_units = None
_backgroundImage = None
_mfaEnabled = None
_featuredGroups = None
_thumbnail = None
_featuredItemsGroupQuery = None
_canSignInIDP = None
_useStandardizedQuery = None
_rotatorPanels = None
_description = None
_homePageFeaturedContent = None
_helperServices = None
_canProvisionDirectPurchase = None
_canListData = None
_user = None
_helpMap = None
_canListPreProvisionedItems = None
_colorSetsGroupQuery = None
_canListApps = None
_portalProperties = None
_isWindows = None
_name = None
_supportsSceneServices = None
_stylesGroupQuery = None
_samlEnabled = None
_symbolSetsGroupQuery = None
_portalLocalHttpPort = None
_storageQuota = None
_canShareBingPublic = None
_maxTokenExpirationMinutes = None
_layerTemplatesGroupQuery = None
_staticImagesUrl = None
_modified = None
_portalHostname = None
_showHomePageDescription = None
_availableCredits = None
_portalMode = None
_portalLocalHttpsPort = None
_hostedServerHostedFolder = None
_storageUsage = None
_templatesGroupQuery = None
_portalLocalHostname = None
_basemapGalleryGroupQuery = None
_mfaAdmins = None
_portalId = None
_subscriptionInfo = None
_urlKey = None
_metadataEditable = None
_portalThumbnail = None
_metadataFormats = None
_ipCntryCode = None
_livingAtlasGroupQuery = None
_region = None
_contacts = None
_appInfo = None
_creditAssignments = None
_updateUserProfileDisabled = None
_analysisLayersGroupQuery = None
_defaultUserCreditAssignment = None
#----------------------------------------------------------------------
def __init__(self,
url,
securityHandler,
proxy_url=None,
proxy_port=None,
initalize=False):
"""Constructor"""
self._url = url
self._securityHandler = securityHandler
if not securityHandler is None:
self._referer_url = securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if initalize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
"""loads the property data into the class"""
params = {
"f" : "json"
}
json_dict = self._get(url=self.root,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self._json_dict = json_dict
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.items():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
setattr(self, k, v)
print( k, " - attribute not implemented in Portal class.")
#----------------------------------------------------------------------
def _findPortalId(self):
"""gets the portal id for a site if not known."""
if not self.root.lower().endswith("/self"):
url = self.root + "/self"
else:
url = self.root
params = {
"f" : "json"
}
res = self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if 'id' in res:
return res['id']
return None
#----------------------------------------------------------------------
@property
def analysisLayersGroupQuery(self):
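'''gets the property value for analysisLayersGroupQuery'''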
if self._analysisLayersGroupQuery is None:
self.__init()
return self._analysisLayersGroupQuery
#----------------------------------------------------------------------
@property
def defaultUserCreditAssignment(self):
"""gets the property value for defaultUserCreditAssignment"""
if self._defaultUserCreditAssignment is None:
self.__init()
return self._defaultUserCreditAssignment
#----------------------------------------------------------------------
@property
def analysisLayersGroupQueryt(self):
"""gets the property value for analysisLayersGroupQuery"""
if self._analysisLayersGroupQuery is None:
self.__init()
return self._analysisLayersGroupQuery
#----------------------------------------------------------------------
@property
def updateUserProfileDisabled(self):
'''gets the property value for updateUserProfileDisabled'''
if self._updateUserProfileDisabled is None:
self.__init()
return self._updateUserProfileDisabled
#----------------------------------------------------------------------
@property
def bingKey(self):
'''gets the property value for bingKey'''
if self._bingKey is None:
self.__init()
return self._bingKey
#----------------------------------------------------------------------
@property
def subscriptionInfo(self):
'''gets the property value for subscriptionInfo'''
if self._subscriptionInfo is None:
self.__init()
return self._subscriptionInfo
#----------------------------------------------------------------------
@property
def authorizedCrossOriginDomains(self):
""" gets the authorizedCrossOriginDomains property """
if self._authorizedCrossOriginDomains is None:
self.__init()
return self._authorizedCrossOriginDomains
#----------------------------------------------------------------------
@property
def appInfo(self):
'''gets the property value for appInfo'''
if self._appInfo is None:
self.__init()
return self._appInfo
#----------------------------------------------------------------------
@property
def contacts(self):
'''gets the property value for contacts'''
if self._contacts is None:
self.__init()
return self._contacts
#----------------------------------------------------------------------
@property
def urlKey(self):
'''gets the property value for urlKey'''
if self._urlKey is None:
self.__init()
return self._urlKey
#----------------------------------------------------------------------
@property
def metadataEditable(self):
'''gets the property value for metadataEditable'''
if self._metadataEditable is None:
self.__init()
return self._metadataEditable
#----------------------------------------------------------------------
@property
def portalThumbnail(self):
'''gets the property value for portalThumbnail'''
if self._portalThumbnail is None:
self.__init()
return self._portalThumbnail
#----------------------------------------------------------------------
@property
def metadataFormats(self):
'''gets the property value for metadataFormats'''
if self._metadataFormats is None:
self.__init()
return self._metadataFormats
#----------------------------------------------------------------------
@property
def ipCntryCode(self):
'''gets the property value for ipCntryCode'''
if self._ipCntryCode is None:
self.__init()
return self._ipCntryCode
#----------------------------------------------------------------------
@property
def livingAtlasGroupQuery(self):
'''gets the property value for livingAtlasGroupQuery'''
if self._livingAtlasGroupQuery is None:
self.__init()
return self._livingAtlasGroupQuery
#----------------------------------------------------------------------
@property
def region(self):
'''gets the property value for region'''
if self._region is None:
self.__init()
return self._region
#----------------------------------------------------------------------
@property
def portalId(self):
"""gets the portal Id"""
if self._portalId is None:
self._portalId = self._findPortalId()
return self._portalId
#----------------------------------------------------------------------
def __str__(self):
"""returns class as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""iterates through raw JSON"""
if self._json_dict is None:
self.__init()
for k,v in self._json_dict.items():
yield [k,v]
#----------------------------------------------------------------------
@property
def root(self):
"""returns classes URL"""
return self._url
#----------------------------------------------------------------------
@property
def canSharePublic(self):
'''gets the property value for canSharePublic'''
if self._canSharePublic is None:
self.__init()
return self._canSharePublic
#----------------------------------------------------------------------
@property
def defaultExtent(self):
'''gets the property value for defaultExtent'''
if self._defaultExtent is None:
self.__init()
return self._defaultExtent
#----------------------------------------------------------------------
@property
def supportsHostedServices(self):
'''gets the property value for supportsHostedServices'''
if self._supportsHostedServices is None:
self.__init()
return self._supportsHostedServices
#----------------------------------------------------------------------
@property
def homePageFeaturedContentCount(self):
'''gets the property value for homePageFeaturedContentCount'''
if self._homePageFeaturedContentCount is None:
self.__init()
return self._homePageFeaturedContentCount
#----------------------------------------------------------------------
@property
def supportsOAuth(self):
'''gets the property value for supportsOAuth'''
if self._supportsOAuth is None:
self.__init()
return self._supportsOAuth
#----------------------------------------------------------------------
@property
def portalName(self):
'''gets the property value for portalName'''
if self._portalName is None:
self.__init()
return self._portalName
#----------------------------------------------------------------------
@property
def databaseUsage(self):
'''gets the property value for databaseUsage'''
if self._databaseUsage is None:
self.__init()
return self._databaseUsage
#----------------------------------------------------------------------
@property
def culture(self):
'''gets the property value for culture'''
if self._culture is None:
self.__init()
return self._culture
#----------------------------------------------------------------------
@property
def helpBase(self):
'''gets the property value for helpBase'''
if self._helpBase is None:
self.__init()
return self._helpBase
#----------------------------------------------------------------------
@property
def galleryTemplatesGroupQuery(self):
'''gets the property value for galleryTemplatesGroupQuery'''
if self._galleryTemplatesGroupQuery is None:
self.__init()
return self._galleryTemplatesGroupQuery
#----------------------------------------------------------------------
@property
def commentsEnabled(self):
'''gets the property value for commentsEnabled'''
if self._commentsEnabled is None:
self.__init()
return self._commentsEnabled
#----------------------------------------------------------------------
@property
def databaseQuota(self):
'''gets the property value for databaseQuota'''
if self._databaseQuota is None:
self.__init()
return self._databaseQuota
#----------------------------------------------------------------------
@property
def id(self):
'''gets the property value for id'''
if self._id is None:
self.__init()
return self._id
#----------------------------------------------------------------------
@property
def canSearchPublic(self):
'''gets the property value for canSearchPublic'''
if self._canSearchPublic is None:
self.__init()
return self._canSearchPublic
#----------------------------------------------------------------------
@property
def customBaseUrl(self):
'''gets the property value for customBaseUrl'''
if self._customBaseUrl is None:
self.__init()
return self._customBaseUrl
#----------------------------------------------------------------------
@property
def allSSL(self):
'''gets the property value for allSSL'''
if self._allSSL is None:
self.__init()
return self._allSSL
#----------------------------------------------------------------------
@property
def httpPort(self):
'''gets the property value for httpPort'''
if self._httpPort is None:
self.__init()
return self._httpPort
#----------------------------------------------------------------------
@property
def featuredGroupsId(self):
'''gets the property value for featuredGroupsId'''
if self._featuredGroupsId is None:
self.__init()
return self._featuredGroupsId
#----------------------------------------------------------------------
@property
def defaultBasemap(self):
'''gets the property value for defaultBasemap'''
if self._defaultBasemap is None:
self.__init()
return self._defaultBasemap
#----------------------------------------------------------------------
@property
def created(self):
'''gets the property value for created'''
if self._created is None:
self.__init()
return self._created
#----------------------------------------------------------------------
@property
def access(self):
'''gets the property value for access'''
if self._access is None:
self.__init()
return self._access
#----------------------------------------------------------------------
@property
def platform(self):
'''gets the property value for platform'''
if self._platform is None:
self.__init()
return self._platform
#----------------------------------------------------------------------
@property
def isPortal(self):
'''gets the property value for isPortal'''
if self._isPortal is None:
self.__init()
return self._isPortal
#----------------------------------------------------------------------
@property
def canSignInArcGIS(self):
'''gets the property value for canSignInArcGIS'''
if self._canSignInArcGIS is None:
self.__init()
return self._canSignInArcGIS
#----------------------------------------------------------------------
@property
def disableSignup(self):
'''gets the property value for disableSignup'''
if self._disableSignup is None:
self.__init()
return self._disableSignup
#----------------------------------------------------------------------
@property
def httpsPort(self):
'''gets the property value for httpsPort'''
if self._httpsPort is None:
self.__init()
return self._httpsPort
#----------------------------------------------------------------------
@property
def units(self):
'''gets the property value for units'''
if self._units is None:
self.__init()
return self._units
#----------------------------------------------------------------------
@property
def backgroundImage(self):
'''gets the property value for backgroundImage'''
if self._backgroundImage is None:
self.__init()
return self._backgroundImage
#----------------------------------------------------------------------
@property
def mfaEnabled(self):
'''gets the property value for mfaEnabled'''
if self._mfaEnabled is None:
self.__init()
return self._mfaEnabled
#----------------------------------------------------------------------
@property
def featuredGroups(self):
'''gets the property value for featuredGroups'''
if self._featuredGroups is None:
self.__init()
return self._featuredGroups
#----------------------------------------------------------------------
@property
def thumbnail(self):
'''gets the property value for thumbnail'''
if self._thumbnail is None:
self.__init()
return self._thumbnail
#----------------------------------------------------------------------
@property
def featuredItemsGroupQuery(self):
'''gets the property value for featuredItemsGroupQuery'''
if self._featuredItemsGroupQuery is None:
self.__init()
return self._featuredItemsGroupQuery
#----------------------------------------------------------------------
@property
def canSignInIDP(self):
'''gets the property value for canSignInIDP'''
if self._canSignInIDP is None:
self.__init()
return self._canSignInIDP
#----------------------------------------------------------------------
@property
def useStandardizedQuery(self):
'''gets the property value for useStandardizedQuery'''
if self._useStandardizedQuery is None:
self.__init()
return self._useStandardizedQuery
#----------------------------------------------------------------------
@property
def rotatorPanels(self):
'''gets the property value for rotatorPanels'''
if self._rotatorPanels is None:
self.__init()
return self._rotatorPanels
#----------------------------------------------------------------------
@property
def description(self):
'''gets the property value for description'''
if self._description is None:
self.__init()
return self._description
#----------------------------------------------------------------------
@property
def homePageFeaturedContent(self):
'''gets the property value for homePageFeaturedContent'''
if self._homePageFeaturedContent is None:
self.__init()
return self._homePageFeaturedContent
#----------------------------------------------------------------------
@property
def helperServices(self):
'''gets the property value for helperServices'''
if self._helperServices is None:
self.__init()
return self._helperServices
#----------------------------------------------------------------------
@property
def canProvisionDirectPurchase(self):
'''gets the property value for canProvisionDirectPurchase'''
if self._canProvisionDirectPurchase is None:
self.__init()
return self._canProvisionDirectPurchase
#----------------------------------------------------------------------
@property
def canListData(self):
'''gets the property value for canListData'''
if self._canListData is None:
self.__init()
return self._canListData
#----------------------------------------------------------------------
@property
def user(self):
'''gets the property value for user'''
if self._user is None:
self.__init()
return self._user
#----------------------------------------------------------------------
@property
def helpMap(self):
'''gets the property value for helpMap'''
if self._helpMap is None:
self.__init()
return self._helpMap
#----------------------------------------------------------------------
@property
def canListPreProvisionedItems(self):
'''gets the property value for canListPreProvisionedItems'''
if self._canListPreProvisionedItems is None:
self.__init()
return self._canListPreProvisionedItems
#----------------------------------------------------------------------
@property
def colorSetsGroupQuery(self):
'''gets the property value for colorSetsGroupQuery'''
if self._colorSetsGroupQuery is None:
self.__init()
return self._colorSetsGroupQuery
#----------------------------------------------------------------------
@property
def canListApps(self):
'''gets the property value for canListApps'''
if self._canListApps is None:
self.__init()
return self._canListApps
#----------------------------------------------------------------------
@property
def portalProperties(self):
'''gets the property value for portalProperties'''
if self._portalProperties is None:
self.__init()
return self._portalProperties
#----------------------------------------------------------------------
@property
def isWindows(self):
'''gets the property value for isWindows'''
if self._isWindows is None:
self.__init()
return self._isWindows
#----------------------------------------------------------------------
@property
def name(self):
'''gets the property value for name'''
if self._name is None:
self.__init()
return self._name
#----------------------------------------------------------------------
@property
def supportsSceneServices(self):
'''gets the property value for supportsSceneServices'''
if self._supportsSceneServices is None:
self.__init()
return self._supportsSceneServices
#----------------------------------------------------------------------
@property
def stylesGroupQuery(self):
'''gets the property value for stylesGroupQuery'''
if self._stylesGroupQuery is None:
self.__init()
return self._stylesGroupQuery
#----------------------------------------------------------------------
@property
def samlEnabled(self):
'''gets the property value for samlEnabled'''
if self._samlEnabled is None:
self.__init()
return self._samlEnabled
#----------------------------------------------------------------------
@property
def symbolSetsGroupQuery(self):
'''gets the property value for symbolSetsGroupQuery'''
if self._symbolSetsGroupQuery is None:
self.__init()
return self._symbolSetsGroupQuery
#----------------------------------------------------------------------
@property
def portalLocalHttpPort(self):
'''gets the property value for portalLocalHttpPort'''
if self._portalLocalHttpPort is None:
self.__init()
return self._portalLocalHttpPort
#----------------------------------------------------------------------
@property
def storageQuota(self):
'''gets the property value for storageQuota'''
if self._storageQuota is None:
self.__init()
return self._storageQuota
#----------------------------------------------------------------------
@property
def canShareBingPublic(self):
'''gets the property value for canShareBingPublic'''
if self._canShareBingPublic is None:
self.__init()
return self._canShareBingPublic
#----------------------------------------------------------------------
@property
def maxTokenExpirationMinutes(self):
'''gets the property value for maxTokenExpirationMinutes'''
if self._maxTokenExpirationMinutes is None:
self.__init()
return self._maxTokenExpirationMinutes
#----------------------------------------------------------------------
@property
def layerTemplatesGroupQuery(self):
'''gets the property value for layerTemplatesGroupQuery'''
if self._layerTemplatesGroupQuery is None:
self.__init()
return self._layerTemplatesGroupQuery
#----------------------------------------------------------------------
@property
def staticImagesUrl(self):
'''gets the property value for staticImagesUrl'''
if self._staticImagesUrl is None:
self.__init()
return self._staticImagesUrl
#----------------------------------------------------------------------
@property
def modified(self):
'''gets the property value for modified'''
if self._modified is None:
self.__init()
return self._modified
#----------------------------------------------------------------------
@property
def portalHostname(self):
'''gets the property value for portalHostname'''
if self._portalHostname is None:
self.__init()
return self._portalHostname
#----------------------------------------------------------------------
@property
def showHomePageDescription(self):
'''gets the property value for showHomePageDescription'''
if self._showHomePageDescription is None:
self.__init()
return self._showHomePageDescription
#----------------------------------------------------------------------
@property
def availableCredits(self):
'''gets the property value for availableCredits'''
if self._availableCredits is None:
self.__init()
return self._availableCredits
#----------------------------------------------------------------------
@property
def portalMode(self):
'''gets the property value for portalMode'''
if self._portalMode is None:
self.__init()
return self._portalMode
#----------------------------------------------------------------------
@property
def portalLocalHttpsPort(self):
'''gets the property value for portalLocalHttpsPort'''
if self._portalLocalHttpsPort is None:
self.__init()
return self._portalLocalHttpsPort
#----------------------------------------------------------------------
@property
def hostedServerHostedFolder(self):
'''gets the property value for hostedServerHostedFolder'''
if self._hostedServerHostedFolder is None:
self.__init()
return self._hostedServerHostedFolder
#----------------------------------------------------------------------
@property
def storageUsage(self):
'''gets the property value for storageUsage'''
if self._storageUsage is None:
self.__init()
return self._storageUsage
#----------------------------------------------------------------------
@property
def templatesGroupQuery(self):
'''gets the property value for templatesGroupQuery'''
if self._templatesGroupQuery is None:
self.__init()
return self._templatesGroupQuery
#----------------------------------------------------------------------
@property
def portalLocalHostname(self):
'''gets the property value for portalLocalHostname'''
if self._portalLocalHostname is None:
self.__init()
return self._portalLocalHostname
#----------------------------------------------------------------------
@property
def basemapGalleryGroupQuery(self):
'''gets the property value for basemapGalleryGroupQuery'''
if self._basemapGalleryGroupQuery is None:
self.__init()
return self._basemapGalleryGroupQuery
#----------------------------------------------------------------------
@property
def mfaAdmins(self):
'''gets the property value for mfaAdmins'''
if self._mfaAdmins is None:
self.__init()
return self._mfaAdmins
#----------------------------------------------------------------------
@property
def creditAssignments(self):
'''gets the property value for creditAssignments'''
if self._creditAssignments is None:
self.__init()
return self._creditAssignments
#----------------------------------------------------------------------
@property
def urls(self):
"""gets the urls for a portal"""
url = "%s/urls" % self.root
params = {"f":"json"}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def featureServers(self):
"""gets the hosting feature AGS Server"""
services = []
if self.urls == {}:
return {}
urls = self.urls
if 'https' in urls['urls']['features']:
res = urls['urls']['features']['https']
else:
res = urls['urls']['features']['http']
for https in res:
if self.isPortal:
url = "%s/admin" % https
services.append(AGSAdministration(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
)
else:
url = "https://%s/%s/ArcGIS/admin" % (https, self.portalId)
services.append(Services(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port))
return services
#----------------------------------------------------------------------
@property
def tileServers(self):
"""
        Returns the objects that manage the site's hosted tile services and
        servers: AGSAdministration objects when the site is Portal for
        ArcGIS, or hostedservice.Services objects when it is AGOL.
"""
services = []
ishttps = False
if self.urls == {}:
return {}
urls = self.urls["urls"]['tiles']
if 'https' in urls:
res = urls['https']
ishttps = True
else:
res = urls['http']
for https in res:
if ishttps:
scheme = "https"
else:
scheme = "http"
            if not self.isPortal:
url = "%s://%s/tiles/%s/arcgis/admin/services" % (scheme, https, self.portalId)
services.append(Services(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port))
            else:
                servers = self.servers
for server in servers.servers:
url = server.adminUrl
sh = PortalServerSecurityHandler(tokenHandler=self._securityHandler,
serverUrl=url,
referer=server.name.split(":")[0]
)
services.append(
AGSAdministration(url=url,
securityHandler=sh,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=True)
)
return services
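    # Hypothetical usage sketch (not part of the original source); assumes an
    # authenticated Portal instance named `portal`:
    #
    #   for ts in portal.tileServers:
    #       print(ts)  # AGSAdministration (Portal) or Services (AGOL) object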
#----------------------------------------------------------------------
@property
def purchases(self):
"""gets the portal's purchases"""
url = "%s/purchases" % self.root
params = {"f":"json"}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def customers(self):
"""gets the site's customers"""
url = "%s/customers" % self.root
params = {"f":"json"}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def exportCustomers(self, outPath):
"""exports customer list to a csv file
Input:
outPath - save location of the customer list
"""
url = "%s/customers/export" % self.root
params = {"f":"csv"}
dirPath = None
fileName = None
if outPath is not None:
dirPath = os.path.dirname(outPath)
fileName = os.path.basename(outPath)
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler, proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
out_folder=dirPath,
file_name=fileName)
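    # Hypothetical usage sketch; the output path is illustrative:
    #
    #   csv_path = portal.exportCustomers(r"C:\temp\customers.csv")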
#----------------------------------------------------------------------
def update(self,
updatePortalParameters,
clearEmptyFields=False):
"""
The Update operation allows administrators only to update the
organization information such as name, description, thumbnail, and
featured groups.
Inputs:
            updatePortalParameters - parameter.PortalParameters object or
                                     dict that holds the information to update
            clearEmptyFields - boolean; if True, empty fields are cleared
                               on update
"""
url = self.root + "/update"
params = {
"f" : "json",
"clearEmptyFields" : clearEmptyFields
}
if isinstance(updatePortalParameters, parameters.PortalParameters):
params.update(updatePortalParameters.value)
elif isinstance(updatePortalParameters, dict):
for k,v in updatePortalParameters.items():
params[k] = v
else:
raise AttributeError("updatePortalParameters must be of type parameter.PortalParameters")
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
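    # Hypothetical usage sketch (not from the original API docs); assumes an
    # authenticated Portal instance named `portal`. A plain dict is also
    # accepted in place of parameter.PortalParameters:
    #
    #   res = portal.update({"name": "GIS Team Portal",
    #                        "description": "Internal portal"})
    #   print(res)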
#----------------------------------------------------------------------
def updateUserRole(self,
user,
role):
"""
        The Update User Role operation allows the administrator of an
        organization to update the role of a user within a portal.
Inputs:
role - Sets the user's role.
Roles are the following:
org_user - Ability to add items, create groups, and
share in the organization.
org_publisher - Same privileges as org_user plus the
ability to publish hosted services from ArcGIS for
Desktop and ArcGIS Online.
org_admin - In addition to add, create, share, and publish
capabilities, an org_admin administers and customizes
the organization.
Example: role=org_publisher
user - The username whose role you want to change.
"""
url = self._url + "/updateuserrole"
params = {
"f" : "json",
"user" : user,
"role" : role
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
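    # Hypothetical usage sketch; `portal` and the username are illustrative:
    #
    #   res = portal.updateUserRole(user="jdoe", role="org_publisher")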
#----------------------------------------------------------------------
def removeUser(self, users):
"""
The Remove Users operation allows the administrator to remove users
from a portal. Before the administrator can remove the user, all of
the user's content and groups must be reassigned or deleted.
Inputs:
users - Comma-separated list of usernames to remove.
"""
url = self._url + "/removeusers"
params = {
"f" : "json",
"users" : users
}
return self._post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def isServiceNameAvailable(self,
name,
serviceType):
"""
Checks to see if a given service name and type are available for
        publishing a new service. true indicates that the name and type are
        not found in the organization's services and are available for
publishing. false means the requested name and type are not available.
Inputs:
name - requested name of service
serviceType - type of service allowed values: Feature Service or
Map Service
"""
        _allowedTypes = ['Feature Service', 'Map Service']
        if serviceType not in _allowedTypes:
            raise AttributeError("serviceType must be one of: %s" %
                                 ", ".join(_allowedTypes))
        url = self._url + "/isServiceNameAvailable"
params = {
"f" : "json",
"name" : name,
"type" : serviceType
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
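    # Hypothetical usage sketch: check a name before publishing. The
    # 'available' response key is an assumption based on the REST API:
    #
    #   res = portal.isServiceNameAvailable("Parcels", "Feature Service")
    #   if res.get("available"):
    #       pass  # safe to publish a service named "Parcels"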
#----------------------------------------------------------------------
@property
def servers(self):
"""gets the federated or registered servers for Portal"""
url = "%s/servers" % self.root
return Servers(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def assignUserCredits(self, usernames, credits):
"""
assigns credit to a user.
Inputs:
usernames - list of users
credits - number of credits to assign to the users
        Output:
dictionary
"""
userAssignments = []
for name in usernames:
userAssignments.append(
{
"username" : name,
"credits" : credits
}
)
params = {
"userAssignments" : userAssignments,
"f" : "json"
}
url = self.root + "/assignUserCredits"
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
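    # Hypothetical usage sketch: grant 500 credits to two members; the
    # usernames are illustrative:
    #
    #   res = portal.assignUserCredits(usernames=["jdoe", "asmith"],
    #                                  credits=500)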
#----------------------------------------------------------------------
def users(self,
start=1,
num=10,
sortField="fullName",
sortOrder="asc",
role=None):
"""
Lists all the members of the organization. The start and num paging
parameters are supported.
Inputs:
start - The number of the first entry in the result set response.
The index number is 1-based.
The default value of start is 1 (that is, the first
search result).
The start parameter, along with the num parameter, can
be used to paginate the search results.
num - The maximum number of results to be included in the result
set response.
The default value is 10, and the maximum allowed value is
                  100. The start parameter, along with the num parameter, can
be used to paginate the search results.
sortField - field to sort on
sortOrder - asc or desc on the sortField
role - name of the role or role id to search
Output:
list of User classes
"""
users = []
url = self._url + "/users"
params = {
"f" : "json",
"start" : start,
"num" : num
}
if not role is None:
params['role'] = role
if not sortField is None:
params['sortField'] = sortField
if not sortOrder is None:
params['sortOrder'] = sortOrder
from ._community import Community
res = self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
if "users" in res:
if len(res['users']) > 0:
parsed = urlparse.urlparse(self._url)
if parsed.netloc.lower().find('arcgis.com') == -1:
cURL = "%s://%s/%s/sharing/rest/community" % (parsed.scheme,
parsed.netloc,
parsed.path[1:].split('/')[0])
else:
cURL = "%s://%s/sharing/rest/community" % (parsed.scheme,
parsed.netloc)
com = Community(url=cURL,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
for r in res['users']:
users.append(
com.users.user(r["username"])
)
res['users'] = users
return res
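    # Hypothetical paging sketch; assumes the response includes a 'nextStart'
    # key (-1 when no further page exists):
    #
    #   start = 1
    #   while start != -1:
    #       page = portal.users(start=start, num=100)
    #       for member in page['users']:
    #           print(member)
    #       start = page.get('nextStart', -1)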
#----------------------------------------------------------------------
def createRole(self, name, description):
"""
creates a role for a portal/agol site.
Inputs:
            name - name of the role
description - brief text string stating the nature of this
role.
        Output:
dictionary
"""
params = {
"name" : name,
"description" : description,
"f" : "json"
}
url = self.root + "/createRole"
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def roles(self):
"""gets the roles class that allows admins to manage custom roles
on portal"""
return Roles(url="%s/roles" % self.root,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def cost(self,
tileStorage=0,
fileStorage=0,
featureStorage=0,
generatedTileCount=0,
loadedTileCount=0,
enrichVariableCount=0,
enrichReportCount=0,
serviceAreaCount=0,
geocodeCount=0):
"""
returns the cost values for a given portal
Inputs:
            tileStorage - int - number of tiles to store in MBs
            fileStorage - int - size of file to store in MBs
            featureStorage - int - size in MBs
            generatedTileCount - int - number of tiles to generate on site
            loadedTileCount - int - cost to host a certain number of tiles
enrichVariableCount - int - cost to enrich data
enrichReportCount - int - cost to generate an enrichment report
serviceAreaCount - int - cost to generate x number of service
areas
geocodeCount - int - cost to generate x number of addresses
"""
params = {
"f" : "json",
"tileStorage": tileStorage,
"fileStorage": fileStorage,
"featureStorage": featureStorage,
"generatedTileCount": generatedTileCount,
"loadedTileCount":loadedTileCount,
"enrichVariableCount": enrichVariableCount,
"enrichReportCount" : enrichReportCount,
"serviceAreaCount" : serviceAreaCount,
"geocodeCount" : geocodeCount
}
url = self._url + "/cost"
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def resources(self,
start=1,
num=10):
"""
Resources lists all file resources for the organization. The start
and num paging parameters are supported.
Inputs:
start - the number of the first entry in the result set response
The index number is 1-based and the default is 1
            num - the maximum number of results to be returned
"""
url = self._url + "/resources"
params = {
"f" : "json",
"start" : start,
"num" : num
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def addResource(self, key, filePath, text):
"""
The add resource operation allows the administrator to add a file
resource, for example, the organization's logo or custom banner.
The resource can be used by any member of the organization. File
resources use storage space from your quota and are scanned for
viruses.
Inputs:
key - The name the resource should be stored under.
filePath - path of file to upload
text - Some text to be written (for example, JSON or JavaScript)
directly to the resource from a web client.
"""
url = self.root + "/addresource"
params = {
"f": "json",
"token" : self._securityHandler.token,
"key" : key,
"text" : text
}
files = {}
files['file'] = filePath
res = self._post(url=url,
param_dict=params,
files=files,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return res
#----------------------------------------------------------------------
def removeResource(self, key):
"""
The Remove Resource operation allows the administrator to remove a
file resource.
Input:
key - name of resource to delete
"""
url = self._url + "/removeresource"
params = {
"key" : key,
"f" : "json"
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def securityPolicy(self):
"""gets the object to manage the portal's security policy"""
url = "%s/securityPolicy" % self.root
params = {'f': 'json'}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def resetSecurityPolicy(self):
"""resets the security policy to default install"""
params = {"f" : "json"}
url = "%s/securityPolicy/reset" % self.root
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def updateSecurityPolicy(self,
minLength=8,
minUpper=None,
minLower=None,
minLetter=None,
minDigit=None,
minOther=None,
expirationInDays=None,
historySize=None):
"""updates the Portals security policy"""
params = {
"f" : "json",
"minLength" : minLength,
"minUpper": minUpper,
"minLower": minLower,
"minLetter": minLetter,
"minDigit": minDigit,
"minOther": minOther,
"expirationInDays" : expirationInDays,
"historySize": historySize
}
url = "%s/securityPolicy/update" % self.root
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
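    # Hypothetical usage sketch: require 10+ character passwords that expire
    # every 90 days; unspecified arguments keep their defaults:
    #
    #   portal.updateSecurityPolicy(minLength=10, expirationInDays=90)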
#----------------------------------------------------------------------
@property
def portalAdmin(self):
"""gets a reference to a portal administration class"""
from ..manageportal import PortalAdministration
return PortalAdministration(admin_url="https://%s/portaladmin" % self.portalHostname,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initalize=False)
#----------------------------------------------------------------------
def addUser(self, invitationList,
subject, html):
"""
adds a user without sending an invitation email
Inputs:
invitationList - InvitationList class used to add users without
sending an email
subject - email subject
html - email message sent to users in invitation list object
"""
url = self._url + "/invite"
params = {"f" : "json"}
if isinstance(invitationList, parameters.InvitationList):
params['invitationList'] = invitationList.value()
params['html'] = html
params['subject'] = subject
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def inviteByEmail(self,
emails,
subject,
text,
html,
role="org_user",
mustApprove=True,
expiration=1440):
"""Invites a user or users to a site.
Inputs:
            emails - comma-separated list of email addresses
            subject - title of the email
            text - email body as plain text
            html - email body as html
            role - site role (cannot be administrator)
            mustApprove - whether a joining user must be approved by an
                          administrator
            expiration - time in minutes; default is 1440 (one day)
"""
url = self.root + "/inviteByEmail"
params = {
"f" : "json",
"emails": emails,
"subject": subject,
"text": text,
"html" : html,
"role" : role,
"mustApprove": mustApprove,
"expiration" : expiration
}
return self._post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
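    # Hypothetical usage sketch; the addresses are illustrative:
    #
    #   portal.inviteByEmail(emails="a@example.com,b@example.com",
    #                        subject="Join our portal",
    #                        text="You are invited.",
    #                        html="<p>You are invited.</p>")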
#----------------------------------------------------------------------
@property
def invitations(self):
"""gets all the invitations to the current portal"""
params = {"f": "json"}
url = "%s/invitations" % self.root
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def usage(self, startTime, endTime, vars=None, period=None,
groupby=None, name=None, stype=None, etype=None,
appId=None, deviceId=None, username=None, appOrgId=None,
userOrgId=None, hostOrgId=None):
"""
        returns usage statistics for the portal between startTime and
        endTime; both are local datetime objects and are converted to the
        portal's epoch time format before the request is made
"""
url = self.root + "/usage"
startTime = str(int(local_time_to_online(dt=startTime)))
endTime = str(int(local_time_to_online(dt=endTime)))
params = {
'f' : 'json',
'startTime' : startTime,
'endTime' : endTime,
'vars' : vars,
'period' : period,
'groupby' : groupby,
'name' : name,
'stype' : stype,
'etype' : etype,
'appId' : appId,
'deviceId' : deviceId,
'username' : username,
'appOrgId' : appOrgId,
'userOrgId' : userOrgId,
'hostOrgId' : hostOrgId,
}
params = {key:item for key,item in params.items() if item is not None}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
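    # Hypothetical usage sketch; startTime/endTime are local datetime objects.
    # The "1d" period value is an assumption -- consult the REST docs:
    #
    #   import datetime
    #   end = datetime.datetime.now()
    #   start = end - datetime.timedelta(days=7)
    #   report = portal.usage(startTime=start, endTime=end, period="1d")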
#----------------------------------------------------------------------
@property
def IDP(self):
"""gets the IDP information for the portal/agol"""
url = "%s/idp" % self.root
params = {"f": "json"}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
########################################################################
class Servers(BaseAGOLClass):
"""This resource lists the ArcGIS Server sites that have been federated
with the portal.This resource is not applicable to ArcGIS Online; it is
only applicable to Portal for ArcGIS.
"""
_servers = None
_surl = None
_url = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
_json = None
_json_dict = None
########################################################################
class Server(BaseAGOLClass):
        """represents a single server instance registered with the portal"""
        _surl = None
        _url = None
        _id = None
        _name = None
        _adminUrl = None
        _isHosted = None
        _serverKey = None
        _serverType = None
        _securityHandler = None
        _proxy_url = None
        _proxy_port = None
        _json = None
        _json_dict = None
#----------------------------------------------------------------------
def __init__(self,
url,
securityHandler,
proxy_url=None,
proxy_port=None,
initalize=False):
"""Constructor"""
self._surl = url
self._securityHandler = securityHandler
if not securityHandler is None:
self._referer_url = securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if initalize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
"""loads the property data into the class"""
params = {
"f" : "pjson"
}
json_dict = self._get(url=self._surl,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self._json_dict = json_dict
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.items():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print( k, " - attribute not implemented in Servers.Server class.")
#----------------------------------------------------------------------
def __str__(self):
"""returns class as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""iterates through raw JSON"""
if self._json_dict is None:
self.__init()
for k,v in self._json_dict.items():
yield [k,v]
#----------------------------------------------------------------------
@property
def root(self):
"""returns classes URL"""
return self._url
#----------------------------------------------------------------------
@property
def id(self):
"""gets the server id"""
if self._id is None:
self.__init()
return self._id
#----------------------------------------------------------------------
@property
def name(self):
"""gets the server name"""
if self._name is None:
self.__init()
return self._name
#----------------------------------------------------------------------
@property
def adminUrl(self):
"""gets the adminURL for the server"""
if self._adminUrl is None:
self.__init()
return self._adminUrl
#----------------------------------------------------------------------
@property
def url(self):
"""gets the url for the server"""
if self._url is None:
self.__init()
return self._url
#----------------------------------------------------------------------
@property
def isHosted(self):
"""gets the isHosted value"""
if self._isHosted is None:
self.__init()
return self._isHosted
#----------------------------------------------------------------------
@property
def serverKey(self):
"""gets the server key"""
if self._serverKey is None:
self.__init()
return self._serverKey
#----------------------------------------------------------------------
@property
def serverType(self):
"""gets the server type"""
if self._serverType is None:
self.__init()
return self._serverType
#----------------------------------------------------------------------
def unregister(self):
"""
This operation unregisters an ArcGIS Server site from the portal.
The server is no longer federated with the portal after this
operation completes.
After this operation completes, you must invoke the Update Security
Configuration operation on your ArcGIS Server site to specify how
you want the server to work with users and roles.
            The server is identified by this object's URL, so the operation
            takes no inputs.
"""
url = self._url + "/unregister"
params = {
"f" : "json"
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def update(self,
name,
url,
adminUrl,
isHosted,
serverType):
"""
This operation updates the properties of an ArcGIS Server site that
has been registered, or federated, with the portal. For example,
you can use this operation to change the federated site that acts
as the portal's hosting server.
Inputs:
name - The fully qualified name of the machine hosting the
ArcGIS Server site, followed by the port.
url - The externally visible URL of the ArcGIS Server site,
using the fully qualified name of the machine.
adminUrl - The administrative URL of the ArcGIS Server site,
using the fully qualified name of the machine.
isHosted - A Boolean property denoting whether the ArcGIS Server
site will be allowed to host services for the portal
(true) or will not be allowed to host services
(false).
serverType - The type of server being registered with the portal
For example: ArcGIS.
"""
url = self._url + "/update"
params = {
"name" : name,
"url" : url,
"adminUrl" : adminUrl,
"isHosted" : isHosted,
"serverType" : serverType
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def __init__(self,
url,
securityHandler,
proxy_url=None,
proxy_port=None,
initalize=False):
"""Constructor"""
        if not url.lower().endswith('/servers'):
url = url + "/servers"
self._surl = url
self._securityHandler = securityHandler
if not securityHandler is None:
self._referer_url = securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if initalize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
"""loads the property data into the class"""
params = {
"f" : "json"
}
json_dict = self._get(url=self._surl,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self._json_dict = json_dict
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.items():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print( k, " - attribute not implemented in Servers class.")
#----------------------------------------------------------------------
def __str__(self):
"""returns class as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""iterates through raw JSON"""
if self._json_dict is None:
self.__init()
for k,v in self._json_dict.items():
yield [k,v]
#----------------------------------------------------------------------
@property
def root(self):
"""returns classes URL"""
return self._surl
#----------------------------------------------------------------------
def register(self,
name,
url,
adminUrl,
isHosted,
serverType):
"""
You can optionally register (or "federate") an ArcGIS Server site
with your Portal for ArcGIS deployment. This provides the
following benefits:
The server and the portal share the same user store (that of
the portal). This results in a convenient single sign-on
experience.
Any items you publish to the server are automatically shared
on the portal.
You can optionally allow the server to host tiled map services
and feature services published by portal users.
After you register a server with your portal, you must invoke the
Update Security Configuration operation on the ArcGIS Server site
and configure the site's security store to take advantage of users
and roles from the portal.
This operation is only applicable to Portal for ArcGIS; it is not
supported with ArcGIS Online.
Inputs:
name - The fully qualified name of the machine hosting the
ArcGIS Server site, followed by the port.
url - The externally visible URL of the ArcGIS Server site,
using the fully qualified name of the machine.
adminUrl - The administrative URL of your ArcGIS Server site,
using the fully qualified name of the machine.
isHosted - A Boolean property denoting whether the ArcGIS Server
site will be allowed to host services for the portal
(true) or not be allowed to host services (false).
serverType - The type of server being registered with the portal
For example: ArcGIS.
"""
url = self.root + "/register"
params = {
"f" : "json",
"url" : url,
"adminUrl" : adminUrl,
"isHosted" : isHosted,
"name" : name,
"serverType" : serverType
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
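    # Hypothetical usage sketch (Portal for ArcGIS only); the hostnames are
    # illustrative:
    #
    #   servers.register(name="gis.example.com:6443",
    #                    url="https://gis.example.com/arcgis",
    #                    adminUrl="https://gis.example.com:6443/arcgis/admin",
    #                    isHosted=True,
    #                    serverType="ArcGIS")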
#----------------------------------------------------------------------
@property
def servers(self):
"""gets all the server resources"""
self.__init()
items = []
for k,v in self._json_dict.items():
if k == "servers":
for s in v:
if 'id' in s:
url = "%s/%s" % (self.root, s['id'])
items.append(
self.Server(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port))
del k,v
return items
########################################################################
class Roles(BaseAGOLClass):
"""Handles the searching, creation, deletion and updating of roles on
AGOL or Portal.
"""
_url = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
#----------------------------------------------------------------------
def __init__(self,
url,
securityHandler,
proxy_url=None,
proxy_port=None):
"""Constructor"""
if url.find('/roles') < 0:
url = url + "/roles"
self._url = url
self._securityHandler = securityHandler
self._proxy_url = proxy_url
self._proxy_port = proxy_port
#----------------------------------------------------------------------
def __str__(self):
"""returns the roles as a string"""
nextCount = 0
start = 0
num = 100
results = []
while nextCount != -1:
res = self.roles(start=start + nextCount, num=num)
results = results + res['roles']
nextCount = int(res['nextStart'])
return json.dumps(results)
#----------------------------------------------------------------------
def __iter__(self):
"""iterator to loop through role entries"""
nextCount = 0
start = 0
num = 100
results = []
while nextCount != -1:
res = self.roles(start=start + nextCount, num=num)
for r in res['roles']:
yield r
nextCount = int(res['nextStart'])
#----------------------------------------------------------------------
def roles(self, start, num):
"""
lists the custom roles on the AGOL/Portal site
Input:
           start - the number of the first entry in the result set (1-based,
                   default 1)
           num - the maximum number of roles to return per page (default 100)
"""
url = self._url
params = {
"f" : "json",
"start" : start,
"num" : num
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def deleteRole(self, roleID):
"""
deletes a role by ID
"""
url = self._url + "/%s/delete" % roleID
params = {
"f" : "json"
}
        return self._post(url=url,
                          param_dict=params,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def updateRole(self, roleID, name, description):
"""allows for the role name or description to be modified"""
params = {
"name" : name,
"description" : description,
"f" : "json"
}
url = self._url + "/%s/update"
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def info(self, roleID):
""""""
url = self._url + "/%s" % roleID
params = {"f" : "json"}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def findRoleID(self, name):
"""searches the roles by name and returns the role's ID"""
for r in self:
if r['name'].lower() == name.lower():
return r['id']
del r
return None
#----------------------------------------------------------------------
def privileges(self, roleID):
"""returns the assigned priveleges for a given custom role"""
url = self._url + "/%s/privileges" % roleID
params = {"f" : "json"}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def setPrivileges(self, roleID, privileges):
"""
assigns a role a set of actions that the role can perform on the
AGOL or Portal site.
Input:
roleID - unique id of the role
privileges - list of privileges to assign to role.
"""
params = {
"f" : "json",
"privileges" : {"privileges": privileges},
"id": roleID
}
url = self._url + "/%s/setPrivileges" % roleID
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
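    # Hypothetical usage sketch; the privilege strings are illustrative and
    # must match the portal's supported privilege names:
    #
    #   rid = roles.findRoleID("Publishers Lite")
    #   roles.setPrivileges(rid, ["portal:user:createItem",
    #                             "portal:user:shareToGroup"])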
| apache-2.0 | 4,190,805,069,658,481,000 | 37.992898 | 101 | 0.445108 | false |
avanc/mopidy-usbplaylist | mopidy_usbplaylist/playlists.py | 1 | 1414 | from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
from mopidy import backend
from mopidy.models import Playlist
from mopidy.models import Track
import os
import fnmatch
import glob
def find_files(path):
    """returns the mp3 files in the top level of path (non-recursive)"""
    matches = glob.glob(os.path.join(path, '*.mp3'))
    return matches
def find_files2(path):
    """recursively returns all mp3 files found below path"""
    matches = []
    for root, dirnames, filenames in os.walk(path):
        for filename in fnmatch.filter(filenames, '*.mp3'):
            matches.append(os.path.join(root, filename))
    return matches
class USBPlaylistProvider(backend.PlaylistsProvider):
def create(self, name):
pass
def delete(self, uri):
pass
    def lookup(self, uri):
        path = self.backend.config['usbplaylist']['path']
        for playlist in self.playlists:
            if playlist.uri == uri:
                files = find_files2(path)
                tracks = []
                for fname in files:
                    tracks.append(Track(uri='file:' + fname, name="USB-File"))
                return playlist.copy(tracks=tracks)
def refresh(self):
playlists=[]
uri="usb://playall"
playlist = Playlist(uri=uri, name="USB")
playlists.append(playlist)
self.playlists = playlists
backend.BackendListener.send('playlists_loaded')
def save(self, playlist):
pass
| apache-2.0 | -1,820,384,431,323,083,800 | 24.25 | 75 | 0.603253 | false |
TetraAsh/baruwa2 | baruwa/forms/accounts.py | 1 | 6737 | # -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2012 Andrew Colin Kissa <andrew@topdog.za.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""accounts forms"""
from wtforms import PasswordField, validators, DecimalField, RadioField
from wtforms import BooleanField, TextField, SelectField
from wtforms.ext.sqlalchemy.fields import QuerySelectMultipleField
from pylons.i18n.translation import lazy_ugettext as _
from sqlalchemy.orm.exc import NoResultFound
from baruwa.forms import Form
from baruwa.model.accounts import User
from baruwa.model.domains import Domain
from baruwa.model.meta import Session
from baruwa.forms.organizations import check_pw_strength
from baruwa.forms import TIMEZONE_TUPLES, REQ_MSG, EMAIL_MSG
from baruwa.forms.messages import MultiCheckboxField
ACCOUNT_TYPES = (
('3', _('User')),
('2', _('Domain admin')),
('1', _('Administrator')),
)
def check_password(form, field):
"check password strength"
check_pw_strength(field.data)
def check_domain(form, field):
"check domain"
domain = field.data.split('@')[1]
try:
Session.query(Domain).filter(Domain.name == domain).one()
except NoResultFound:
raise validators.ValidationError(
_(u'The domain: %(dom)s is not local')
% dict(dom=domain)
)
def check_account(form, field):
"check account"
    if int(field.data) == 3 and not form.domains.data:
        raise validators.ValidationError(
            _(u'Please select at least one domain')
)
def can_reset(form, field):
"check account is legible to reset"
try:
user = Session.query(User)\
.filter(User.email == field.data)\
.one()
if user.account_type != 3:
raise validators.ValidationError(
_("Admin accounts cannot be reset via the web"))
except NoResultFound:
raise validators.ValidationError(_("Account not found"))
class AddUserForm(Form):
"""Add user"""
username = TextField(_('Username'),
[validators.Required(message=REQ_MSG),
validators.Length(min=4, max=254)])
firstname = TextField(_('First name'),
[validators.Length(max=254)])
lastname = TextField(_('Last name'),
[validators.Length(max=254)])
password1 = PasswordField(_('New Password'), [check_password,
validators.Required(message=REQ_MSG),
validators.EqualTo('password2',
message=_('Passwords must match'))])
password2 = PasswordField(_('Retype Password'),
[validators.Required(message=REQ_MSG)])
email = TextField(_('Email address'),
[validators.Required(message=REQ_MSG),
validators.Email(message=EMAIL_MSG)])
timezone = SelectField(_('Timezone'), choices=TIMEZONE_TUPLES)
account_type = SelectField(_('Account type'),
choices=list(ACCOUNT_TYPES))
domains = QuerySelectMultipleField(_('Domains'),
get_label='name',
allow_blank=True)
active = BooleanField(_('Enabled'))
send_report = BooleanField(_('Send reports'))
spam_checks = BooleanField(_('Enable spam checks'), default=True)
low_score = DecimalField(_('Probable spam score'), places=1, default=0)
high_score = DecimalField(_('Definite spam score'), places=1, default=0)
def validate_domains(form, field):
if int(form.account_type.data) == 3 and not field.data:
raise validators.ValidationError(
                _(u'Please select at least one domain'))
class EditUserForm(Form):
"""Edit user"""
username = TextField(_('Username'), [validators.Required(message=REQ_MSG),
validators.Length(min=4, max=254)])
firstname = TextField(_('First name'), [validators.Length(max=254)])
lastname = TextField(_('Last name'), [validators.Length(max=254)])
email = TextField(_('Email address'),
[validators.Required(message=REQ_MSG)])
timezone = SelectField(_('Timezone'), choices=TIMEZONE_TUPLES)
domains = QuerySelectMultipleField(_('Domains'), get_label='name',
allow_blank=False)
active = BooleanField(_('Enabled'))
send_report = BooleanField(_('Send reports'))
spam_checks = BooleanField(_('Enable spam checks'))
low_score = DecimalField(_('Spam low score'), places=1)
high_score = DecimalField(_('Spam high score'), places=1)
class BulkDelUsers(Form):
"""Bulk account delete form"""
accountid = MultiCheckboxField('')
whatdo = RadioField('', choices=[('delete', _('delete'),),
('disable', _('disable'),),
('enable', _('enable'),),])
class AddressForm(Form):
"""Add alias address"""
address = TextField(_('Email Address'),
[validators.Required(message=REQ_MSG),
validators.Email(message=EMAIL_MSG), check_domain])
enabled = BooleanField(_('Enabled'))
class ChangePasswordForm(Form):
"""Admin change user password"""
password1 = PasswordField(_('New Password'),
[check_password, validators.Required(message=REQ_MSG),
validators.EqualTo('password2',
message=_('Passwords must match'))])
password2 = PasswordField(_('Retype Password'),
[validators.Required(message=REQ_MSG)])
class UserPasswordForm(ChangePasswordForm):
"""User password change"""
password3 = PasswordField(_('Old Password'),
[validators.Required(message=REQ_MSG)])
class ResetPwForm(Form):
"""User reset password form"""
email = TextField(_('Email Address'),
[validators.Required(message=REQ_MSG),
validators.Email(message=EMAIL_MSG),
can_reset])
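# Example (assumed usage): validating one of these forms in a controller;
# `request` and `session` are illustrative names, not defined in this module.
#   form = AddUserForm(request.POST, csrf_context=session)
#   if request.method == 'POST' and form.validate():
#       ...  # create the account; form.errors holds the messages otherwise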
| gpl-3.0 | 4,717,546,960,922,763,000 | 38.629412 | 78 | 0.614962 | false |
Schille/weimar-graphstore | weimar.py | 1 | 1485 | '''
Created on Mar 17, 2014
@author: mschilonka
'''
import argparse, sys
from remote import server as Server
from remote import worker as Worker
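# Example invocations (assumed; flags as defined below):
#   python weimar.py --worker --threads 4
#   python weimar.py --server -i 127.0.0.1 -p 1982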
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--worker", help="Starts a weimar worker instance.", action="store_true")
parser.add_argument("-t", "--threads",type=int, dest='threads', help="The number of threads running in one a worker (Default=3).")
parser.add_argument("-s", "--server", help="Starts a weimar graph server.", action="store_true")
parser.add_argument("-i", "--hyperdex-ip",type=str ,dest='hyperdex_ip', help='The HyperDex coordinator IP address. Must be specified if a server is started.')
parser.add_argument("-p", "--hyperdex-port",type=int ,dest='hyperdex_port', help="The HyperDex coordinator port number. Must be specified if a server is started.")
args = parser.parse_args()
if args.worker:
if(args.threads is None):
args.threads = 3
Worker.start_worker(args.threads)
elif args.server:
        if args.hyperdex_ip is None or args.hyperdex_port is None:
            print('When starting a Weimar server, please specify the HyperDex coordinator\'s IP and port.')
parser.print_help()
sys.exit(1)
        if args.threads is not None:
print('--threads only refers to a worker process and will be omitted.')
Server.start_server(args.hyperdex_ip, args.hyperdex_port) | mit | 6,257,446,607,517,198,000 | 46.935484 | 167 | 0.665993 | false |
yunify/qingcloud-cli | qingcloud/cli/iaas_client/actions/notification/describe_notification_items.py | 1 | 2183 | # =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.iaas_client.actions.base import BaseAction
class DescribeNotificationItemsAction(BaseAction):
action = 'DescribeNotificationItems'
command = 'describe-notification-items'
usage = '%(prog)s [-i --notification_items...] [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument('-i', '--notification-items', dest='notification_items',
action='store', type=str, default=None,
help='An array including IDs of notification items.')
parser.add_argument('-l', '--notification-list', dest='notification_list',
action='store', type=str, default=None,
help='The ID of notification list.')
parser.add_argument('-t', '--notification-item-type', dest='notification_item_type',
action='store', type=str, default=None,
help='The type of notification item, including email, phone and webhook.')
@classmethod
def build_directive(cls, options):
directive = {
"notification_items": options.notification_items,
"notification_list": options.notification_list,
"notification_item_type": options.notification_item_type
}
return directive
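# Example (assumed CLI usage; the subcommand name comes from `command` above):
#   qingcloud iaas describe-notification-items \
#       --notification-items "nif-abc123" --notification-item-type email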
| apache-2.0 | 3,015,553,966,601,937,400 | 45.446809 | 102 | 0.584059 | false |
room77/py77 | pylib/util/git_util.py | 1 | 7051 | #!/usr/bin/env python
"""
utility file for various git functions
"""
__author__ = 'edelman@room77.com (Nicholas Edelman)'
__copyright__ = 'Copyright 2013 Room77, Inc.'
import os
import subprocess
from pylib.base.exec_utils import ExecUtils
from pylib.base.term_color import TermColor
class Error(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return "'%s'" % self.value
class EmptyHotfixError(Exception):
"""this exception is raised when a hotfix is applied twice"""
def __init__(self, value):
self.value = value
def __str__(self):
return "'%s'" % self.value
class GitUtil(object):
@classmethod
def apply_hotfix(cls, branch, commit_hash=""):
"""applies a hotfix to a specific branch
Args:
branch (string) - the branch to apply the hotfix
hash (string) - the commit hash to use
Raises:
EmptyHotfixError - raised when the hotfix is empty
      Error - a critical error, such as a conflict, that stopped the
        hotfix from being applied
"""
print("moving to branch %s" % TermColor.ColorStr(branch, 'GREEN'))
# get onto the appropriate branch
cls.checkout_branch(branch)
# try to cherry-pick
print(TermColor.ColorStr("Applying hotfix to branch: %s" % branch,
'GREEN'))
ret = ExecUtils.RunCmd('git cherry-pick %s' % commit_hash)[0]
if not ret == 0:
r = ExecUtils.RunCmd('git diff --name-only')
if r[0]:
raise Error(TermColor.ColorStr('error doing a git diff', 'RED'))
files = r[1]
if not files:
raise EmptyHotfixError('hotfix is empty. likely already applied')
# not an error if empty
raise Error(TermColor.ColorStr(
('Hotfix apply failed at step cherry pick on branch %s.\n'
'You NEED to fix this NOW! Go to %s and fix the issue! '
'Impacted files: %s') % (
cls.get_current_branch(), os.getcwd(), files), 'RED'))
# push cherry-pick to remote
ret = ExecUtils.RunCmd('git push origin %s' % branch)[0]
if not ret == 0:
raise Error(TermColor.ColorStr(
'Please manually resolve your merge conflicts,' + \
'then commit, and finally run hotfix selecting the ' + \
'branches that have not yet received the commit', 'RED'))
print(TermColor.ColorStr('Applied hotfix to %s' % branch, 'GREEN'))
print(TermColor.ColorStr('On branch %s' % branch, 'GREEN'))
@classmethod
def checkout_branch(cls, branch):
"""Checks out the specified branch with the latest code
Args:
branch (string) - the branch name
"""
# fetches the latest code
ret = ExecUtils.RunCmd('git fetch origin')[0]
if not ret == 0:
raise Error(TermColor.ColorStr('error during git fetch origin!', 'RED'))
#subprocess.check_call(
# 'git checkout -b %s --track origin/%s 2>/dev/null' % \
# (branch, branch),
# shell=True)
ret = ExecUtils.RunCmd('git checkout -B %s --track origin/%s' % (
branch, branch))[0]
if not ret == 0:
raise Error(TermColor.ColorStr(
'error checking out branch %s' % branch, 'RED'))
@classmethod
def commit_push_hotfix(cls, files, msg, branch=''):
"""Commits/pushes the set of files to the CURRENT branch
AND if a branch param is specified, hotfixes to the specified branch
with this same commit
Args:
files (list) - the files to commit
msg (string) - the commit message
branch (string) - the name of the additional branch to hotfix if desired
"""
# commit/push the specified files
cls.commit_push(files, msg)
# find the SHA1 of the latest commit
commit_hash = cls.get_latest_commit()
# save the current branch
current_branch = cls.get_current_branch()
# hotfix to branch if not already on the branch
if branch and not current_branch == branch:
cls.apply_hotfix(branch, commit_hash)
# get back on current branch
cls.checkout_branch(current_branch)
@classmethod
def commit_push(cls, files, msg):
"""Commits to the current branch AND pushes to remote
Args:
files (list) - list of files to commit
msg (string) - the commit message
"""
ret = ExecUtils.RunCmd('git commit %s -m "%s"' % (' '.join(files), msg))[0]
if not ret == 0:
raise Error(TermColor.ColorStr(
'error committing these files: %s' % ' '.join(files), 'RED'))
ret = ExecUtils.RunCmd('git pull && git push')[0]
if not ret == 0:
raise Error(TermColor.ColorStr(
'Please manually resolve any conflicts preventing git push of ' + \
'the commit to remote', 'RED'))
@classmethod
def create_branch(cls, name):
"""Create and checkout branch and push to origin for tracking. Simply
checks out if the branch already exists
Args:
name (string) - the name of the branch to create
"""
# only create a new branch if did not exist before
params = ''
# check if the branch already exists
ret = subprocess.call(
'git show-ref --verify refs/heads/%s' % name, shell=True)
if ret:
params = '-b'
# checkout and/or create the branch
subprocess.check_call('git checkout %s %s' % (params, name), shell=True)
# push to remote for tracking
subprocess.check_call('git push -u origin %s' % name, shell=True)
@classmethod
def get_current_branch(cls):
"""Returns the name of the current branch"""
cmd = 'git rev-parse --abbrev-ref HEAD'
r = ExecUtils.RunCmd(cmd)
if r[0]:
raise Error(TermColor.ColorStr('error executing cmd %s' % cmd, 'RED'))
return r[1].strip()
@classmethod
def get_latest_commit(cls):
"""Returns the latest commit hash"""
commit_hash = subprocess.check_output('git log -1 --pretty=format:%H',
shell=True)
if not commit_hash:
raise Error(TermColor.ColorStr(
'unable to find the latest commit hash', 'RED'))
return commit_hash
@classmethod
def get_latest_release_branch(cls):
"""Returns the name of the latest release branch"""
return subprocess.check_output("git branch -r | grep release- | sed -e 's/^[ \t]*//' | sed 's/\* //' | sed 's/origin\///' | sort -r | head -n1", shell=True).strip()
@classmethod
def repo_root(cls):
"""Returns the root of the repository"""
return subprocess.check_output('git rev-parse --show-toplevel',
shell=True).strip()
@classmethod
def update_submodules(cls):
"""Does a git pull and then update the submodules to the latest version
AND finally ensure the submodule is on master
@warning if you run this from a module run that does a os.chdir, this
os.chdir will NOT persist here
"""
if ExecUtils.RunCmd('git pull')[0]:
raise Error(TermColor.ColorStr(
'unable to git pull as part of submodule update', 'RED'))
if ExecUtils.RunCmd('git submodule init && git submodule update')[0]:
raise Error(TermColor.ColorStr(
'git submodule update failed!', 'RED'))
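# Example (assumed usage): commit a file on the current branch and apply the
# same commit as a hotfix onto the latest release branch (the file name is
# illustrative).
#   release = GitUtil.get_latest_release_branch()
#   GitUtil.commit_push_hotfix(['RELEASE_NOTES.md'], 'fix typo', branch=release)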
| mit | 3,375,290,544,081,113,600 | 35.158974 | 168 | 0.636505 | false |
llvm/llvm-lnt | lnt/server/db/migrations/upgrade_10_to_11.py | 1 | 1936 | # Version 11 of the database gives each test-suite a baseline order
# that runs can be compared against.
import sqlalchemy
from sqlalchemy import String, Integer, Column, ForeignKey
# Import the original schema from upgrade_0_to_1 since upgrade_1_to_2 does not
# change the actual schema, but rather adds functionality vis-a-vis orders.
import lnt.server.db.migrations.upgrade_0_to_1 as upgrade_0_to_1
import lnt.server.db.migrations.upgrade_7_to_8 as upgrade_7_to_8
def add_baselines(test_suite):
"""Give test-suites a baseline order.
"""
# Grab the Base for the previous schema so that we have all
# the definitions we need.
base = upgrade_7_to_8.add_regressions(test_suite)
# Grab our db_key_name for our test suite so we can properly
# prefix our fields/table names.
db_key_name = test_suite.db_key_name
class Baseline(base):
"""Baselines to compare runs to."""
__tablename__ = db_key_name + '_Baseline'
id = Column("ID", Integer, primary_key=True)
name = Column("Name", String(32), unique=True)
comment = Column("Comment", String(256))
order_id = Column("OrderID", Integer,
ForeignKey("%s_Order.ID" % db_key_name), index=True)
return base
def upgrade_testsuite(engine, name):
# Grab Test Suite.
session = sqlalchemy.orm.sessionmaker(engine)()
test_suite = session.query(upgrade_0_to_1.TestSuite). \
filter_by(name=name).first()
assert (test_suite is not None)
    # Add the Baseline table to the test suite.
base = add_baselines(test_suite)
base.metadata.create_all(engine)
# Commit changes (also closing all relevant transactions with
# respect to Postgres like databases).
session.commit()
session.close()
def upgrade(engine):
# Create our FieldChangeField table and commit.
upgrade_testsuite(engine, 'nts')
upgrade_testsuite(engine, 'compile')
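# Example (assumed usage): applying this migration to a local SQLite database.
#   import sqlalchemy
#   engine = sqlalchemy.create_engine('sqlite:///lnt.db')
#   upgrade(engine)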
| apache-2.0 | 2,749,207,809,383,893,500 | 32.964912 | 78 | 0.682851 | false |
hack4impact/Givology | mainSite/source/proj/giv/captcha.py | 1 | 4110 | import urllib2, urllib
from proj.settings import *
API_SSL_SERVER="https://www.google.com/recaptcha/api"
API_SERVER="http://www.google.com/recaptcha/api"
VERIFY_SERVER="www.google.com"
class RecaptchaResponse(object):
def __init__(self, is_valid, error_code=None):
self.is_valid = is_valid
self.error_code = error_code
def displayhtml (public_key,
use_ssl = False,
error = None):
"""Gets the HTML to display for reCAPTCHA
public_key -- The public api key
use_ssl -- Should the request be sent over ssl?
error -- An error message to display (from RecaptchaResponse.error_code)"""
error_param = ''
if error:
error_param = '&error=%s' % error
if use_ssl:
server = API_SSL_SERVER
else:
server = API_SERVER
return """<script type="text/javascript" src="%(ApiServer)s/challenge?k=%(PublicKey)s%(ErrorParam)s"></script>
<noscript>
<iframe src="%(ApiServer)s/noscript?k=%(PublicKey)s%(ErrorParam)s" height="300" width="500" frameborder="0"></iframe><br />
<textarea name="recaptcha_challenge_field" rows="3" cols="40"></textarea>
<input type='hidden' name='recaptcha_response_field' value='manual_challenge' />
</noscript>
""" % {
'ApiServer' : server,
'PublicKey' : public_key,
'ErrorParam' : error_param,
}
def submit (recaptcha_challenge_field,
recaptcha_response_field,
private_key,
remoteip):
"""
Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_challenge_field -- The value of recaptcha_challenge_field from the form
recaptcha_response_field -- The value of recaptcha_response_field from the form
private_key -- your reCAPTCHA private key
remoteip -- the user's ip address
"""
if not (recaptcha_response_field and recaptcha_challenge_field and
len (recaptcha_response_field) and len (recaptcha_challenge_field)):
return RecaptchaResponse (is_valid = False, error_code = 'incorrect-captcha-sol')
def encode_if_necessary(s):
if isinstance(s, unicode):
return s.encode('utf-8')
return s
params = urllib.urlencode ({
'privatekey': encode_if_necessary(private_key),
'remoteip' : encode_if_necessary(remoteip),
'challenge': encode_if_necessary(recaptcha_challenge_field),
'response' : encode_if_necessary(recaptcha_response_field),
})
request = urllib2.Request (
url = "http://%s/recaptcha/api/verify" % VERIFY_SERVER,
data = params,
headers = {
"Content-type": "application/x-www-form-urlencoded",
"User-agent": "reCAPTCHA Python"
}
)
httpresp = urllib2.urlopen (request)
    return_values = httpresp.read ().splitlines ()
    httpresp.close()
return_code = return_values [0]
if (return_code == "true"):
return RecaptchaResponse (is_valid=True)
else:
return RecaptchaResponse (is_valid=False, error_code = return_values [1])
def check_captcha(request):
captcha_challenge = request.POST.get('recaptcha_challenge_field')
captcha_response = request.POST.get('recaptcha_response_field')
captcha_result = None
ip = None
if 'HTTP_X_FORWARDED_FOR' in request.META:
ip = request.META['HTTP_X_FORWARDED_FOR']
elif 'REMOTE_ADDR' in request.META:
ip = request.META['REMOTE_ADDR']
if captcha_response is not None and captcha_challenge is not None:
captcha_result = submit(captcha_challenge,
captcha_response,
recaptcha_private_key,
ip)
return captcha_result
def new_captcha_html(captcha_result):
if captcha_result is None:
captcha_html = displayhtml(recaptcha_public_key, use_ssl=True)
else:
captcha_html = displayhtml(recaptcha_public_key, use_ssl=True, error = captcha_result.error_code)
return captcha_html
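# Example (assumed usage) inside a Django view; `request` is the HttpRequest:
#   result = check_captcha(request)
#   if result is not None and result.is_valid:
#       pass  # challenge solved, continue processing the form
#   else:
#       captcha_html = new_captcha_html(result)  # re-render with any error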
| mit | 701,470,126,479,050,600 | 32.414634 | 125 | 0.62871 | false |
catalyst/l3overlay | src/l3overlay/l3overlayd/process/ipsec.py | 1 | 6516 | #
# IPsec overlay network manager (l3overlay)
# l3overlay/l3overlayd/process/ipsec.py - IPsec process manager
#
# Copyright (c) 2017 Catalyst.net Ltd
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
IPsec process manager.
'''
import subprocess
from l3overlay import util
from l3overlay.util.exception import L3overlayError
from l3overlay.util.worker import Worker
class UnexpectedReturnCodeError(L3overlayError):
'''
Exception to raise when the process returns an unexpected code.
'''
def __init__(self, command, code):
super().__init__("unexpected '%s' return code: %i" % (command, code))
# pylint: disable=too-many-instance-attributes
class Process(Worker):
'''
IPsec process manager.
'''
description = "ipsec process"
def __init__(self, daemon):
'''
Set internal fields for the IPsec process.
'''
super().__init__()
self.dry_run = daemon.dry_run
self.logger = daemon.logger
self.use_ipsec = daemon.use_ipsec
if not self.use_ipsec:
return
self.ipsec_manage = daemon.ipsec_manage
self.template_dir = daemon.template_dir
self.ipsec_conf = daemon.ipsec_conf
self.ipsec_secrets = daemon.ipsec_secrets
self.ipsec_conf_template = util.template_read(self.template_dir, "ipsec.conf")
self.ipsec_secrets_template = util.template_read(self.template_dir, "ipsec.secrets")
self.conns = dict()
self.secrets = dict()
for link in daemon.mesh_links.keys():
self.tunnel_add(link, daemon.ipsec_psk)
for link, data in daemon.ipsec_tunnels.items():
psk = data["ipsec-psk"] if data["ipsec-psk"] else daemon.ipsec_psk
self.tunnel_add(link, psk)
self.ipsec = util.command_path("ipsec") if not self.dry_run else util.command_path("true")
def start(self):
'''
Start the IPsec process.
'''
if not self.use_ipsec:
return
self.set_starting()
self.logger.info("starting IPsec process")
self.logger.debug("creating IPsec configuration file '%s'" % self.ipsec_conf)
if not self.dry_run:
with open(self.ipsec_conf, "w") as fil:
fil.write(self.ipsec_conf_template.render(
file=self.ipsec_conf,
ipsec_manage=self.ipsec_manage,
conns=self.conns,
))
self.logger.debug("creating IPsec secrets file '%s'" % self.ipsec_secrets)
if not self.dry_run:
with open(self.ipsec_secrets, "w") as fil:
fil.write(self.ipsec_secrets_template.render(
file=self.ipsec_secrets,
secrets=self.secrets,
))
self.logger.debug("checking IPsec status")
status = subprocess.call(
[self.ipsec, "status"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
if status == 0:
self.logger.debug("reloading IPsec secrets")
subprocess.check_output([self.ipsec, "rereadsecrets"], stderr=subprocess.STDOUT)
self.logger.debug("reloading IPsec configuration")
subprocess.check_output([self.ipsec, "reload"], stderr=subprocess.STDOUT)
elif status == 3:
self.logger.debug("starting IPsec")
subprocess.check_output([self.ipsec, "start"], stderr=subprocess.STDOUT)
else:
raise UnexpectedReturnCodeError("%s status" % self.ipsec, status)
self.logger.info("finished starting IPsec process")
self.set_started()
def stop(self):
'''
Stop the IPsec process.
'''
if not self.use_ipsec:
return
self.set_stopping()
self.logger.info("stopping IPsec process")
self.logger.debug("removing IPsec configuration file '%s'" % self.ipsec_conf)
if not self.dry_run:
util.file_remove(self.ipsec_conf)
self.logger.debug("removing IPsec secrets file '%s'" % self.ipsec_secrets)
if not self.dry_run:
util.file_remove(self.ipsec_secrets)
if self.ipsec_manage:
# When we manage IPsec, it is safe to stop it completely.
self.logger.debug("stopping IPsec")
if not self.dry_run:
subprocess.check_output([self.ipsec, "stop"], stderr=subprocess.STDOUT)
else:
# When we don't, reload the configuration without the tunnels
# configured, and shut down all of the tunnels.
self.logger.debug("reloading IPsec secrets")
if not self.dry_run:
subprocess.check_output([self.ipsec, "rereadsecrets"], stderr=subprocess.STDOUT)
self.logger.debug("reloading IPsec configuration")
if not self.dry_run:
subprocess.check_output([self.ipsec, "reload"], stderr=subprocess.STDOUT)
for conn in self.conns:
self.logger.debug("shutting down IPsec tunnel '%s'" % conn)
if not self.dry_run:
subprocess.check_output(
[self.ipsec, "down", conn],
stderr=subprocess.STDOUT,
)
self.logger.info("finished stopping IPsec process")
self.set_stopped()
def tunnel_add(self, link, psk):
'''
Add an IPsec tunnel and its corresponding PSK to the
database which gets used to configure the IPsec process.
'''
self.conns["%s-%s" % link] = link
        if psk not in self.secrets:
self.secrets[psk] = set()
self.secrets[psk].update(link)
# pylint: disable=no-member
Worker.register(Process)
def create(daemon):
'''
Create a IPsec process object.
'''
return Process(daemon)
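# Example (assumed usage): `daemon` must provide the attributes read in
# Process.__init__ (dry_run, logger, use_ipsec, mesh_links, ipsec_tunnels...).
#   process = create(daemon)
#   process.start()  # writes ipsec.conf/ipsec.secrets and (re)loads IPsec
#   process.stop()   # removes the files and downs or stops the tunnels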
| gpl-3.0 | -8,255,464,561,798,610,000 | 29.166667 | 98 | 0.604819 | false |
mbareta/edx-platform-ft | lms/djangoapps/instructor_task/models.py | 1 | 12291 | """
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py schemamigration instructor_task --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/instructor_task/migrations/
ASSUMPTIONS: modules have unique IDs, even across different module_types
"""
from uuid import uuid4
import csv
import json
import hashlib
import os.path
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.base import ContentFile
from django.db import models, transaction
from openedx.core.storage import get_storage
from xmodule_django.models import CourseKeyField
# define custom states used by InstructorTask
QUEUING = 'QUEUING'
PROGRESS = 'PROGRESS'
class InstructorTask(models.Model):
"""
Stores information about background tasks that have been submitted to
perform work by an instructor (or course staff).
Examples include grading and rescoring.
`task_type` identifies the kind of task being performed, e.g. rescoring.
`course_id` uses the course run's unique id to identify the course.
`task_key` stores relevant input arguments encoded into key value for testing to see
if the task is already running (together with task_type and course_id).
`task_input` stores input arguments as JSON-serialized dict, for reporting purposes.
Examples include url of problem being rescored, id of student if only one student being rescored.
`task_id` stores the id used by celery for the background task.
`task_state` stores the last known state of the celery task
`task_output` stores the output of the celery task.
Format is a JSON-serialized dict. Content varies by task_type and task_state.
`requester` stores id of user who submitted the task
`created` stores date that entry was first created
`updated` stores date that entry was last modified
"""
class Meta(object):
app_label = "instructor_task"
task_type = models.CharField(max_length=50, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
task_key = models.CharField(max_length=255, db_index=True)
task_input = models.CharField(max_length=255)
task_id = models.CharField(max_length=255, db_index=True) # max_length from celery_taskmeta
task_state = models.CharField(max_length=50, null=True, db_index=True) # max_length from celery_taskmeta
task_output = models.CharField(max_length=1024, null=True)
requester = models.ForeignKey(User, db_index=True)
created = models.DateTimeField(auto_now_add=True, null=True)
updated = models.DateTimeField(auto_now=True)
subtasks = models.TextField(blank=True) # JSON dictionary
def __repr__(self):
return 'InstructorTask<%r>' % ({
'task_type': self.task_type,
'course_id': self.course_id,
'task_input': self.task_input,
'task_id': self.task_id,
'task_state': self.task_state,
'task_output': self.task_output,
},)
def __unicode__(self):
return unicode(repr(self))
@classmethod
def create(cls, course_id, task_type, task_key, task_input, requester):
"""
Create an instance of InstructorTask.
"""
# create the task_id here, and pass it into celery:
task_id = str(uuid4())
json_task_input = json.dumps(task_input)
        # check length of task_input, and raise an exception if it's too long:
if len(json_task_input) > 255:
fmt = 'Task input longer than 255: "{input}" for "{task}" of "{course}"'
msg = fmt.format(input=json_task_input, task=task_type, course=course_id)
raise ValueError(msg)
# create the task, then save it:
instructor_task = cls(
course_id=course_id,
task_type=task_type,
task_id=task_id,
task_key=task_key,
task_input=json_task_input,
task_state=QUEUING,
requester=requester
)
instructor_task.save_now()
return instructor_task
@transaction.atomic
def save_now(self):
"""
Writes InstructorTask immediately, ensuring the transaction is committed.
"""
self.save()
@staticmethod
def create_output_for_success(returned_result):
"""
Converts successful result to output format.
Raises a ValueError exception if the output is too long.
"""
        # In future, there should be a check here that the resulting JSON
        # will fit in the column. In the meantime, just raise an exception.
json_output = json.dumps(returned_result)
if len(json_output) > 1023:
raise ValueError("Length of task output is too long: {0}".format(json_output))
return json_output
@staticmethod
def create_output_for_failure(exception, traceback_string):
"""
Converts failed result information to output format.
Traceback information is truncated or not included if it would result in an output string
that would not fit in the database. If the output is still too long, then the
exception message is also truncated.
Truncation is indicated by adding "..." to the end of the value.
"""
tag = '...'
task_progress = {'exception': type(exception).__name__, 'message': unicode(exception.message)}
if traceback_string is not None:
# truncate any traceback that goes into the InstructorTask model:
task_progress['traceback'] = traceback_string
json_output = json.dumps(task_progress)
# if the resulting output is too long, then first shorten the
# traceback, and then the message, until it fits.
too_long = len(json_output) - 1023
if too_long > 0:
if traceback_string is not None:
if too_long >= len(traceback_string) - len(tag):
# remove the traceback entry entirely (so no key or value)
del task_progress['traceback']
too_long -= (len(traceback_string) + len('traceback'))
else:
# truncate the traceback:
task_progress['traceback'] = traceback_string[:-(too_long + len(tag))] + tag
too_long = 0
if too_long > 0:
# we need to shorten the message:
task_progress['message'] = task_progress['message'][:-(too_long + len(tag))] + tag
json_output = json.dumps(task_progress)
return json_output
@staticmethod
def create_output_for_revoked():
"""Creates standard message to store in output format for revoked tasks."""
return json.dumps({'message': 'Task revoked before running'})
class ReportStore(object):
"""
Simple abstraction layer that can fetch and store CSV files for reports
download. Should probably refactor later to create a ReportFile object that
can simply be appended to for the sake of memory efficiency, rather than
passing in the whole dataset. Doing that for now just because it's simpler.
"""
@classmethod
def from_config(cls, config_name):
"""
Return one of the ReportStore subclasses depending on django
configuration. Look at subclasses for expected configuration.
"""
# Convert old configuration parameters to those expected by
# DjangoStorageReportStore for backward compatibility
config = getattr(settings, config_name, {})
storage_type = config.get('STORAGE_TYPE', '').lower()
if storage_type == 's3':
return DjangoStorageReportStore(
storage_class='storages.backends.s3boto.S3BotoStorage',
storage_kwargs={
'bucket': config['BUCKET'],
'location': config['ROOT_PATH'],
'querystring_expire': 300,
'gzip': True,
},
)
elif storage_type == 'localfs':
return DjangoStorageReportStore(
storage_class='django.core.files.storage.FileSystemStorage',
storage_kwargs={
'location': config['ROOT_PATH'],
},
)
return DjangoStorageReportStore.from_config(config_name)
def _get_utf8_encoded_rows(self, rows):
"""
Given a list of `rows` containing unicode strings, return a
new list of rows with those strings encoded as utf-8 for CSV
compatibility.
"""
for row in rows:
yield [unicode(item).encode('utf-8') for item in row]
class DjangoStorageReportStore(ReportStore):
"""
ReportStore implementation that delegates to django's storage api.
"""
def __init__(self, storage_class=None, storage_kwargs=None):
if storage_kwargs is None:
storage_kwargs = {}
self.storage = get_storage(storage_class, **storage_kwargs)
@classmethod
def from_config(cls, config_name):
"""
By default, the default file storage specified by the `DEFAULT_FILE_STORAGE`
setting will be used. To configure the storage used, add a dict in
settings with the following fields::
STORAGE_CLASS : The import path of the storage class to use. If
not set, the DEFAULT_FILE_STORAGE setting will be used.
STORAGE_KWARGS : An optional dict of kwargs to pass to the storage
constructor. This can be used to specify a
different S3 bucket or root path, for example.
Reference the setting name when calling `.from_config`.
"""
return cls(
getattr(settings, config_name).get('STORAGE_CLASS'),
getattr(settings, config_name).get('STORAGE_KWARGS'),
)
def store(self, course_id, filename, buff):
"""
Store the contents of `buff` in a directory determined by hashing
`course_id`, and name the file `filename`. `buff` can be any file-like
object, ready to be read from the beginning.
"""
path = self.path_to(course_id, filename)
self.storage.save(path, buff)
def store_rows(self, course_id, filename, rows):
"""
Given a course_id, filename, and rows (each row is an iterable of
strings), write the rows to the storage backend in csv format.
"""
output_buffer = ContentFile('')
csvwriter = csv.writer(output_buffer)
csvwriter.writerows(self._get_utf8_encoded_rows(rows))
output_buffer.seek(0)
self.store(course_id, filename, output_buffer)
def links_for(self, course_id):
"""
For a given `course_id`, return a list of `(filename, url)` tuples.
Calls the `url` method of the underlying storage backend. Returned
urls can be plugged straight into an href
"""
course_dir = self.path_to(course_id)
try:
_, filenames = self.storage.listdir(course_dir)
except OSError:
# Django's FileSystemStorage fails with an OSError if the course
# dir does not exist; other storage types return an empty list.
return []
files = [(filename, os.path.join(course_dir, filename)) for filename in filenames]
files.sort(key=lambda f: self.storage.modified_time(f[1]), reverse=True)
return [
(filename, self.storage.url(full_path))
for filename, full_path in files
]
def path_to(self, course_id, filename=''):
"""
Return the full path to a given file for a given course.
"""
# FastTrac affiliate reports
if isinstance(course_id, basestring):
hashed_course_id = hashlib.sha1(course_id).hexdigest()
else:
hashed_course_id = hashlib.sha1(course_id.to_deprecated_string()).hexdigest()
return os.path.join(hashed_course_id, filename)
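# Example (assumed usage): write a small CSV report and list download links.
#   store = ReportStore.from_config('GRADES_DOWNLOAD')
#   store.store_rows(course_id, 'enrollments.csv', [[u'id', u'email']])
#   links = store.links_for(course_id)  # [(filename, url), ...], newest first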
| agpl-3.0 | 6,095,885,579,733,219,000 | 39.564356 | 109 | 0.629973 | false |
meta-it/misc-addons | web_debranding/models/web_planner.py | 1 | 1046 | # -*- coding: utf-8 -*-
import re
from openerp import models, api
class Planner(models.Model):
_inherit = 'web.planner'
@api.model
def render(self, template_id, planner_app):
res = super(Planner, self).render(template_id, planner_app)
params = self.env['ir.config_parameter'].get_debranding_parameters()
planner_footer = params.get('web_debranding.planner_footer')
planner_footer = '<p>' + str(planner_footer) + '</p>'
res = re.sub(r'<p>[^<]*to contact our accounting experts by using the[\s\S]*?</div>', planner_footer, res)
res = re.sub(r'<p>[^<]*If you need help, do not hesitate to contact our experts[\s\S]*?</div>', planner_footer, res)
res = re.sub(r'<h4>Don\'t hesitate to[\s\S]*logo.png"/>', '', res)
res = re.sub(r'<p>Once it\'s fully working[\s\S]*odoo_logo.png"/>', planner_footer, res)
res = re.sub(r'<div class="mt32">[\s\S]*Fabien Pinckaers, Founder[\s\S]*?</div>', planner_footer, res)
return self.env['ir.translation']._debrand(res)
| lgpl-3.0 | -2,570,573,885,413,056,500 | 48.809524 | 124 | 0.616635 | false |
radjkarl/dataArtist | dataArtist/items/GridROI.py | 1 | 14750 | # coding=utf-8
from __future__ import division
from __future__ import absolute_import
import pyqtgraph_karl as pg
import numpy as np
from math import cos, sin, pi
import cv2
from qtpy import QtCore
from .PseudoSquareROI import PseudoSquareROI
from dataArtist.items.QPainterPath import QPainterPath
class GridROI(pg.ROI):
'''
An ROI displaying mini ROIs of different shapes as a grid
'''
# TODO: default argument is mutable: Default argument values are evaluated only once at function definition time,
# which means that modifying the default value of the argument will affect all subsequent calls of the function.
def __init__(self, pos=[20, 20], size=[20, 20], grid=[4, 5],
shape='Rect', gap=[0, 0], subgrid=([], []),
subgrid_width=0.05, pen='w', **kwargs):
'''
shape = ['Rect', 'Square', 'Circular', 'Pseudosquare']
'''
self.opts = {'shape': shape,
'grid': np.asarray(grid),
'gap': np.asfarray(gap),
'subgrid': subgrid,
'subgrid_width': subgrid_width
}
        # TODO: limit max cell size while rescaling
self.maxCellSize = size / self.opts['grid']
self.cells = []
self._createCells()
self._createSubgrid()
# cannot set brush at the moment, so:
if 'brush' in kwargs:
kwargs.pop('brush')
pg.ROI.__init__(self, pos, size, pen=pen, **kwargs)
self.translatable = False
self.mouseHovering = False
self._setCellSize(self.state['size'])
self._setCellPos(pos)
self.layout_rescaling = False
self.addScaleHandle([1, 1], [0, 0])
self.addScaleHandle([0, 0], [1, 1])
self.addScaleHandle([1, 0], [0, 1])
self.addScaleHandle([0, 1], [1, 0])
self.addRotateHandle([0.5, 1], [0.5, 0.5])
def getCellParameters(self, array, fn=np.mean):
out = np.arange(len(self.cells),
dtype=float).reshape(self.opts['grid'])
s = array.shape
for (i, j), n in np.ndenumerate(out):
m = self.cells[int(n)].getMask(s)
out[i, j] = fn(array[m])
return out
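    # Example (assumed usage): average an image over each grid cell; the
    # result is a 2D array shaped like self.opts['grid'].
    #   cell_means = grid_roi.getCellParameters(image, fn=np.mean)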
def saveState(self):
s = pg.ROI.saveState(self)
o = self.opts
s['gap'] = tuple(o['gap'])
s['grid'] = tuple(o['grid'])
s['shape'] = o['shape']
return s
def painterPath(self):
'''
Return a qpainterpath including all cells
'''
p = self.cells[0].painterPath()
for c in self.cells[1:]:
p.addPath(c.painterPath())
return p
def _createCells(self):
grid = self.opts['grid']
cellClass = {'Rect': RectROI,
'Circle': CircleROI,
'Pseudosquare': CellPseudoSquareROI}[self.opts['shape']]
self.layout_rescaling = True
for c in self.cells:
self.vb.removeItem(c)
self.cells = [cellClass(pos=[1, 1]) for _ in range(grid[0] * grid[1])]
i_scaleCell = -(grid[0] * grid[1] - grid[1] + 1)
self._scaleCell = c = self.cells[i_scaleCell]
c.setScaleCell()
c.sigRegionChanged.connect(self._cellResized)
def _createSubgrid(self):
for c in self.cells:
for line in c.subgrid:
self.vb.removeItem(line)
s = self.opts['subgrid']
w = self.opts['subgrid_width']
for c in self.cells:
for pos in s[0]:
c.subgrid.append(SubLine(c, orientation=0, pos=pos,
thickness=w))
for pos in s[1]:
c.subgrid.append(SubLine(c, orientation=1, pos=pos,
thickness=w))
for n, line in enumerate(self._scaleCell.subgrid):
line.setScaleLine()
line.sigRegionChanged.connect(lambda line, n=n:
self._lineResized(line, n))
def setPen(self, pen):
pg.ROI.setPen(self, pen)
for c in self.cells:
c.setPen(pen)
for line in c.subgrid:
line.setPen(pen)
def setBrush(self, pen):
pass
# TODO
# pg.ROI.setB(pen)
# for c in self.cells:
# c.setBrush(pen)
# #raises: AttributeError: 'RectROI' object has no attribute 'setBrush'
def getMask(self, shape):
m = self.cells[0].getMask(shape)
for c in self.cells[1:]:
m += c.getMask(shape)
return m
def __iter__(self):
return iter(self.cells)
def __len__(self):
return len(self.cells)
def _lineResized(self, line, n):
if not self.layout_rescaling:
#size = line.state['size']
pos = line.state['pos']
thick, pos = line.fromState()
for c in self.cells:
ln = c.subgrid[n]
if ln != line:
ln.thickness = thick
ln.pos = pos
ln.updatePos()
ln.updateSize()
def _cellResized(self, cell):
if not self.layout_rescaling:
size = cell.state['size']
self.opts['gap'] = (self.state['size'] - (
size * self.opts['grid'])) / (self.opts['grid'] - 1)
for c in self.cells:
if c != cell:
c.setSize(size)
self._setCellPos(self.state['pos'], True)
def setAngle(self, angle, **kwargs):
for c in self.cells:
c.setAngle(angle, **kwargs)
for line in c.subgrid:
line.setAngle(angle, **kwargs)
self._setCellPos(self.state['pos'])
pg.ROI.setAngle(self, angle, **kwargs)
def setPos(self, pos, **kwargs):
pg.ROI.setPos(self, pos, **kwargs)
self._setCellPos(pos)
def setSubGrid(self, s):
self.opts['subgrid'] = s
self.refresh()
def setGrid(self, x=None, y=None):
g = self.opts['grid']
if x is not None:
g[0] = x
if y is not None:
g[1] = y
self.refresh()
def setCellShape(self, shape):
self.opts['shape'] = shape
self.refresh()
def refresh(self):
self._createCells()
self._setCellSize(self.state['size'])
self._setCellPos(self.state['pos'])
[self.vb.addItem(c) for c in self.cells]
self._createSubgrid()
[[self.vb.addItem(line) for line in c.subgrid] for c in self.cells]
def setSize(self, size, update=True, finish=True):
pg.ROI.setSize(self, size, update, finish)
self.layout_rescaling = True
self._setCellSize(size)
self._setCellPos(self.state['pos'])
self.layout_rescaling = False
self.maxCellSize = size / self.opts['grid']
def _setCellSize(self, size):
size_cell = (size - (self.opts['grid'] - 1)
* self.opts['gap']) / self.opts['grid']
for c in self.cells:
c.setSize(size_cell)
for line in c.subgrid:
line.updateSize()
@staticmethod
def _rotatePoint(point, angle, center):
if angle == 0:
return point
x = point[0]
y = point[1]
cx = center[0]
cy = center[1]
point[0] = cos(angle) * (x - cx) - sin(angle) * (y - cy) + cx
point[1] = sin(angle) * (x - cx) + cos(angle) * (y - cy) + cy
def _setCellPos(self, pos, ignoreScaleCell=False):
size_cell = self._scaleCell.state['size']
rad = self.state['angle'] * pi / 180
# center of rotation:
c = self.state['pos']
if self.handles:
# centre defined by both edges:
c += 0.5 * self.handles[1]['item'].pos()
n = 0
for x in range(self.opts['grid'][0]):
for y in range(self.opts['grid'][1]):
cell = self.cells[n]
n += 1
if ignoreScaleCell and cell == self._scaleCell:
for line in cell.subgrid:
line.updatePos()
continue
p = pos + [x, y] * (size_cell + self.opts['gap'])
self._rotatePoint(p, rad, c)
cell.setPos(p)
for line in cell.subgrid:
line.updatePos()
def setViewBox(self, v):
'''
add grid and its cells to the ViewBox
'''
self.vb = v
v.addItem(self)
[v.addItem(c) for c in self.cells]
[[self.vb.addItem(line) for line in c.subgrid] for c in self.cells]
def show(self):
[c.show() for c in self.cells]
[[line.show() for line in c.subgrid] for c in self.cells]
pg.ROI.show(self)
def hide(self):
[c.hide() for c in self.cells]
[[line.hide() for line in c.subgrid] for c in self.cells]
pg.ROI.hide(self)
def close(self):
[self.vb.removeItem(c) for c in self.cells]
self.vb.removeItem(self)
class _CellBase(object):
'''
Base class for all cells in a grid
'''
def __init__(self, *args, **kwargs):
self.subgrid = []
self.translatable = False
self.mouseHovering = False
class SubLine(pg.ROI):
'''
one line for the subgrid
'''
def __init__(self, cell, orientation, pos, thickness):
pg.ROI.__init__(self, pos=(1, 1), size=(1, 1))
self.translatable = False
self.mouseHovering = False
self.pos = pos
self.thickness = thickness
if orientation == 0:
self.i = 0
self.j = 1
else:
self.i = 1
self.j = 0
self.cell = cell
def fromState(self):
'''
update thickness and position from current state
'''
j = self.j
s = self.state
cs = self.cell.state
p = self.pos = (s['pos'][j] - cs['pos'][j]) / cs['size'][j]
t = self.thickness = s['size'][j] / cs['size'][j]
return t, p
def setScaleLine(self):
self.addScaleHandle([0.5, 1], [0.5, 0])
self.addScaleHandle([0.5, 0], [0.5, 1])
def updateSize(self):
s = self.cell.state['size']
pg.ROI.setSize(self, (s[self.i], self.thickness * s[self.j]))
def updatePos(self):
p = self.cell.state['pos'].copy()
s = self.cell.state['size']
j = self.j
p[j] += s[j] * self.pos
pg.ROI.setPos(self, p)
class RectROI(pg.ROI, _CellBase):
def __init__(self, *args, **kwargs):
pg.ROI.__init__(self, *args, **kwargs)
_CellBase.__init__(self, *args, **kwargs)
def setScaleCell(self):
self.addScaleHandle([1, 0], [0, 1])
self.setPen('y')
def painterPath(self):
p = QPainterPath()
a = self.boundingRect()
a.moveTo(self.state['pos'])
p.addRect(a)
return p
def getMask(self, shape):
p = self.state['pos']
s = self.state['size']
center = p + s / 2
a = self.state['angle']
# opencv convention:
shape = (shape[1], shape[0])
arr = np.zeros(shape, dtype=np.uint8)
# draw rotated rectangle:
vertices = np.int0(cv2.boxPoints((center, s, a)))
cv2.drawContours(arr, [vertices],
0,
color=1,
thickness=-1)
return arr.astype(bool).T
class CircleROI(_CellBase, pg.EllipseROI):
def __init__(self, *args, **kwargs):
pg.ROI.__init__(self, *args, **kwargs)
_CellBase.__init__(self, *args, **kwargs)
self._ratioEllispeRectangle = 1 # only changed in CellPseudoSquareROI
def setScaleCell(self):
self.addScaleHandle([cos(1), sin(0)], [0, 1])
self.setPen('y')
def painterPath(self):
p = QPainterPath()
a = self.boundingRect()
a.moveTo(self.state['pos'])
p.addEllipse(a)
return p
def getMask(self, shape):
'''
returns bool array
'''
p = self.state['pos']
s = self.state['size']
center = p + s / 2
a = self.state['angle']
# opencv convention:
shape = (shape[1], shape[0])
arr = np.zeros(shape, dtype=np.uint8)
# draw ellipse:
cv2.ellipse(arr,
(int(center[0]), int(center[1])),
(int(s[0] / 2 * self._ratioEllispeRectangle),
int(s[1] / 2 * self._ratioEllispeRectangle)),
int(a),
startAngle=0,
endAngle=360,
color=1,
thickness=-1)
return arr.astype(bool).T
class CellPseudoSquareROI(_CellBase, PseudoSquareROI):
def __init__(self, *args, **kwargs):
PseudoSquareROI.__init__(self, *args, **kwargs)
_CellBase.__init__(self, *args, **kwargs)
def setScaleCell(self):
self.addScaleHandle([1, 0], [0, 1])
self.setPen('y')
def painterPath(self):
p = QPainterPath()
roundness = int(99 * float(self._alen) / 16 / 90)
r = QtCore.QRectF(self._rect)
r.moveTo(self.state['pos'])
p.addRoundRect(r, roundness)
return p
if __name__ == '__main__':
from pyqtgraph.Qt import QtGui
    app = QtGui.QApplication([])
w = pg.GraphicsWindow(size=(1000, 800), border=True)
w.setWindowTitle('pyqtgraph example: ROI Examples')
w1 = w.addLayout(row=0, col=0)
#label1 = w1.addLabel('test', row=1, col=0)
v = w1.addViewBox(row=1, col=0, lockAspect=True)
v2 = w1.addViewBox(row=2, col=0, lockAspect=True)
img1b = pg.ImageItem()
v2.addItem(img1b)
v3 = w1.addViewBox(row=3, col=0, lockAspect=True)
img1c = pg.ImageItem()
v3.addItem(img1c)
# Create image to display
arr = np.ones((100, 100), dtype=float)
arr[45:55, 45:55] = 0
arr[25, :] = 5
arr[:, 25] = 5
arr[75, :] = 5
arr[:, 75] = 5
arr[50, :] = 10
arr[:, 50] = 10
arr += np.sin(np.linspace(0, 20, 100)).reshape(1, 100)
arr += np.random.normal(size=(100, 100))
img1a = pg.ImageItem(arr)
v.addItem(img1a)
r = GridROI([20, 20], [20, 20], pen=(0, 9),
subgrid=([0.3, 0.5, 1], []), shape='Pseudosquare')
r.setViewBox(v)
cell = r.cells[0]
v.autoRange(False)
def update(roi):
img1b.setImage(roi.getArrayRegion(arr, img1a), levels=(0, arr.max()))
img1c.setImage(np.int0(r.getMask(arr.shape)))
# cell.sigRegionChanged.connect(update)
# update(cell)
app.exec_()
| gpl-3.0 | -2,420,680,918,911,564,000 | 28.324056 | 118 | 0.51722 | false |
jekahy/EIASR | src/canny.py | 1 | 3842 | # coding: utf8
from math import pi
import numpy as np
from scipy.signal import convolve2d
SOBEL_X = np.array([
[ 1, 0, -1],
[ 2, 0, -2],
[ 1, 0, -1],
])
SOBEL_Y = np.array([
[ 1, 2, 1],
[ 0, 0, 0],
[-1, -2, -1],
])
class GradientImage(object):
def __init__(self, magnitudes, angles):
self.magnitudes = magnitudes
self.angles = angles
@property
def w(self):
return self.magnitudes.shape[0]
@property
def h(self):
return self.magnitudes.shape[1]
@classmethod
def from_partials(cls, dxs, dys):
magnitudes = np.sqrt(dxs ** 2 + dys ** 2)
angles = np.arctan2(dys, dxs)
return cls(magnitudes, angles)
def gradient(in_):
dxs = convolve2d(in_, SOBEL_X, 'same', 'symm')
dys = convolve2d(in_, SOBEL_Y, 'same', 'symm')
return GradientImage.from_partials(dxs, dys)
def thin_nonmaximum(gradient_image):
thinned = np.copy(gradient_image.magnitudes)
for idx, s in np.ndenumerate(gradient_image.magnitudes):
s_nl = _neighbour_in_direction(
gradient_image.magnitudes, idx,
gradient_image.angles[idx])
s_nr = _neighbour_in_direction(
gradient_image.magnitudes, idx,
gradient_image.angles[idx] + pi)
# TODO: consider angle at nl, nr
if s < s_nl or s < s_nr:
thinned[idx] = 0
return GradientImage(thinned, gradient_image.angles)
def thin_hysteresis(gradient_image, t_high=0.2, t_low=0.1):
# 8 pixel neighborhood
x = [-1, 0, 1, -1, 1, -1, 0, 1]
y = [-1, -1, -1, 0, 0, 1, 1, 1]
magnitudes = gradient_image.magnitudes
# Dimensions
xdim, ydim = magnitudes.shape
# Max magnitude
max_magn = magnitudes.max()
# Pixels > t_high are kept automatically
thinned = np.where(magnitudes > (t_high * max_magn), magnitudes, 0)
    # Pixels > t_low will be added later if they prove to be
# adjacent to another pixel which has been included in the thinned list
cands = np.where(magnitudes > (t_low * max_magn), magnitudes, 0)
# Create an initial list of strong edge pixels
prevx, prevy = thinned.nonzero()
# If the previous loop of testing found no new pixels to move from
# the cands list to the edge list, then stop
while len(prevx) != 0:
newx, newy = [], []
# Loop over new edge pixels discovered on previous iteration
for ii in range(len(prevx)):
# Loop through 8 pixel neighborhood
for ij in range(len(x)):
xidx = prevx[ii] + x[ij]
yidx = prevy[ii] + y[ij]
# Check if pixel index falls within image boundary
if xidx >= 0 and xidx < xdim and yidx >= 0 and yidx < ydim:
# Check if pixel is on the cands list but has not yet been added to the thinned list
if cands[xidx][yidx] and not thinned[xidx][yidx]:
# Transfer to thinned list
thinned[xidx][yidx] = cands[xidx][yidx]
# Keep track of indices for next loop iteration
newx.append(xidx)
newy.append(yidx)
# Update for next iteration
prevx = newx
prevy = newy
return GradientImage(thinned, gradient_image.angles)
NEIGHBOURS = [
( 0, 1),
( 1, 1),
( 1, 0),
( 1, -1),
( 0, -1),
(-1, -1),
(-1, 0),
(-1, 1),
]
def _neighbour_in_direction(a, (x, y), direction):
w, h = a.shape
ndir = len(NEIGHBOURS)
discrete_direction = int((direction / (2*pi) * ndir + 0.5 * ndir) % ndir)
dx, dy = NEIGHBOURS[discrete_direction]
nx, ny = x + dx, y + dy
if not (0 <= nx < w and 0 <= ny < h):
return 0
return a[nx, ny]
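# Example (assumed usage): a minimal edge-detection pipeline over a 2D image.
#   img = np.random.rand(64, 64)
#   g = gradient(img)
#   edges = thin_hysteresis(thin_nonmaximum(g), t_high=0.2, t_low=0.1)
#   mask = edges.magnitudes > 0  # boolean edge map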
| mit | -4,143,740,423,269,810,000 | 27.671642 | 104 | 0.558303 | false |
volpino/Yeps-EURAC | lib/galaxy/jobs/runners/sge.py | 1 | 13219 | import os, logging, threading, time
from Queue import Queue, Empty
from galaxy import model
from paste.deploy.converters import asbool
import pkg_resources
try:
pkg_resources.require( "DRMAA_python" )
DRMAA = __import__( "DRMAA" )
except:
DRMAA = None
log = logging.getLogger( __name__ )
if DRMAA is not None:
DRMAA_state = {
DRMAA.Session.UNDETERMINED: 'process status cannot be determined',
DRMAA.Session.QUEUED_ACTIVE: 'job is queued and waiting to be scheduled',
DRMAA.Session.SYSTEM_ON_HOLD: 'job is queued and in system hold',
DRMAA.Session.USER_ON_HOLD: 'job is queued and in user hold',
DRMAA.Session.USER_SYSTEM_ON_HOLD: 'job is queued and in user and system hold',
DRMAA.Session.RUNNING: 'job is running',
DRMAA.Session.SYSTEM_SUSPENDED: 'job is system suspended',
DRMAA.Session.USER_SUSPENDED: 'job is user suspended',
DRMAA.Session.DONE: 'job finished normally',
DRMAA.Session.FAILED: 'job finished, but failed',
}
sge_template = """#!/bin/sh
#$ -S /bin/sh
GALAXY_LIB="%s"
if [ "$GALAXY_LIB" != "None" ]; then
if [ -n "$PYTHONPATH" ]; then
PYTHONPATH="$GALAXY_LIB:$PYTHONPATH"
else
PYTHONPATH="$GALAXY_LIB"
fi
export PYTHONPATH
fi
cd %s
%s
"""
class SGEJobState( object ):
def __init__( self ):
"""
Encapsulates state related to a job that is being run via SGE and
that we need to monitor.
"""
self.job_wrapper = None
self.job_id = None
self.old_state = None
self.running = False
self.job_file = None
self.ofile = None
self.efile = None
self.runner_url = None
class SGEJobRunner( object ):
"""
    Job runner that submits jobs to a Sun Grid Engine cluster via DRMAA
"""
STOP_SIGNAL = object()
def __init__( self, app ):
"""Initialize this job runner and start the monitor thread"""
# Check if SGE was importable, fail if not
if DRMAA is None:
raise Exception( "SGEJobRunner requires DRMAA_python which was not found" )
self.app = app
# 'watched' and 'queue' are both used to keep track of jobs to watch.
# 'queue' is used to add new watched jobs, and can be called from
# any thread (usually by the 'queue_job' method). 'watched' must only
# be modified by the monitor thread, which will move items from 'queue'
# to 'watched' and then manage the watched jobs.
self.watched = []
self.queue = Queue()
self.default_cell = self.determine_sge_cell( self.app.config.default_cluster_job_runner )
self.ds = DRMAA.Session()
self.ds.init( self.default_cell )
self.monitor_thread = threading.Thread( target=self.monitor )
self.monitor_thread.start()
log.debug( "ready" )
def determine_sge_cell( self, url ):
"""Determine what SGE cell we are using"""
url_split = url.split("/")
if url_split[0] == 'sge:':
return url_split[2]
# this could happen if sge is started, but is not the default runner
else:
return ''
def determine_sge_queue( self, url ):
"""Determine what SGE queue we are submitting to"""
url_split = url.split("/")
queue = url_split[3]
if queue == "":
# None == server's default queue
queue = None
return queue
def queue_job( self, job_wrapper ):
"""Create SGE script for a job and submit it to the SGE queue"""
try:
job_wrapper.prepare()
command_line = job_wrapper.get_command_line()
except:
job_wrapper.fail( "failure preparing job", exception=True )
log.exception("failure running job %d" % job_wrapper.job_id)
return
runner_url = job_wrapper.tool.job_runner
# This is silly, why would we queue a job with no command line?
if not command_line:
job_wrapper.finish( '', '' )
return
# Check for deletion before we change state
if job_wrapper.get_state() == model.Job.states.DELETED:
log.debug( "Job %s deleted by user before it entered the SGE queue" % job_wrapper.job_id )
job_wrapper.cleanup()
return
# Change to queued state immediately
job_wrapper.change_state( model.Job.states.QUEUED )
if self.determine_sge_cell( runner_url ) != self.default_cell:
# TODO: support multiple cells
log.warning( "(%s) Using multiple SGE cells is not supported. This job will be submitted to the default cell." % job_wrapper.job_id )
sge_queue_name = self.determine_sge_queue( runner_url )
# define job attributes
ofile = "%s/database/pbs/%s.o" % (os.getcwd(), job_wrapper.job_id)
efile = "%s/database/pbs/%s.e" % (os.getcwd(), job_wrapper.job_id)
jt = self.ds.createJobTemplate()
jt.remoteCommand = "%s/database/pbs/galaxy_%s.sh" % (os.getcwd(), job_wrapper.job_id)
jt.outputPath = ":%s" % ofile
jt.errorPath = ":%s" % efile
if sge_queue_name is not None:
jt.setNativeSpecification( "-q %s" % sge_queue_name )
script = sge_template % (job_wrapper.galaxy_lib_dir, os.path.abspath( job_wrapper.working_directory ), command_line)
fh = file( jt.remoteCommand, "w" )
fh.write( script )
fh.close()
os.chmod( jt.remoteCommand, 0750 )
# job was deleted while we were preparing it
if job_wrapper.get_state() == model.Job.states.DELETED:
log.debug( "Job %s deleted by user before it entered the SGE queue" % job_wrapper.job_id )
self.cleanup( ( ofile, efile, jt.remoteCommand ) )
job_wrapper.cleanup()
return
galaxy_job_id = job_wrapper.job_id
log.debug("(%s) submitting file %s" % ( galaxy_job_id, jt.remoteCommand ) )
log.debug("(%s) command is: %s" % ( galaxy_job_id, command_line ) )
# runJob will raise if there's a submit problem
job_id = self.ds.runJob(jt)
if sge_queue_name is None:
log.debug("(%s) queued in default queue as %s" % (galaxy_job_id, job_id) )
else:
log.debug("(%s) queued in %s queue as %s" % (galaxy_job_id, sge_queue_name, job_id) )
# store runner information for tracking if Galaxy restarts
job_wrapper.set_runner( runner_url, job_id )
# Store SGE related state information for job
sge_job_state = SGEJobState()
sge_job_state.job_wrapper = job_wrapper
sge_job_state.job_id = job_id
sge_job_state.ofile = ofile
sge_job_state.efile = efile
sge_job_state.job_file = jt.remoteCommand
sge_job_state.old_state = 'new'
sge_job_state.running = False
sge_job_state.runner_url = runner_url
# delete the job template
self.ds.deleteJobTemplate( jt )
# Add to our 'queue' of jobs to monitor
self.queue.put( sge_job_state )
def monitor( self ):
"""
        Watches jobs currently in the SGE queue and deals with state changes
        (queued to running) and job completion
"""
while 1:
# Take any new watched jobs and put them on the monitor list
try:
while 1:
sge_job_state = self.queue.get_nowait()
if sge_job_state is self.STOP_SIGNAL:
# TODO: This is where any cleanup would occur
self.ds.exit()
return
self.watched.append( sge_job_state )
except Empty:
pass
# Iterate over the list of watched jobs and check state
self.check_watched_items()
# Sleep a bit before the next state check
time.sleep( 1 )
def check_watched_items( self ):
"""
Called by the monitor thread to look at each watched job and deal
with state changes.
"""
new_watched = []
for sge_job_state in self.watched:
job_id = sge_job_state.job_id
galaxy_job_id = sge_job_state.job_wrapper.job_id
old_state = sge_job_state.old_state
try:
state = self.ds.getJobProgramStatus( job_id )
except DRMAA.InvalidJobError:
# we should only get here if an orphaned job was put into the queue at app startup
log.debug("(%s/%s) job left SGE queue" % ( galaxy_job_id, job_id ) )
self.finish_job( sge_job_state )
continue
except Exception, e:
# so we don't kill the monitor thread
log.exception("(%s/%s) Unable to check job status" % ( galaxy_job_id, job_id ) )
log.warning("(%s/%s) job will now be errored" % ( galaxy_job_id, job_id ) )
sge_job_state.job_wrapper.fail( "Cluster could not complete job" )
continue
if state != old_state:
log.debug("(%s/%s) state change: %s" % ( galaxy_job_id, job_id, DRMAA_state[state] ) )
if state == DRMAA.Session.RUNNING and not sge_job_state.running:
sge_job_state.running = True
sge_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
if state == DRMAA.Session.DONE:
self.finish_job( sge_job_state )
continue
if state == DRMAA.Session.FAILED:
sge_job_state.job_wrapper.fail( "Cluster could not complete job" )
sge_job_state.job_wrapper.cleanup()
continue
sge_job_state.old_state = state
new_watched.append( sge_job_state )
# Replace the watch list with the updated version
self.watched = new_watched
def finish_job( self, sge_job_state ):
"""
Get the output/error for a finished job, pass to `job_wrapper.finish`
and cleanup all the SGE temporary files.
"""
ofile = sge_job_state.ofile
efile = sge_job_state.efile
job_file = sge_job_state.job_file
# collect the output
try:
ofh = file(ofile, "r")
efh = file(efile, "r")
stdout = ofh.read()
stderr = efh.read()
except:
stdout = ''
stderr = 'Job output not returned from cluster'
log.debug(stderr)
try:
sge_job_state.job_wrapper.finish( stdout, stderr )
except:
log.exception("Job wrapper finish method failed")
# clean up the sge files
self.cleanup( ( ofile, efile, job_file ) )
def cleanup( self, files ):
if not asbool( self.app.config.get( 'debug', False ) ):
for file in files:
if os.access( file, os.R_OK ):
os.unlink( file )
def put( self, job_wrapper ):
"""Add a job to the queue (by job identifier)"""
self.queue_job( job_wrapper )
def shutdown( self ):
"""Attempts to gracefully shut down the monitor thread"""
log.info( "sending stop signal to worker threads" )
self.queue.put( self.STOP_SIGNAL )
log.info( "sge job runner stopped" )
def stop_job( self, job ):
"""Attempts to delete a job from the SGE queue"""
try:
self.ds.control( job.job_runner_external_id, DRMAA.Session.TERMINATE )
log.debug( "(%s/%s) Removed from SGE queue at user's request" % ( job.id, job.job_runner_external_id ) )
except DRMAA.InvalidJobError:
log.debug( "(%s/%s) User killed running job, but it was already dead" % ( job.id, job.job_runner_external_id ) )
def recover( self, job, job_wrapper ):
"""Recovers jobs stuck in the queued/running state when Galaxy started"""
sge_job_state = SGEJobState()
sge_job_state.ofile = "%s/database/pbs/%s.o" % (os.getcwd(), job.id)
sge_job_state.efile = "%s/database/pbs/%s.e" % (os.getcwd(), job.id)
sge_job_state.job_file = "%s/database/pbs/galaxy_%s.sh" % (os.getcwd(), job.id)
sge_job_state.job_id = str( job.job_runner_external_id )
sge_job_state.runner_url = job_wrapper.tool.job_runner
job_wrapper.command_line = job.command_line
sge_job_state.job_wrapper = job_wrapper
if job.state == model.Job.states.RUNNING:
log.debug( "(%s/%s) is still in running state, adding to the SGE queue" % ( job.id, job.job_runner_external_id ) )
sge_job_state.old_state = DRMAA.Session.RUNNING
sge_job_state.running = True
self.queue.put( sge_job_state )
elif job.state == model.Job.states.QUEUED:
log.debug( "(%s/%s) is still in SGE queued state, adding to the SGE queue" % ( job.id, job.job_runner_external_id ) )
sge_job_state.old_state = DRMAA.Session.QUEUED
sge_job_state.running = False
self.queue.put( sge_job_state )
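# --- Illustrative sketch added by the editor; not part of the original
# Galaxy source. It shows how a runner like the one above is typically
# driven; the class name `SGEJobRunner` and the `job_wrapper` object are
# assumptions inferred from the methods defined above.
#
#   runner = SGEJobRunner( app )    # constructor starts the monitor thread
#   runner.put( job_wrapper )       # queue a job for submission to SGE
#   ...                             # monitor loop polls check_watched_items()
#   runner.stop_job( job )          # user-requested kill via DRMAA TERMINATE
#   runner.shutdown()               # STOP_SIGNAL ends the monitor loop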
| mit | -2,320,704,567,637,543,400 | 40.180685 | 146 | 0.577956 | false |
sharad/calibre | src/calibre/gui2/dbus_export/menu.py | 1 | 14852 | #!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
# Support for exporting Qt's MenuBars/Menus over DBUS. The API is defined in
# dbus-menu.xml from the libdbusmenu project https://launchpad.net/libdbusmenu
import dbus
from PyQt5.Qt import (
QApplication, QMenu, QIcon, QKeySequence, QObject, QEvent, QTimer, pyqtSignal, Qt)
from calibre.utils.dbus_service import Object, BusName, method as dbus_method, dbus_property, signal as dbus_signal
from calibre.gui2.dbus_export.utils import (
setup_for_cli_run, swap_mnemonic_char, key_sequence_to_dbus_shortcut, icon_to_dbus_menu_icon)
null = object()
def PropDict(mapping=()):
return dbus.Dictionary(mapping, signature='sv')
def create_properties_for_action(ac, previous=None):
ans = PropDict()
if ac.isSeparator():
ans['type'] = 'separator'
if not ac.isVisible():
ans['visible'] = False
return ans
text = ac.text() or ac.iconText()
if text:
ans['label'] = swap_mnemonic_char(text)
if not ac.isEnabled():
ans['enabled'] = False
if not ac.isVisible() or ac.property('blocked') is True:
ans['visible'] = False
if ac.menu() is not None:
ans['children-display'] = 'submenu'
if ac.isCheckable():
exclusive = ac.actionGroup() is not None and ac.actionGroup().isExclusive()
ans['toggle-type'] = 'radio' if exclusive else 'checkmark'
ans['toggle-state'] = int(ac.isChecked())
shortcuts = ac.shortcuts()
if shortcuts:
sc = dbus.Array(signature='as')
for s in shortcuts:
if not s.isEmpty():
for x in key_sequence_to_dbus_shortcut(s):
sc.append(dbus.Array(x, signature='s'))
if sc:
ans['shortcut'] = sc[:1] # Unity fails to display the shortcuts at all if more than one is specified
if ac.isIconVisibleInMenu():
icon = ac.icon()
if previous and previous.get('x-qt-icon-cache-key') == icon.cacheKey():
for x in 'icon-data x-qt-icon-cache-key'.split():
ans[x] = previous[x]
else:
data = icon_to_dbus_menu_icon(ac.icon())
if data is not None:
ans['icon-data'] = data
ans['x-qt-icon-cache-key'] = icon.cacheKey()
return ans
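# Illustrative example added by the editor (an assumption, not taken from the
# original source): for a checkable, checked action titled '&Bold' with the
# shortcut Ctrl+B, the properties built above would resemble:
#
#   {'label': '_Bold', 'toggle-type': 'checkmark', 'toggle-state': 1,
#    'shortcut': [['Control', 'B']]}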
def menu_actions(menu):
try:
return menu.actions()
except TypeError:
if isinstance(menu, QMenu):
return QMenu.actions(menu)
raise
class DBusMenu(QObject):
handle_event_signal = pyqtSignal(object, object, object, object)
def __init__(self, object_path, parent=None, bus=None):
QObject.__init__(self, parent)
# Unity barfs if the Event DBUS method does not return immediately, so
# handle it asynchronously
self.handle_event_signal.connect(self.handle_event, type=Qt.QueuedConnection)
self.dbus_api = DBusMenuAPI(self, object_path, bus=bus)
self.set_status = self.dbus_api.set_status
self._next_id = 0
self.action_changed_timer = t = QTimer(self)
t.setInterval(0), t.setSingleShot(True), t.timeout.connect(self.actions_changed)
self.layout_changed_timer = t = QTimer(self)
t.setInterval(0), t.setSingleShot(True), t.timeout.connect(self.layouts_changed)
self.init_maps()
@property
def object_path(self):
return self.dbus_api._object_path
def init_maps(self, qmenu=None):
self.action_changes = set()
self.layout_changes = set()
self.qmenu = qmenu
self._id_to_action, self._action_to_id = {}, {}
self._action_properties = {}
@property
def next_id(self):
self._next_id += 1
return self._next_id
def id_to_action(self, action_id):
if self.qmenu is None:
return None
return self._id_to_action.get(action_id)
def action_to_id(self, action):
if self.qmenu is None:
return None
return self._action_to_id.get(action)
def action_properties(self, action_id, restrict_to=None):
if self.qmenu is None:
return {}
ans = self._action_properties.get(action_id, PropDict())
if restrict_to:
ans = PropDict({k:v for k, v in ans.iteritems() if k in restrict_to})
return ans
def publish_new_menu(self, qmenu=None):
self.init_maps(qmenu)
if qmenu is not None:
qmenu.destroyed.connect(lambda obj=None:self.publish_new_menu())
ac = qmenu.menuAction()
self.add_action(ac)
self.dbus_api.LayoutUpdated(self.dbus_api.revision, 0)
def set_visible(self, visible):
ac = self.id_to_action(0)
if ac is not None and self.qmenu is not None:
changed = False
blocked = not visible
for ac in menu_actions(ac.menu()):
ac_id = self.action_to_id(ac)
if ac_id is not None:
old = ac.property('blocked')
if old is not blocked:
ac.setProperty('blocked', blocked)
self.action_changes.add(ac_id)
changed = True
if changed:
self.action_changed_timer.start()
def add_action(self, ac):
ac_id = 0 if ac.menu() is self.qmenu else self.next_id
self._id_to_action[ac_id] = ac
self._action_to_id[ac] = ac_id
self._action_properties[ac_id] = create_properties_for_action(ac)
if ac.menu() is not None:
self.add_menu(ac.menu())
def add_menu(self, menu):
menu.installEventFilter(self)
for ac in menu_actions(menu):
self.add_action(ac)
def eventFilter(self, obj, ev):
ac = getattr(obj, 'menuAction', lambda : None)()
ac_id = self.action_to_id(ac)
if ac_id is not None:
etype = ev.type()
if etype == QEvent.ActionChanged:
ac_id = self.action_to_id(ev.action())
self.action_changes.add(ac_id)
self.action_changed_timer.start()
elif etype == QEvent.ActionAdded:
self.layout_changes.add(ac_id)
self.layout_changed_timer.start()
self.add_action(ev.action())
elif etype == QEvent.ActionRemoved:
self.layout_changes.add(ac_id)
self.layout_changed_timer.start()
self.action_removed(ev.action())
return False
def actions_changed(self):
updated_props = dbus.Array(signature='(ia{sv})')
removed_props = dbus.Array(signature='(ias)')
for ac_id in self.action_changes:
ac = self.id_to_action(ac_id)
if ac is None:
continue
old_props = self.action_properties(ac_id)
new_props = self._action_properties[ac_id] = create_properties_for_action(ac, old_props)
removed = set(old_props) - set(new_props)
if removed:
removed_props.append((ac_id, dbus.Array(removed, signature='as')))
updated = PropDict({k:v for k, v in new_props.iteritems() if v != old_props.get(k, null)})
if updated:
updated_props.append((ac_id, updated))
self.action_changes = set()
if updated_props or removed_props:
self.dbus_api.ItemsPropertiesUpdated(updated_props, removed_props)
return updated_props, removed_props
def layouts_changed(self):
changes = set()
for ac_id in self.layout_changes:
if ac_id in self._id_to_action:
changes.add(ac_id)
self.layout_changes = set()
if changes:
self.dbus_api.revision += 1
for change in changes:
self.dbus_api.LayoutUpdated(self.dbus_api.revision, change)
return changes
def action_is_in_a_menu(self, ac):
all_menus = {ac.menu() for ac in self._action_to_id}
all_menus.discard(None)
return bool(set(ac.associatedWidgets()).intersection(all_menus))
def action_removed(self, ac):
if not self.action_is_in_a_menu(ac):
ac_id = self._action_to_id.pop(ac, None)
self._id_to_action.pop(ac_id, None)
self._action_properties.pop(ac_id, None)
def get_layout(self, parent_id, depth, property_names):
# Ensure any pending updates are done, as they are needed now
self.actions_changed()
self.layouts_changed()
property_names = property_names or None
props = self.action_properties(parent_id, property_names)
return parent_id, props, self.get_layout_children(parent_id, depth, property_names)
def get_layout_children(self, parent_id, depth, property_names):
ans = dbus.Array(signature='(ia{sv}av)')
ac = self.id_to_action(parent_id)
if ac is not None and depth != 0 and ac.menu() is not None:
for child in menu_actions(ac.menu()):
child_id = self.action_to_id(child)
if child_id is not None:
props = self.action_properties(child_id, property_names)
ans.append((child_id, props, self.get_layout_children(child_id, depth - 1, property_names)))
return ans
def get_properties(self, ids=None, property_names=None):
property_names = property_names or None
ans = dbus.Array(signature='(ia{sv})')
for action_id in (ids or self._id_to_action):
ans.append((action_id, self.action_properties(action_id, property_names)))
return ans
def handle_event(self, action_id, event, data, timestamp):
ac = self.id_to_action(action_id)
if event == 'clicked':
if ac.isCheckable():
ac.toggle()
ac.triggered.emit(ac.isCheckable() and ac.isChecked())
def handle_about_to_show(self, ac):
child_ids = {self.action_to_id(x) for x in menu_actions(ac.menu())}
child_ids.discard(None)
ac_id = self.action_to_id(ac)
ac.menu().aboutToShow.emit()
if ac_id in self.layout_changes or child_ids.intersection(self.action_changes):
return True
return False
class DBusMenuAPI(Object):
IFACE = 'com.canonical.dbusmenu'
def __init__(self, menu, object_path, bus=None):
if bus is None:
bus = dbus.SessionBus()
Object.__init__(self, bus, object_path)
self.status = 'normal'
self.menu = menu
self.revision = 0
@dbus_property(IFACE, signature='u')
def Version(self):
return 3 # GTK 3 uses 3, KDE 4 uses 2
@dbus_property(IFACE, signature='s', emits_changed_signal=True)
def Status(self):
return self.status
def set_status(self, normal=True):
self.status = 'normal' if normal else 'notice'
self.PropertiesChanged(self.IFACE, {'Status': self.status}, [])
@dbus_property(IFACE, signature='s')
def TextDirection(self):
return 'ltr' if QApplication.instance().isLeftToRight() else 'rtl'
@dbus_property(IFACE, signature='as')
def IconThemePath(self):
return dbus.Array(signature='s')
@dbus_method(IFACE, in_signature='iias', out_signature='u(ia{sv}av)')
def GetLayout(self, parentId, recursionDepth, propertyNames):
layout = self.menu.get_layout(parentId, recursionDepth, propertyNames)
return self.revision, layout
@dbus_method(IFACE, in_signature='aias', out_signature='a(ia{sv})')
def GetGroupProperties(self, ids, propertyNames):
return self.menu.get_properties(ids, propertyNames)
@dbus_method(IFACE, in_signature='is', out_signature='v')
def GetProperty(self, id, name):
return self.menu.action_properties(id).get(name, '')
@dbus_method(IFACE, in_signature='isvu', out_signature='')
def Event(self, id, eventId, data, timestamp):
''' This is called by the applet to notify the application that an event happened on a
menu item. eventId can be one of the following::
* "clicked"
* "hovered"
* "opened"
* "closed"
Vendor-specific events can be added by prefixing them with "x-<vendor>-"'''
if self.menu.id_to_action(id) is not None:
self.menu.handle_event_signal.emit(id, eventId, data, timestamp)
@dbus_method(IFACE, in_signature='a(isvu)', out_signature='ai')
def EventGroup(self, events):
''' Used to pass a set of events as a single message for possibly
several different menuitems. This is done to optimize DBus traffic.
Should return a list of ids that are not found. events is a list of
events in the same format as used for the Event method.'''
missing = dbus.Array(signature='u')
for id, eventId, data, timestamp in events:
if self.menu.id_to_action(id) is not None:
self.menu.handle_event_signal.emit(id, eventId, data, timestamp)
else:
missing.append(id)
return missing
@dbus_method(IFACE, in_signature='i', out_signature='b')
def AboutToShow(self, id):
ac = self.menu.id_to_action(id)
if ac is not None and ac.menu() is not None:
return self.menu.handle_about_to_show(ac)
return False
@dbus_method(IFACE, in_signature='ai', out_signature='aiai')
def AboutToShowGroup(self, ids):
updates_needed = dbus.Array(signature='i')
id_errors = dbus.Array(signature='i')
for ac_id in ids:
ac = self.menu.id_to_action(ac_id)
if ac is not None and ac.menu() is not None:
if self.menu.handle_about_to_show(ac):
updates_needed.append(ac_id)
else:
id_errors.append(ac_id)
return updates_needed, id_errors
@dbus_signal(IFACE, 'a(ia{sv})a(ias)')
def ItemsPropertiesUpdated(self, updatedProps, removedProps):
pass
@dbus_signal(IFACE, 'ui')
def LayoutUpdated(self, revision, parent):
pass
@dbus_signal(IFACE, 'iu')
def ItemActivationRequested(self, id, timestamp):
pass
def test():
setup_for_cli_run()
app = QApplication([])
bus = dbus.SessionBus()
dbus_name = BusName('com.calibre-ebook.TestDBusMenu', bus=bus, do_not_queue=True)
m = QMenu()
ac = m.addAction(QIcon(I('window-close.png')), 'Quit', app.quit)
ac.setShortcut(QKeySequence('Ctrl+Q'))
menu = DBusMenu('/Menu', bus=bus)
menu.publish_new_menu(m)
app.exec_()
del dbus_name
if __name__ == '__main__':
test()
| gpl-3.0 | 4,996,963,048,490,460,000 | 37.677083 | 115 | 0.597832 | false |
tensorflow/agents | tf_agents/networks/nest_map.py | 1 | 8386 | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network layer that allows mapping multiple inputs."""
from __future__ import absolute_import
from __future__ import division
# Using Type Annotations.
from __future__ import print_function
import copy
import typing
import tensorflow.compat.v2 as tf
from tf_agents.networks import network
from tf_agents.networks import sequential
from tf_agents.typing import types
from tf_agents.utils import nest_utils
def NestFlatten() -> tf.keras.layers.Layer: # pylint: disable=invalid-name
"""Returns a Keras layer that takes a nest of inputs, and returns a list.
Useful in combination with `NestMap` to combine processed inputs:
```python
# Process inputs in dictionary {"inp1": ..., "inp2": ...}, then
# flatten the resulting tensors into a list, and finally pass this
# list to tf.keras.layers.Add() to sum the values element-wise.
net = tf_agents.networks.Sequential([
NestMap({"inp1": layer1, "inp2": layer2}),
NestFlatten(),
tf.keras.layers.Add(),
])
combined_outputs, next_state = net({"inp1": inp1, "inp2": inp2}, state)
```
"""
return tf.keras.layers.Lambda(tf.nest.flatten)
class NestMap(network.Network):
"""The `NestMap` network processes nested inputs via nested layers.
It is a TF-Agents network that can be used to process nested inputs.
Stateful Keras layers (e.g. LSTMCell, RNN, LSTM, TF-Agents DynamicUnroll)
are all supported. The `state_spec` of `NestMap` has a structure matching
that of `nested_layers`.
`NestMap` can be used in conjunction with `NestFlatten` and a combiner
(e.g. `tf.keras.layers.Add` or `tf.keras.layers.Concatenate`) to process
and aggregate in a preprocessing step.
Usage:
```python
net = NestMap({"inp1": layer1, "inp2": layer2})
outputs, next_state = net({"inp1": inp1, "inp2": inp2}, state)
```
"""
def __init__(self,
nested_layers: types.NestedLayer,
input_spec: typing.Optional[types.NestedTensorSpec] = None,
name: typing.Optional[typing.Text] = None):
"""Create a Sequential Network.
Args:
nested_layers: A nest of layers and/or networks. These will be used
to process the inputs (input nest structure will have to match this
structure). Any layers that are subclasses of
`tf.keras.layers.{RNN,LSTM,GRU,...}` are wrapped in
`tf_agents.keras_layers.RNNWrapper`.
input_spec: (Optional.) A nest of `tf.TypeSpec` representing the
input observations. The structure of `input_spec` must match
that of `nested_layers`.
name: (Optional.) Network name.
Raises:
TypeError: If any of the layers are not instances of keras `Layer`.
ValueError: If `input_spec` is provided but its nest structure does
not match that of `nested_layers`.
RuntimeError: If not `tf.executing_eagerly()`; as this is required to
be able to create deep copies of layers in `layers`.
"""
if not tf.executing_eagerly():
raise RuntimeError(
'Not executing eagerly - cannot make deep copies of `nested_layers`.')
flat_nested_layers = tf.nest.flatten(nested_layers)
for layer in flat_nested_layers:
if not isinstance(layer, tf.keras.layers.Layer):
raise TypeError(
'Expected all layers to be instances of keras Layer, but saw'
': \'{}\''.format(layer))
if input_spec is not None:
nest_utils.assert_same_structure(
nested_layers, input_spec,
message=(
'`nested_layers` and `input_spec` do not have matching structures'
))
flat_input_spec = tf.nest.flatten(input_spec)
else:
flat_input_spec = [None] * len(flat_nested_layers)
# Wrap in Sequential if necessary.
flat_nested_layers = [
sequential.Sequential([m], s) if not isinstance(m, network.Network)
else m
for (s, m) in zip(flat_input_spec, flat_nested_layers)
]
flat_nested_layers_state_specs = [m.state_spec for m in flat_nested_layers]
nested_layers = tf.nest.pack_sequence_as(nested_layers, flat_nested_layers)
# We use flattened layers and states here instead of tf.nest.map_structure
# for several reasons. One is that we perform several operations against
# the layers and we want to avoid calling into tf.nest.map* multiple times.
# But the main reason is that network states have a different *structure*
# than the layers; e.g., `nested_layers` may just be tf.keras.layers.LSTM,
# but the states would then have structure `[.,.]`. Passing these in
# as args to tf.nest.map_structure causes it to fail. Instead we would
# have to use nest.map_structure_up_to -- but that function is not part
# of the public TF API. However, if we do everything in flatland and then
# use pack_sequence_as, we bypass the more rigid structure tests.
state_spec = tf.nest.pack_sequence_as(
nested_layers, flat_nested_layers_state_specs)
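    # Editor's illustration (an assumption, not from the original module):
    # for nested_layers = {'x': tf.keras.layers.LSTM(4)}, the wrapped
    # network's state spec is a two-element list, so state_spec here becomes
    # {'x': [spec_h, spec_c]} -- a deeper nest than nested_layers itself,
    # which is why the flatten/pack_sequence_as approach above is needed.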
super(NestMap, self).__init__(input_tensor_spec=input_spec,
state_spec=state_spec,
name=name)
self._nested_layers = nested_layers
@property
def nested_layers(self) -> types.NestedNetwork:
# Return a shallow copy so users don't modify the layers list.
return tf.nest.map_structure(lambda m: m, self._nested_layers)
def copy(self, **kwargs) -> 'NestMap':
"""Make a copy of a `NestMap` instance.
**NOTE** A copy of a `NestMap` instance always performs a deep copy
of the underlying layers, so the new instance will not share weights
with the original - but it will start with the same weights.
Args:
**kwargs: Args to override when recreating this network. Commonly
overridden args include 'name'.
Returns:
A deep copy of this network.
"""
new_kwargs = dict(self._saved_kwargs, **kwargs)
if 'nested_layers' not in new_kwargs:
new_nested_layers = [copy.deepcopy(m) for m in self._nested_layers]
new_kwargs['nested_layers'] = new_nested_layers
return type(self)(**new_kwargs)
def call(self, inputs, network_state=(), **kwargs):
nest_utils.assert_same_structure(
self._nested_layers, inputs,
allow_shallow_nest1=True,
message=(
'`self.nested_layers` and `inputs` do not have matching structures')
)
if network_state:
nest_utils.assert_same_structure(
self.state_spec, network_state,
allow_shallow_nest1=True,
message=(
'network_state and state_spec do not have matching structure'))
nested_layers_state = network_state
else:
nested_layers_state = tf.nest.map_structure(
lambda _: (), self._nested_layers)
# Here we must use map_structure_up_to because nested_layers_state has a
# "deeper" structure than self._nested_layers. For example, an LSTM
# layer's state is composed of a list with two tensors. The
# tf.nest.map_structure function would raise an error if two
# "incompatible" structures are passed in this way.
def _mapper(inp, layer, state): # pylint: disable=invalid-name
return layer(inp, network_state=state, **kwargs)
outputs_and_next_state = nest_utils.map_structure_up_to(
self._nested_layers, _mapper,
inputs, self._nested_layers, nested_layers_state)
flat_outputs_and_next_state = nest_utils.flatten_up_to(
self._nested_layers, outputs_and_next_state)
flat_outputs, flat_next_state = zip(*flat_outputs_and_next_state)
outputs = tf.nest.pack_sequence_as(
self._nested_layers, flat_outputs)
next_network_state = tf.nest.pack_sequence_as(
self._nested_layers, flat_next_state)
return outputs, next_network_state
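# --- Illustrative sketch added by the editor; not part of the original
# module. It wires up the NestMap + NestFlatten + combiner pattern described
# in the NestFlatten docstring above; the layer sizes are arbitrary
# assumptions.
#
#   import tensorflow.compat.v2 as tf
#   from tf_agents.networks import nest_map, sequential
#
#   net = sequential.Sequential([
#       nest_map.NestMap({'inp1': tf.keras.layers.Dense(8),
#                         'inp2': tf.keras.layers.Dense(8)}),
#       nest_map.NestFlatten(),
#       tf.keras.layers.Add(),
#   ])
#   combined, next_state = net({'inp1': x1, 'inp2': x2}, network_state=())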
| apache-2.0 | 7,011,251,411,921,298,000 | 38.744076 | 80 | 0.673503 | false |
AmI-2014/Python-Lab1 | fibonacci.py | 1 | 1295 | '''
Created on Mar 18, 2014
@author: Dario Bonino <dario.bonino@polito.it>
Copyright (c) 2014 Dario Bonino
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
'''
def fib(order):
# initialization, we use a tuple
a = (0, 1)
# the resulting array
fibonacci = []
# init while variable
i = 0
# fill the array
while i < order:
a = (a[1], a[0] + a[1])
fibonacci.append(a[0])
i += 1
return fibonacci
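# Worked example added by the editor (verifiable by tracing the loop above):
# fib(7) returns [1, 1, 2, 3, 5, 8, 13].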
if __name__ == '__main__':
# get the series order as a string
order_as_string = raw_input("Insert the Fibonacci series order:\n>")
# convert the string to an integer number
order = int(order_as_string)
# get the Fibonacci's series value
values = fib(order)
# print the values
print values
| apache-2.0 | -4,332,876,673,079,846,400 | 24.9 | 74 | 0.657143 | false |
mapzen/vector-datasource | integration-test/480-rest_area-services.py | 2 | 3053 | # -*- encoding: utf-8 -*-
from shapely.wkt import loads as wkt_loads
import dsl
from . import FixtureTest
class RestAreaServices(FixtureTest):
def test_rest_area_node(self):
self.generate_fixtures(dsl.way(159773030, wkt_loads('POINT (-76.73912905210828 40.99079246918038)'), {u'source': u'openstreetmap.org', u'highway': u'rest_area', u'name': u'Foo Rest Area'})) # noqa
self.assert_has_feature(
16, 18798, 24573, 'pois',
{'kind': 'rest_area', 'id': 159773030, 'min_zoom': 13})
def test_rest_area_way(self):
# Way: Crystal Springs Rest Area (97057565)
self.generate_fixtures(dsl.way(97057565, wkt_loads('POLYGON ((-122.365488944754 37.54048597695269, -122.363673359734 37.53894968362569, -122.363521634282 37.53881541316188, -122.363421831454 37.53868392047339, -122.363355445955 37.53852749658311, -122.363020823511 37.53696630264469, -122.36330406232 37.5367516065384, -122.365488944754 37.54048597695269))'), {u'drinking_water': u'yes', u'toilets': u'yes', u'handicapped_accessible': u'yes', u'vending': u'yes', u'name': u'Crystal Springs Rest Area', u'sanitation': u'no', u'area': u'yes', u'route': u'280', u'way_area': u'35229.6', u'pet_area': u'yes', u'phone': u'yes', u'picnic_tables': u'yes', u'source': u'openstreetmap.org', u'addr:county': u'San Mateo', u'attribution': u'Caltrans', u'caltrans:district': u'4', u'highway': u'rest_area', u'description': u'Near San Francisco Reservoir'})) # noqa
self.assert_has_feature(
16, 10492, 25385, 'landuse',
{'kind': 'rest_area', 'id': 97057565, 'sort_rank': 44})
def test_service_area_node(self):
# NOTE: this has been remapped as an area now. the test data here
# is superseded by the 1698-too-many-service-areas test.
# node: Tiffin River
self.generate_fixtures(dsl.way(200412620, wkt_loads('POINT (-84.41292493378698 41.6045519557572)'), {u'source': u'openstreetmap.org', u'name': u'Tiffin River', u'highway': u'services'})) # noqa
self.assert_has_feature(
16, 17401, 24424, 'pois',
{'kind': 'service_area', 'id': 200412620, 'min_zoom': 17})
def test_service_area_way(self):
# Way: Nicole Driveway (274732386)
self.generate_fixtures(dsl.way(274732386, wkt_loads('POLYGON ((-120.123766060274 38.09757738412661, -120.123761209371 38.0977196908478, -120.123658621766 38.0979925683359, -120.123633379106 38.0982482663423, -120.123585319239 38.098378271151, -120.123533216952 38.09837445372108, -120.123577234401 38.09825915310519, -120.123617928083 38.09797468287368, -120.123713957987 38.09768759586379, -120.123702639215 38.09747749355018, -120.123762826339 38.09746978790201, -120.123766060274 38.09757738412661))'), {u'source': u'openstreetmap.org', u'way_area': u'744.019', u'name': u'Nicole Driveway', u'highway': u'services', u'area': u'yes'})) # noqa
self.assert_has_feature(
16, 10900, 25256, 'landuse',
{'kind': 'service_area', 'id': 274732386, 'sort_rank': 45})
| mit | -6,235,184,751,376,499,000 | 77.282051 | 861 | 0.677039 | false |
janusnic/dj-21v | unit_07/mysite/blog/models.py | 1 | 1898 | from django.db import models
import datetime
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField(max_length=100, unique=True, verbose_name='slug')
description = models.TextField(max_length=4096)
def __str__(self):
return '%s' % (self.name)
class Tag(models.Model):
name = models.CharField(max_length=100, unique=True)
slug = models.SlugField(max_length=100, unique=True, verbose_name='slug')
def __str__(self):
return '%s' % (self.name)
@python_2_unicode_compatible
class Article(models.Model):
ARTICLE_STATUS = (
('D', 'Not Reviewed'),
('P', 'Published'),
('E', 'Expired'),
)
title = models.CharField(max_length=100, unique=True)
slug = models.SlugField(max_length=100, unique=True, verbose_name='slug')
content = models.TextField()
status = models.CharField(max_length=1, choices=ARTICLE_STATUS, default='D')
category = models.ForeignKey(Category, verbose_name="the related category")
tags = models.ManyToManyField(Tag, verbose_name="the related tags", related_name="keyword_set", blank=True)
views = models.IntegerField(default=0)
publish_date = models.DateTimeField(auto_now=True, editable=False, help_text="Please use the following format: <em>YYYY-MM-DD</em>.")
created_date = models.DateTimeField(auto_now_add=True, editable=False)
def was_published_recently(self):
return self.publish_date >= timezone.now() - datetime.timedelta(days=1)
was_published_recently.admin_order_field = 'publish_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
def __str__(self):
return '%s' % (self.title)
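# --- Illustrative sketch added by the editor; not part of the original app.
# Assumes a configured Django project with migrations applied; the field
# values are arbitrary examples that match the models defined above.
#
#   news = Category.objects.create(name='News', slug='news',
#                                  description='Site news')
#   tag = Tag.objects.create(name='django', slug='django')
#   post = Article.objects.create(title='Hello World', slug='hello-world',
#                                 content='First post', status='P',
#                                 category=news)
#   post.tags.add(tag)
#   post.was_published_recently()   # True immediately after saving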
| mit | 9,192,965,130,482,229,000 | 39.382979 | 137 | 0.68862 | false |