repo_name | ref | path | copies | content
---|---|---|---|---|
showyou/anzu | refs/heads/master | generator/tmpgenerator.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
exec_path = os.path.abspath(os.path.dirname(__file__)).rsplit("/",1)[0]
conf_path = exec_path+"/common/config.json"
import sys
sys.path.insert(0,exec_path)
from common import auth_api, model
from common import readReplyTable
import reply
# Generate text (or take an action) based on the analysis results
#import model
#import scheduler
import datetime
#from sqlalchemy import and_
import random
import string
import sys
import simplejson
def quickGenerate():
#sched = scheduler.Scheduler()
#sched.schedule()
u = LoadUserData(conf_path)
table, footer= readReplyTable.read(exec_path+"/common/replyTable.json")
dbSession = model.startSession(u)
if False:
if( sched.has_schedule() ):
str = doSchedule(sched)
else:
rep = dbSession.query(model.RetQueue)
if( rep.count() > 0 ):
str, reply_id = reply.do(table, rep,dbSession)
sendMessage(str,reply_id)
def LoadUserData(fileName):
#Open the file, read the data and convert it
#The data format is (user, password)
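# A hedged example of what config.json might contain, inferred from the keys
# used in sendMessage() below (the real config may hold additional fields):
#   {"consumer_token": "...", "consumer_secret": "..."}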
try:
file = open(fileName,'r')
a = simplejson.loads(file.read())
file.close()
except IOError:
print "IOError"
sys.exit(1)
return a
# Post a message to Twitter
def sendMessage(str, reply_id):
userdata = LoadUserData(conf_path)
tw = auth_api.connect(userdata["consumer_token"],
userdata["consumer_secret"], exec_path+"/common/")
str = string.replace(str,'yystart','')
str = string.replace(str,'yyend','')
tw.update_status(str, reply_id)
if __name__ == "__main__":
text = u"かきくけこ".encode("utf-8")
sendMessage(text,-1)
|
gribovskiy/CATS2 | refs/heads/master | scripts/settings-interface/settings_interface.py | 1 | import zmq
import threading
import time
class Request:
"""Set/get request for CATS2 settings variable."""
def __init__(self, requestType = '', requestData = ''):
self.requestType = requestType
self.requestData = requestData
class CatsSettingsInterface:
"""Provides the read/write access to CATS2 settings."""
def __init__(self):
self.posted_requests = set()
self.sent_requests = set()
self.lock = threading.Lock()
self.value_by_path = dict()
# Create and connect sockets
self.context = zmq.Context(1)
self.subscriber = self.context.socket(zmq.SUB)
self.subscriber.setsockopt(zmq.SUBSCRIBE, '')
# Connects to the address to listen to CATS
self.subscriber.connect('tcp://127.0.0.1:5558')
print('Settings subscriber connected!')
self.publisher = self.context.socket(zmq.PUB)
# Connects to the address to publish to CATS
self.publisher.connect('tcp://127.0.0.1:5557')
print('Settings publisher connected!')
self.incoming_thread = threading.Thread(target = self.recieve_settings)
self.outgoing_thread = threading.Thread(target = self.send_settings)
self.stop = False
self.incoming_thread.start()
self.outgoing_thread.start()
def recieve_settings(self):
"""Manages the incoming data stream from CATS.
Only 'set' requests that are replies to our 'get' requests are handled at
the moment.
"""
while not self.stop:
try:
[name, device, command, data] = self.subscriber.recv_multipart(flags=zmq.NOBLOCK)
print('Received settings from cats: ' + name + ';' + device + ';' + command + ';' + data)
if (command == 'set'):
self.lock.acquire()
values = []
if data:
settings = data.split(';')
for settings_value in settings:
values.append(float(settings_value))
self.value_by_path[device] = values
self.sent_requests.discard(device)
self.lock.release()
except zmq.ZMQError, e:
time.sleep(0.1)
continue
def send_settings(self):
"""Manages the outcoming data stream from CATS.
The requests to send are taken from posted_requests set, the values for
set requests are taken from value_by_path dictionary. Once sent the
request's path (it's kind of id) is moved to the sent_requests set.
"""
while not self.stop:
self.lock.acquire()
if (len(self.posted_requests) > 0):
request = self.posted_requests.pop()
self.lock.release()
if (request.requestType == 'get'):
data = ''
name = 'cats'
device = request.requestData
command = 'get'
# print('Sent request: ' + name + ';' + device + ';' + command + ';' + data)
self.lock.acquire()
self.sent_requests.add(request.requestData)
if request.requestData not in self.value_by_path:
self.value_by_path[request.requestData] = None
self.lock.release()
self.publisher.send_multipart([name, device, command, data])
elif (request.requestType == 'set'):
self.lock.acquire()
values = self.value_by_path[request.requestData]
self.lock.release()
data = ''
for value in values:
data += str(value) + ';'
# remove the last ';'
data = data[0:-1]
name = 'cats'
device = request.requestData
command = 'set'
self.publisher.send_multipart([name, device, command, data])
#print('Sent request: ' + name + ';' + device + ';' + command + ';' + data)
else:
self.lock.release()
time.sleep(0.1)
def get_value(self, path):
"""Requests the settings value from CATS until it received.
The resulted settings value is read from value_by_path dictionary.
"""
request = Request('get', path)
self.lock.acquire()
self.posted_requests.add(request)
# print('Post request: ' + request.requestType + ';' + request.requestData)
self.lock.release()
timeout = 0.1
time.sleep(timeout)
counter = timeout
waiting_answer = True
while waiting_answer:
counter += timeout
if (counter >= 1.):
counter = 0
request = Request('get', path)
self.lock.acquire()
self.posted_requests.add(request)
# print('Re-post request: ' + request.requestType + ';' + request.requestData)
self.lock.release()
time.sleep(timeout)
self.lock.acquire()
waiting_answer = (path in self.sent_requests) or (request in self.posted_requests)
self.lock.release()
return self.value_by_path[path]
def set_value(self, path, value):
"""Updates the settings value in CATS2."""
request = Request('set', path)
self.lock.acquire()
self.value_by_path[path] = value # we assume that this value will not be
# overwritten by the CATS settings before it is sent
self.posted_requests.add(request)
# print('Post request: ' + request.requestType + ';' + request.requestData)
self.lock.release()
def stop_all(self):
"""Stops all threads."""
self.stop = True
while self.outgoing_thread.isAlive():
time.sleep(0.1)
print('Settings sending thread finished')
while self.incoming_thread.isAlive():
time.sleep(0.1)
print('Settings receiving thread finished') |
alex/warehouse | refs/heads/master | warehouse/utils/compression.py | 3 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
from collections.abc import Sequence
ENCODINGS = ["identity", "gzip"]
DEFAULT_ENCODING = "identity"
BUFFER_MAX = 1 * 1024 * 1024 # We'll buffer up to 1MB
def _compressor(request, response):
# Skip items with a Vary: Cookie/Authorization Header because we don't know
# if they are safe from the CRIME attack.
if (response.vary is not None and
(set(response.vary) & {"Cookie", "Authorization"})):
return
# Avoid compression if we've already got a Content-Encoding.
if "Content-Encoding" in response.headers:
return
# Ensure that the Accept-Encoding header gets added to the response.
vary = set(response.vary if response.vary is not None else [])
vary.add("Accept-Encoding")
response.vary = vary
# Negotiate the correct encoding from our request.
target_encoding = request.accept_encoding.best_match(
ENCODINGS,
default_match=DEFAULT_ENCODING,
)
# If we have a Sequence, we'll assume that we aren't streaming the
# response because it's probably a list or similar.
streaming = not isinstance(response.app_iter, Sequence)
# If our streaming content is small enough to easily buffer in memory
# then we'll just convert it to a non streaming response.
if (streaming and response.content_length is not None and
response.content_length <= BUFFER_MAX):
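# Accessing response.body forces the app_iter to be read and buffered in
# memory, which effectively turns this into a non-streaming response.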
response.body
streaming = False
if streaming:
response.encode_content(encoding=target_encoding, lazy=True)
# We need to remove the content_length from this response, since
# we no longer know what the length of the content will be.
response.content_length = None
# If this has a streaming response, then we need to adjust the ETag
# header, if it has one, so that it reflects this. We don't just append
# ;gzip to this because we don't want people to try and use it to infer
# any information about it.
if response.etag is not None:
md5_digest = hashlib.md5((response.etag + ";gzip").encode("utf8"))
md5_digest = md5_digest.digest()
md5_digest = base64.b64encode(md5_digest)
md5_digest = md5_digest.replace(b"\n", b"").decode("utf8")
response.etag = md5_digest.strip("=")
else:
original_length = len(response.body)
response.encode_content(encoding=target_encoding, lazy=False)
# If the original length is less than our new, compressed length
# then we'll go back to the original. There is no reason to encode
# the content if it increases the length of the body.
if original_length < len(response.body):
response.decode_content()
# If we've added an encoding to the content, then we'll want to
# recompute the ETag.
if response.content_encoding is not None:
response.md5_etag()
def compression_tween_factory(handler, registry):
def compression_tween(request):
response = handler(request)
# We use a response callback here so that it happens after all of the
# other response callbacks are called. This is important because
# otherwise we won't be able to check Vary headers and such that are
# set by response callbacks.
request.add_response_callback(_compressor)
return response
return compression_tween
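# A hedged registration sketch (standard Pyramid tween setup; the exact call
# used by Warehouse's own configuration is not shown in this module):
#   config.add_tween("warehouse.utils.compression.compression_tween_factory")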
|
alexkogon/ansible | refs/heads/devel | v1/ansible/module_common.py | 83 | # (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# from python and deps
from cStringIO import StringIO
import inspect
import os
import shlex
# from Ansible
from ansible import errors
from ansible import utils
from ansible import constants as C
from ansible import __version__
from ansible.utils.unicode import to_bytes
REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = "# POWERSHELL_COMMON"
REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
REPLACER_SELINUX = "<<SELINUX_SPECIAL_FILESYSTEMS>>"
class ModuleReplacer(object):
"""
The Replacer is used to insert chunks of code into modules before
transfer. Rather than doing classical python imports, this allows for more
efficient transfer in a no-bootstrapping scenario by not moving extra files
over the wire, and also takes care of embedding arguments in the transferred
modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
All modules are required to import at least basic, though there will also
be other snippets.
# POWERSHELL_COMMON
Also results in the inclusion of the common code in powershell.ps1
"""
# ******************************************************************************
def __init__(self, strip_comments=False):
this_file = inspect.getfile(inspect.currentframe())
self.snippet_path = os.path.join(os.path.dirname(this_file), 'module_utils')
self.strip_comments = strip_comments # TODO: implement
# ******************************************************************************
def slurp(self, path):
if not os.path.exists(path):
raise errors.AnsibleError("imported module support code does not exist at %s" % path)
fd = open(path)
data = fd.read()
fd.close()
return data
def _find_snippet_imports(self, module_data, module_path):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_style = 'old'
if REPLACER in module_data:
module_style = 'new'
elif 'from ansible.module_utils.' in module_data:
module_style = 'new'
elif 'WANT_JSON' in module_data:
module_style = 'non_native_want_json'
output = StringIO()
lines = module_data.split('\n')
snippet_names = []
for line in lines:
if REPLACER in line:
output.write(self.slurp(os.path.join(self.snippet_path, "basic.py")))
snippet_names.append('basic')
if REPLACER_WINDOWS in line:
ps_data = self.slurp(os.path.join(self.snippet_path, "powershell.ps1"))
output.write(ps_data)
snippet_names.append('powershell')
elif line.startswith('from ansible.module_utils.'):
tokens=line.split(".")
import_error = False
if len(tokens) != 3:
import_error = True
if " import *" not in line:
import_error = True
if import_error:
raise errors.AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
snippet_name = tokens[2].split()[0]
snippet_names.append(snippet_name)
output.write(self.slurp(os.path.join(self.snippet_path, snippet_name + ".py")))
else:
if self.strip_comments and line.startswith("#") or line == '':
pass
output.write(line)
output.write("\n")
if not module_path.endswith(".ps1"):
# Unixy modules
if len(snippet_names) > 0 and not 'basic' in snippet_names:
raise errors.AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
else:
# Windows modules
if len(snippet_names) > 0 and not 'powershell' in snippet_names:
raise errors.AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)
return (output.getvalue(), module_style)
# ******************************************************************************
def modify_module(self, module_path, complex_args, module_args, inject):
with open(module_path) as f:
# read in the module source
module_data = f.read()
(module_data, module_style) = self._find_snippet_imports(module_data, module_path)
complex_args_json = utils.jsonify(complex_args)
# We force conversion of module_args to str because module_common calls shlex.split,
# a standard library function that incorrectly handles Unicode input before Python 2.7.3.
# Note: it would be better to do all this conversion at the border
# (when the data is originally parsed into data structures) but
# it's currently coming from too many sources to make that
# effective.
try:
encoded_args = repr(module_args.encode('utf-8'))
except UnicodeDecodeError:
encoded_args = repr(module_args)
try:
encoded_complex = repr(complex_args_json.encode('utf-8'))
except UnicodeDecodeError:
encoded_complex = repr(complex_args_json)
# these strings should be part of the 'basic' snippet which is required to be included
module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
module_data = module_data.replace(REPLACER_SELINUX, ','.join(C.DEFAULT_SELINUX_SPECIAL_FS))
module_data = module_data.replace(REPLACER_ARGS, encoded_args)
module_data = module_data.replace(REPLACER_COMPLEX, encoded_complex)
if module_style == 'new':
facility = C.DEFAULT_SYSLOG_FACILITY
if 'ansible_syslog_facility' in inject:
facility = inject['ansible_syslog_facility']
module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
lines = module_data.split("\n")
shebang = None
if lines[0].startswith("#!"):
shebang = lines[0].strip()
args = shlex.split(str(shebang[2:]))
interpreter = args[0]
interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
if interpreter_config in inject:
interpreter = to_bytes(inject[interpreter_config], errors='strict')
lines[0] = shebang = "#!%s %s" % (interpreter, " ".join(args[1:]))
module_data = "\n".join(lines)
return (module_data, module_style, shebang)
|
ThangBK2009/android-source-browsing.platform--external--gtest | refs/heads/master | test/gtest_env_var_test.py | 2408 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs gtest_env_var_test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
SetEnvVar(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
SetEnvVar(env_var, None)
AssertEq(default_val, GetFlag(flag))
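# For example, TestFlag('color', 'yes', 'auto') sets GTEST_COLOR=yes, expects
# the test binary to report 'yes' for the color flag, then unsets the variable
# and expects the default value 'auto'.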
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
TestFlag('filter', 'FooTest.Bar', '*')
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
TestFlag('catch_exceptions', '0', '1')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
if __name__ == '__main__':
gtest_test_utils.Main()
|
hxford/bcloud | refs/heads/master | bcloud/const.py | 10 |
# Copyright (C) 2014-2015 LiuLang <gsushzhsosgsu@gmail.com>
# Use of this source code is governed by GPLv3 license that can be found
# in http://www.gnu.org/licenses/gpl-3.0.html
'''
This module holds constants that are shared by the network connection code.
Constants related to the UI live in Config.py.
'''
from bcloud import Config
_ = Config._
BAIDU_URL = 'http://www.baidu.com/'
PASSPORT_BASE = 'https://passport.baidu.com/'
PASSPORT_URL = PASSPORT_BASE + 'v2/api/'
PASSPORT_LOGIN = PASSPORT_BASE + 'v2/api/?login'
REFERER = PASSPORT_BASE + 'v2/?login'
#USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:30.0) Gecko/20100101 Firefox/30.0'
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:31.0) Gecko/20100101 Firefox/31.0 Iceweasel/31.2.0'
PAN_URL = 'http://pan.baidu.com/'
PAN_API_URL = PAN_URL + 'api/'
PAN_REFERER = 'http://pan.baidu.com/disk/home'
SHARE_REFERER = PAN_URL + 'share/manage'
# General server hostname
PCS_URL = 'http://pcs.baidu.com/rest/2.0/pcs/'
# Upload server hostname
PCS_URL_C = 'http://c.pcs.baidu.com/rest/2.0/pcs/'
PCS_URLS_C = 'https://c.pcs.baidu.com/rest/2.0/pcs/'
# Download server hostname
PCS_URL_D = 'http://d.pcs.baidu.com/rest/2.0/pcs/'
## The following constants mimic the parameters of the PC client.
CHANNEL_URL = 'https://channel.api.duapp.com/rest/2.0/channel/channel?'
PC_USER_AGENT = 'netdisk;4.5.0.7;PC;PC-Windows;5.1.2600;WindowsBaiduYunGuanJia'
PC_DEVICE_ID = '08002788772E'
PC_DEVICE_NAME = '08002788772E'
PC_DEVICE_TYPE = '2'
PC_CLIENT_TYPE = '8'
PC_APP_ID = '1981342'
PC_DEVUID = 'BDIMXV2%2DO%5FFD60326573E54779892088D1378B27C6%2DC%5F0%2DD%5F42563835636437366130302d6662616539362064%2DM%5F08002788772E%2DV%5F0C94CA83'
PC_VERSION = '4.5.0.7'
## Constants used when making HTTP requests
CONTENT_FORM = 'application/x-www-form-urlencoded'
CONTENT_FORM_UTF8 = CONTENT_FORM + '; charset=UTF-8'
ACCEPT_HTML = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
ACCEPT_JSON = 'application/json, text/javascript, */*; q=0.8'
class State:
'''Download state constants'''
DOWNLOADING = 0
WAITING = 1
PAUSED = 2
FINISHED = 3
CANCELED = 4
ERROR = 5
class UploadState:
UPLOADING = 0
WAITING = 1
PAUSED = 2
FINISHED = 3
CANCELED = 4
ERROR = 5
class UploadMode:
'''How to handle an upload when a file with the same name already exists on the server'''
IGNORE = 0
OVERWRITE = 1
NEWCOPY = 2
DownloadMode = UploadMode
UPLOAD_ONDUP = ('', 'overwrite', 'newcopy')
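# For example, UPLOAD_ONDUP[UploadMode.OVERWRITE] == 'overwrite'; the tuple
# maps an upload mode to its 'ondup' string (assumed to be the value passed
# on to the PCS API).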
# View modes
ICON_VIEW, TREE_VIEW = 0, 1
class ValidatePathState:
'''File path validation results'''
OK = 0
LENGTH_ERROR = 1
CHAR_ERROR2 = 2
CHAR_ERROR3 = 3
ValidatePathStateText = (
'',
_('Max characters in filepath shall be no more than 1000'),
_('Filepath should not contain \\ ? | " > < : *'),
_('\\r \\n \\t \\0 \\x0B or SPACE should not appear at the start or end of the filename'),
)
class TargetInfo:
'''Drag-and-drop target IDs'''
URI_LIST = 0
PLAIN_TEXT = 1
RAW = 2
TEXT_JSON = 3
class TargetType:
'''Drag-and-drop target types'''
URI_LIST = 'text/uri-list'
PLAIN_TEXT = 'text/plain'
RAW = 'application/octet-stream'
TEXT_JSON = 'application/json'
|
Cuuuurzel/KiPyCalc | refs/heads/master | sympy/vector/functions.py | 66 | from sympy.vector.coordsysrect import CoordSysCartesian
from sympy.vector.dyadic import Dyadic
from sympy.vector.vector import Vector, BaseVector
from sympy.vector.scalar import BaseScalar
from sympy import sympify, diff, integrate, S
def express(expr, system, system2=None, variables=False):
"""
Global function for 'express' functionality.
Re-expresses a Vector, Dyadic or scalar(sympyfiable) in the given
coordinate system.
If 'variables' is True, then the coordinate variables (base scalars)
of other coordinate systems present in the vector/scalar field or
dyadic are also substituted in terms of the base scalars of the
given system.
Parameters
==========
expr : Vector/Dyadic/scalar(sympyfiable)
The expression to re-express in CoordSysCartesian 'system'
system: CoordSysCartesian
The coordinate system the expr is to be expressed in
system2: CoordSysCartesian
The other coordinate system required for re-expression
(only for a Dyadic Expr)
variables : boolean
Specifies whether to substitute the coordinate variables present
in expr, in terms of those of parameter system
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import Symbol, cos, sin
>>> N = CoordSysCartesian('N')
>>> q = Symbol('q')
>>> B = N.orient_new_axis('B', q, N.k)
>>> from sympy.vector import express
>>> express(B.i, N)
(cos(q))*N.i + (sin(q))*N.j
>>> express(N.x, B, variables=True)
B.x*cos(q) - B.y*sin(q)
>>> d = N.i.outer(N.i)
>>> express(d, B, N) == (cos(q))*(B.i|N.i) + (-sin(q))*(B.j|N.i)
True
"""
if expr == 0 or expr == Vector.zero:
return expr
if not isinstance(system, CoordSysCartesian):
raise TypeError("system should be a CoordSysCartesian \
instance")
if isinstance(expr, Vector):
if system2 is not None:
raise ValueError("system2 should not be provided for \
Vectors")
#Given expr is a Vector
if variables:
#If variables attribute is True, substitute
#the coordinate variables in the Vector
system_list = []
for x in expr.atoms():
if (isinstance(x, (BaseScalar, BaseVector))
and x.system != system):
system_list.append(x.system)
system_list = set(system_list)
subs_dict = {}
for f in system_list:
subs_dict.update(f.scalar_map(system))
expr = expr.subs(subs_dict)
#Re-express in this coordinate system
outvec = Vector.zero
parts = expr.separate()
for x in parts:
if x != system:
temp = system.rotation_matrix(x) * parts[x].to_matrix(x)
outvec += matrix_to_vector(temp, system)
else:
outvec += parts[x]
return outvec
elif isinstance(expr, Dyadic):
if system2 is None:
system2 = system
if not isinstance(system2, CoordSysCartesian):
raise TypeError("system2 should be a CoordSysCartesian \
instance")
outdyad = Dyadic.zero
var = variables
for k, v in expr.components.items():
outdyad += (express(v, system, variables=var) *
(express(k.args[0], system, variables=var) |
express(k.args[1], system2, variables=var)))
return outdyad
else:
if system2 is not None:
raise ValueError("system2 should not be provided for \
Vectors")
if variables:
#Given expr is a scalar field
system_set = set([])
expr = sympify(expr)
#Substitute all the coordinate variables
for x in expr.atoms():
if isinstance(x, BaseScalar) and x.system != system:
system_set.add(x.system)
subs_dict = {}
for f in system_set:
subs_dict.update(f.scalar_map(system))
return expr.subs(subs_dict)
return expr
def curl(vect, coord_sys):
"""
Returns the curl of a vector field computed wrt the base scalars
of the given coordinate system.
Parameters
==========
vect : Vector
The vector operand
coord_sys : CoordSysCartesian
The coordinate system to calculate the curl in
Examples
========
>>> from sympy.vector import CoordSysCartesian, curl
>>> R = CoordSysCartesian('R')
>>> v1 = R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k
>>> curl(v1, R)
0
>>> v2 = R.x*R.y*R.z*R.i
>>> curl(v2, R)
R.x*R.y*R.j + (-R.x*R.z)*R.k
"""
return coord_sys.delop.cross(vect).doit()
def divergence(vect, coord_sys):
"""
Returns the divergence of a vector field computed wrt the base
scalars of the given coordinate system.
Parameters
==========
vect : Vector
The vector operand
coord_sys : CoordSysCartesian
The coordinate system to calculate the divergence in
Examples
========
>>> from sympy.vector import CoordSysCartesian, divergence
>>> R = CoordSysCartesian('R')
>>> v1 = R.x*R.y*R.z * (R.i+R.j+R.k)
>>> divergence(v1, R)
R.x*R.y + R.x*R.z + R.y*R.z
>>> v2 = 2*R.y*R.z*R.j
>>> divergence(v2, R)
2*R.z
"""
return coord_sys.delop.dot(vect).doit()
def gradient(scalar, coord_sys):
"""
Returns the vector gradient of a scalar field computed wrt the
base scalars of the given coordinate system.
Parameters
==========
scalar : SymPy Expr
The scalar field to compute the gradient of
coord_sys : CoordSysCartesian
The coordinate system to calculate the gradient in
Examples
========
>>> from sympy.vector import CoordSysCartesian, gradient
>>> R = CoordSysCartesian('R')
>>> s1 = R.x*R.y*R.z
>>> gradient(s1, R)
R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k
>>> s2 = 5*R.x**2*R.z
>>> gradient(s2, R)
10*R.x*R.z*R.i + 5*R.x**2*R.k
"""
return coord_sys.delop(scalar).doit()
def is_conservative(field):
"""
Checks if a field is conservative.
Parameters
==========
field : Vector
The field to check for conservative property
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy.vector import is_conservative
>>> R = CoordSysCartesian('R')
>>> is_conservative(R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k)
True
>>> is_conservative(R.z*R.j)
False
"""
#Field is conservative irrespective of system
#Take the first coordinate system in the result of the
#separate method of Vector
if not isinstance(field, Vector):
raise TypeError("field should be a Vector")
if field == Vector.zero:
return True
coord_sys = list(field.separate())[0]
return curl(field, coord_sys).simplify() == Vector.zero
def is_solenoidal(field):
"""
Checks if a field is solenoidal.
Parameters
==========
field : Vector
The field to check for solenoidal property
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy.vector import is_solenoidal
>>> R = CoordSysCartesian('R')
>>> is_solenoidal(R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k)
True
>>> is_solenoidal(R.y * R.j)
False
"""
#Field is solenoidal irrespective of system
#Take the first coordinate system in the result of the
#separate method in Vector
if not isinstance(field, Vector):
raise TypeError("field should be a Vector")
if field == Vector.zero:
return True
coord_sys = list(field.separate())[0]
return divergence(field, coord_sys).simplify() == S(0)
def scalar_potential(field, coord_sys):
"""
Returns the scalar potential function of a field in a given
coordinate system (without the added integration constant).
Parameters
==========
field : Vector
The vector field whose scalar potential function is to be
calculated
coord_sys : CoordSysCartesian
The coordinate system to do the calculation in
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy.vector import scalar_potential, gradient
>>> R = CoordSysCartesian('R')
>>> scalar_potential(R.k, R) == R.z
True
>>> scalar_field = 2*R.x**2*R.y*R.z
>>> grad_field = gradient(scalar_field, R)
>>> scalar_potential(grad_field, R)
2*R.x**2*R.y*R.z
"""
#Check whether field is conservative
if not is_conservative(field):
raise ValueError("Field is not conservative")
if field == Vector.zero:
return S(0)
#Express the field entirely in coord_sys
#Substitute coordinate variables also
if not isinstance(coord_sys, CoordSysCartesian):
raise TypeError("coord_sys must be a CoordSysCartesian")
field = express(field, coord_sys, variables=True)
dimensions = coord_sys.base_vectors()
scalars = coord_sys.base_scalars()
#Calculate scalar potential function
temp_function = integrate(field.dot(dimensions[0]), scalars[0])
for i, dim in enumerate(dimensions[1:]):
partial_diff = diff(temp_function, scalars[i + 1])
partial_diff = field.dot(dim) - partial_diff
temp_function += integrate(partial_diff, scalars[i + 1])
return temp_function
def scalar_potential_difference(field, coord_sys, point1, point2):
"""
Returns the scalar potential difference between two points in a
certain coordinate system, wrt a given field.
If a scalar field is provided, its values at the two points are
considered. If a conservative vector field is provided, the values
of its scalar potential function at the two points are used.
Returns (potential at point2) - (potential at point1)
The position vectors of the two Points are calculated wrt the
origin of the coordinate system provided.
Parameters
==========
field : Vector/Expr
The field to calculate wrt
coord_sys : CoordSysCartesian
The coordinate system to do the calculations in
point1 : Point
The initial Point in given coordinate system
point2 : Point
The second Point in the given coordinate system
Examples
========
>>> from sympy.vector import CoordSysCartesian, Point
>>> from sympy.vector import scalar_potential_difference
>>> R = CoordSysCartesian('R')
>>> P = R.origin.locate_new('P', R.x*R.i + R.y*R.j + R.z*R.k)
>>> vectfield = 4*R.x*R.y*R.i + 2*R.x**2*R.j
>>> scalar_potential_difference(vectfield, R, R.origin, P)
2*R.x**2*R.y
>>> Q = R.origin.locate_new('O', 3*R.i + R.j + 2*R.k)
>>> scalar_potential_difference(vectfield, R, P, Q)
-2*R.x**2*R.y + 18
"""
if not isinstance(coord_sys, CoordSysCartesian):
raise TypeError("coord_sys must be a CoordSysCartesian")
if isinstance(field, Vector):
#Get the scalar potential function
scalar_fn = scalar_potential(field, coord_sys)
else:
#Field is a scalar
scalar_fn = field
#Express positions in required coordinate system
origin = coord_sys.origin
position1 = express(point1.position_wrt(origin), coord_sys,
variables=True)
position2 = express(point2.position_wrt(origin), coord_sys,
variables=True)
#Get the two positions as substitution dicts for coordinate variables
subs_dict1 = {}
subs_dict2 = {}
scalars = coord_sys.base_scalars()
for i, x in enumerate(coord_sys.base_vectors()):
subs_dict1[scalars[i]] = x.dot(position1)
subs_dict2[scalars[i]] = x.dot(position2)
return scalar_fn.subs(subs_dict2) - scalar_fn.subs(subs_dict1)
def matrix_to_vector(matrix, system):
"""
Converts a vector in matrix form to a Vector instance.
It is assumed that the elements of the Matrix represent the
measure numbers of the components of the vector along basis
vectors of 'system'.
Parameters
==========
matrix : SymPy Matrix, Dimensions: (3, 1)
The matrix to be converted to a vector
system : CoordSysCartesian
The coordinate system the vector is to be defined in
Examples
========
>>> from sympy import ImmutableMatrix as Matrix
>>> m = Matrix([1, 2, 3])
>>> from sympy.vector import CoordSysCartesian, matrix_to_vector
>>> C = CoordSysCartesian('C')
>>> v = matrix_to_vector(m, C)
>>> v
C.i + 2*C.j + 3*C.k
>>> v.to_matrix(C) == m
True
"""
outvec = Vector.zero
vects = system.base_vectors()
for i, x in enumerate(matrix):
outvec += x * vects[i]
return outvec
def _path(from_object, to_object):
"""
Calculates the 'path' of objects starting from 'from_object'
to 'to_object', along with the index of the first common
ancestor in the tree.
Returns (index, list) tuple.
"""
if from_object._root != to_object._root:
raise ValueError("No connecting path found between " +
str(from_object) + " and " + str(to_object))
other_path = []
obj = to_object
while obj._parent is not None:
other_path.append(obj)
obj = obj._parent
other_path.append(obj)
object_set = set(other_path)
from_path = []
obj = from_object
while obj not in object_set:
from_path.append(obj)
obj = obj._parent
index = len(from_path)
i = other_path.index(obj)
while i >= 0:
from_path.append(other_path[i])
i -= 1
return index, from_path
|
diofeher/django-nfa | refs/heads/master | build/lib/django/contrib/localflavor/nl/nl_provinces.py | 15 | from django.utils.translation import ugettext_lazy as _
PROVINCE_CHOICES = (
('DR', _('Drente')),
('FL', _('Flevoland')),
('FR', _('Friesland')),
('GL', _('Gelderland')),
('GR', _('Groningen')),
('LB', _('Limburg')),
('NB', _('Noord-Brabant')),
('NH', _('Noord-Holland')),
('OV', _('Overijssel')),
('UT', _('Utrecht')),
('ZE', _('Zeeland')),
('ZH', _('Zuid-Holland')),
)
|
kitanata/resume | refs/heads/master | env/lib/python2.7/site-packages/pip/index.py | 343 | """Routines related to PyPI, indexes"""
import sys
import os
import re
import mimetypes
import posixpath
from pip.log import logger
from pip.util import Inf, normalize_name, splitext, is_prerelease
from pip.exceptions import (DistributionNotFound, BestVersionAlreadyInstalled,
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.backwardcompat import urlparse, url2pathname
from pip.download import PipSession, url_to_path, path_to_url
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip._vendor import html5lib, requests, pkg_resources
from pip._vendor.requests.exceptions import SSLError
__all__ = ['PackageFinder']
DEFAULT_MIRROR_HOSTNAME = "last.pypi.python.org"
INSECURE_SCHEMES = {
"http": ["https"],
}
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links
"""
def __init__(self, find_links, index_urls,
use_wheel=True, allow_external=[], allow_unverified=[],
allow_all_external=False, allow_all_prereleases=False,
process_dependency_links=False, session=None):
self.find_links = find_links
self.index_urls = index_urls
self.dependency_links = []
self.cache = PageCache()
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.use_wheel = use_wheel
# Do we allow (safe and verifiable) externally hosted files?
self.allow_external = set(normalize_name(n) for n in allow_external)
# Which names are allowed to install insecure and unverifiable files?
self.allow_unverified = set(
normalize_name(n) for n in allow_unverified
)
# Anything that is allowed unverified is also allowed external
self.allow_external |= self.allow_unverified
# Do we allow all (safe and verifiable) externally hosted files?
self.allow_all_external = allow_all_external
# Stores if we ignored any external links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_external = False
# Stores if we ignored any unsafe links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_unverified = False
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
self._have_warned_dependency_links = False
# The Session we'll use to make requests
self.session = session or PipSession()
def add_dependency_links(self, links):
## FIXME: this shouldn't be global list this, it should only
## apply to requirements of the package that specifies the
## dependency_links value
## FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
if not self._have_warned_dependency_links:
logger.deprecated(
"1.6",
"Dependency Links processing has been deprecated with an "
"accelerated time schedule and will be removed in pip 1.6",
)
self._have_warned_dependency_links = True
self.dependency_links.extend(links)
def _sort_locations(self, locations):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
is_find_link = url in self.find_links
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if is_find_link and os.path.isdir(path):
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url and os.path.isdir(path):
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def _link_sort_key(self, link_tuple):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
parsed_version, link, _ = link_tuple
if self.use_wheel:
support_num = len(supported_tags)
if link == INSTALLED_VERSION:
pri = 1
elif link.ext == wheel_ext:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel("%s is not a supported wheel for this platform. It can't be sorted." % wheel.filename)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (parsed_version, pri)
else:
return parsed_version
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the existing ordering as secondary.
See the docstring for `_link_sort_key` for details.
This function is isolated for easier unit testing.
"""
return sorted(applicable_versions, key=self._link_sort_key, reverse=True)
def find_requirement(self, req, upgrade):
def mkurl_pypi_url(url):
loc = posixpath.join(url, url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
url_name = req.url_name
# Only check main index if index URL is given:
main_index_url = None
if self.index_urls:
# Check that we have the url_name correctly spelled:
main_index_url = Link(mkurl_pypi_url(self.index_urls[0]), trusted=True)
# This will also cache the page, so it's okay that we get it again later:
page = self._get_page(main_index_url, req)
if page is None:
url_name = self._find_url_name(Link(self.index_urls[0], trusted=True), url_name, req) or req.url_name
if url_name is not None:
locations = [
mkurl_pypi_url(url)
for url in self.index_urls] + self.find_links
else:
locations = list(self.find_links)
for version in req.absolute_versions:
if url_name is not None and main_index_url is not None:
locations = [
posixpath.join(main_index_url.url, version)] + locations
file_locations, url_locations = self._sort_locations(locations)
_flocations, _ulocations = self._sort_locations(self.dependency_links)
file_locations.extend(_flocations)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
locations = [Link(url, trusted=True) for url in url_locations]
# We explicitly do not trust links that came from dependency_links
locations.extend([Link(url) for url in _ulocations])
logger.debug('URLs to search for versions for %s:' % req)
for location in locations:
logger.debug('* %s' % location)
# Determine if this url used a secure transport mechanism
parsed = urlparse.urlparse(str(location))
if parsed.scheme in INSECURE_SCHEMES:
secure_schemes = INSECURE_SCHEMES[parsed.scheme]
if len(secure_schemes) == 1:
ctx = (location, parsed.scheme, secure_schemes[0],
parsed.netloc)
logger.warn("%s uses an insecure transport scheme (%s). "
"Consider using %s if %s has it available" %
ctx)
elif len(secure_schemes) > 1:
ctx = (location, parsed.scheme, ", ".join(secure_schemes),
parsed.netloc)
logger.warn("%s uses an insecure transport scheme (%s). "
"Consider using one of %s if %s has any of "
"them available" % ctx)
else:
ctx = (location, parsed.scheme)
logger.warn("%s uses an insecure transport scheme (%s)." %
ctx)
found_versions = []
found_versions.extend(
self._package_versions(
# We trust every directly linked archive in find_links
[Link(url, '-f', trusted=True) for url in self.find_links], req.name.lower()))
page_versions = []
for page in self._get_pages(locations, req):
logger.debug('Analyzing links from page %s' % page.url)
logger.indent += 2
try:
page_versions.extend(self._package_versions(page.links, req.name.lower()))
finally:
logger.indent -= 2
dependency_versions = list(self._package_versions(
[Link(url) for url in self.dependency_links], req.name.lower()))
if dependency_versions:
logger.info('dependency_links found: %s' % ', '.join([link.url for parsed, link, version in dependency_versions]))
file_versions = list(self._package_versions(
[Link(url) for url in file_locations], req.name.lower()))
if not found_versions and not page_versions and not dependency_versions and not file_versions:
logger.fatal('Could not find any downloads that satisfy the requirement %s' % req)
if self.need_warn_external:
logger.warn("Some externally hosted files were ignored (use "
"--allow-external %s to allow)." % req.name)
if self.need_warn_unverified:
logger.warn("Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow)." %
req.name)
raise DistributionNotFound('No distributions at all found for %s' % req)
installed_version = []
if req.satisfied_by is not None:
installed_version = [(req.satisfied_by.parsed_version, INSTALLED_VERSION, req.satisfied_by.version)]
if file_versions:
file_versions.sort(reverse=True)
logger.info('Local files found: %s' % ', '.join([url_to_path(link.url) for parsed, link, version in file_versions]))
#this is an intentional priority ordering
all_versions = installed_version + file_versions + found_versions + page_versions + dependency_versions
applicable_versions = []
for (parsed_version, link, version) in all_versions:
if version not in req.req:
logger.info("Ignoring link %s, version %s doesn't match %s"
% (link, version, ','.join([''.join(s) for s in req.req.specs])))
continue
elif is_prerelease(version) and not (self.allow_all_prereleases or req.prereleases):
# If this version isn't the already installed one, then
# ignore it if it's a pre-release.
if link is not INSTALLED_VERSION:
logger.info("Ignoring link %s, version %s is a pre-release (use --pre to allow)." % (link, version))
continue
applicable_versions.append((parsed_version, link, version))
applicable_versions = self._sort_versions(applicable_versions)
existing_applicable = bool([link for parsed_version, link, version in applicable_versions if link is INSTALLED_VERSION])
if not upgrade and existing_applicable:
if applicable_versions[0][1] is INSTALLED_VERSION:
logger.info('Existing installed version (%s) is most up-to-date and satisfies requirement'
% req.satisfied_by.version)
else:
logger.info('Existing installed version (%s) satisfies requirement (most up-to-date version is %s)'
% (req.satisfied_by.version, applicable_versions[0][2]))
return None
if not applicable_versions:
logger.fatal('Could not find a version that satisfies the requirement %s (from versions: %s)'
% (req, ', '.join([version for parsed_version, link, version in all_versions])))
if self.need_warn_external:
logger.warn("Some externally hosted files were ignored (use "
"--allow-external to allow).")
if self.need_warn_unverified:
logger.warn("Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow)." %
req.name)
raise DistributionNotFound('No distributions matching the version for %s' % req)
if applicable_versions[0][1] is INSTALLED_VERSION:
# We have an existing version, and its the best version
logger.info('Installed version (%s) is most up-to-date (past versions: %s)'
% (req.satisfied_by.version, ', '.join([version for parsed_version, link, version in applicable_versions[1:]]) or 'none'))
raise BestVersionAlreadyInstalled
if len(applicable_versions) > 1:
logger.info('Using version %s (newest of versions: %s)' %
(applicable_versions[0][2], ', '.join([version for parsed_version, link, version in applicable_versions])))
selected_version = applicable_versions[0][1]
if (selected_version.internal is not None
and not selected_version.internal):
logger.warn("%s an externally hosted file and may be "
"unreliable" % req.name)
if (selected_version.verifiable is not None
and not selected_version.verifiable):
logger.warn("%s is potentially insecure and "
"unverifiable." % req.name)
if selected_version._deprecated_regex:
logger.deprecated(
"1.7",
"%s discovered using a deprecated method of parsing, "
"in the future it will no longer be discovered" % req.name
)
return selected_version
def _find_url_name(self, index_url, url_name, req):
"""Finds the true URL name of a package, when the given name isn't quite correct.
This is usually used to implement case-insensitivity."""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
## FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url, req)
if page is None:
logger.fatal('Cannot fetch index base URL %s' % index_url)
return
norm_name = normalize_name(req.url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.notify('Real name of requirement %s is %s' % (url_name, base))
return base
return None
def _get_pages(self, locations, req):
"""
Yields (page, page_url) from the given locations, skipping
locations that have errors, and adding download/homepage links
"""
all_locations = list(locations)
seen = set()
while all_locations:
location = all_locations.pop(0)
if location in seen:
continue
seen.add(location)
page = self._get_page(location, req)
if page is None:
continue
yield page
for link in page.rel_links():
normalized = normalize_name(req.name).lower()
if (not normalized in self.allow_external
and not self.allow_all_external):
self.need_warn_external = True
logger.debug("Not searching %s for files because external "
"urls are disallowed." % link)
continue
if (link.trusted is not None
and not link.trusted
and not normalized in self.allow_unverified):
logger.debug("Not searching %s for urls, it is an "
"untrusted link and cannot produce safe or "
"verifiable files." % link)
self.need_warn_unverified = True
continue
all_locations.append(link)
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates"
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search_name):
for link in self._sort_links(links):
for v in self._link_package_versions(link, search_name):
yield v
def _known_extensions(self):
extensions = ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip')
if self.use_wheel:
return extensions + (wheel_ext,)
return extensions
def _link_package_versions(self, link, search_name):
"""
Return an iterable of triples (pkg_resources_version_key,
link, python_version) that can be extracted from the given
link.
Meant to be overridden by subclasses, not called by clients.
"""
platform = get_platform()
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
else:
egg_info, ext = link.splitext()
if not ext:
if link not in self.logged_links:
logger.debug('Skipping link %s; not a file' % link)
self.logged_links.add(link)
return []
if egg_info.endswith('.tar'):
# Special double-extension case:
egg_info = egg_info[:-4]
ext = '.tar' + ext
if ext not in self._known_extensions():
if link not in self.logged_links:
logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
self.logged_links.add(link)
return []
if "macosx10" in link.path and ext == '.zip':
if link not in self.logged_links:
logger.debug('Skipping link %s; macosx10 one' % (link))
self.logged_links.add(link)
return []
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
logger.debug('Skipping %s because the wheel filename is invalid' % link)
return []
if wheel.name.lower() != search_name.lower():
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
if not wheel.supported():
logger.debug('Skipping %s because it is not compatible with this Python' % link)
return []
# This is a dirty hack to prevent installing Binary Wheels from
# PyPI unless it is a Windows or Mac Binary Wheel. This is
# paired with a change to PyPI disabling uploads for the
# same. Once we have a mechanism for enabling support for binary
# wheels on linux that deals with the inherent problems of
# binary distribution this can be removed.
comes_from = getattr(link, "comes_from", None)
if ((
not platform.startswith('win')
and not platform.startswith('macosx')
)
and comes_from is not None
and urlparse.urlparse(comes_from.url).netloc.endswith(
"pypi.python.org")):
if not wheel.supported(tags=supported_tags_noarch):
logger.debug(
"Skipping %s because it is a pypi-hosted binary "
"Wheel on an unsupported platform" % link
)
return []
version = wheel.version
if not version:
version = self._egg_info_matches(egg_info, search_name, link)
if version is None:
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
if (link.internal is not None
and not link.internal
and not normalize_name(search_name).lower() in self.allow_external
and not self.allow_all_external):
# We have a link that we are sure is external, so we should skip
# it unless we are allowing externals
logger.debug("Skipping %s because it is externally hosted." % link)
self.need_warn_external = True
return []
if (link.verifiable is not None
and not link.verifiable
and not (normalize_name(search_name).lower()
in self.allow_unverified)):
# We have a link that we are sure we cannot verify the integrity of,
# so we should skip it unless we are allowing unsafe installs
# for this requirement.
logger.debug("Skipping %s because it is an insecure and "
"unverifiable file." % link)
self.need_warn_unverified = True
return []
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
logger.debug('Skipping %s because Python version is incorrect' % link)
return []
logger.debug('Found link %s, version: %s' % (link, version))
return [(pkg_resources.parse_version(version),
link,
version)]
def _egg_info_matches(self, egg_info, search_name, link):
match = self._egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s' % link)
return None
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
def _get_page(self, link, req):
return HTMLPage.get_page(link, req,
cache=self.cache,
session=self.session,
)
class PageCache(object):
"""Cache of HTML pages"""
failure_limit = 3
def __init__(self):
self._failures = {}
self._pages = {}
self._archives = {}
def too_many_failures(self, url):
return self._failures.get(url, 0) >= self.failure_limit
def get_page(self, url):
return self._pages.get(url)
def is_archive(self, url):
return self._archives.get(url, False)
def set_is_archive(self, url, value=True):
self._archives[url] = value
def add_page_failure(self, url, level):
self._failures[url] = self._failures.get(url, 0)+level
def add_page(self, urls, page):
for url in urls:
self._pages[url] = page
class HTMLPage(object):
"""Represents one page, along with its URL"""
## FIXME: these regexes are horrible hacks:
_homepage_re = re.compile(r'<th>\s*home\s*page', re.I)
_download_re = re.compile(r'<th>\s*download\s+url', re.I)
_href_re = re.compile('href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))', re.I|re.S)
def __init__(self, content, url, headers=None, trusted=None):
self.content = content
self.parsed = html5lib.parse(self.content, namespaceHTMLElements=False)
self.url = url
self.headers = headers
self.trusted = trusted
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
if session is None:
session = PipSession()
url = link.url
url = url.split('#', 1)[0]
if cache.too_many_failures(url):
return None
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %(scheme)s URL %(link)s' % locals())
return None
if cache is not None:
inst = cache.get_page(url)
if inst is not None:
return inst
try:
if skip_archives:
if cache is not None:
if cache.is_archive(url):
return None
filename = link.filename
for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(url,
session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
logger.debug('Getting page %s' % url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
# add trailing slash if not present so urljoin doesn't trim final segment
if not url.endswith('/'):
url += '/'
url = urlparse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s' % url)
resp = session.get(url, headers={"Accept": "text/html"})
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
# redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
# Unless we issue a HEAD request on every url we cannot know
# ahead of time for sure if something is HTML or not. However we
# can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug('Skipping page %s because of Content-Type: %s' %
(link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
except requests.HTTPError as exc:
level = 2 if exc.response.status_code == 404 else 1
cls._handle_fail(req, link, exc, url, cache=cache, level=level)
except requests.ConnectionError as exc:
cls._handle_fail(
req, link, "connection error: %s" % exc, url,
cache=cache,
)
except requests.Timeout:
cls._handle_fail(req, link, "timed out", url, cache=cache)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(req, link, reason, url,
cache=cache,
level=2,
meth=logger.notify,
)
else:
if cache is not None:
cache.add_page([url, resp.url], inst)
return inst
@staticmethod
def _handle_fail(req, link, reason, url, cache=None, level=1, meth=None):
if meth is None:
meth = logger.info
meth("Could not fetch URL %s: %s", link, reason)
meth("Will skip URL %s when looking for download links for %s" %
(link.url, req))
if cache is not None:
cache.add_page_failure(url, level)
@staticmethod
def _get_content_type(url, session=None):
"""Get the Content-Type of the given url, using a HEAD request"""
if session is None:
session = PipSession()
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if not scheme in ('http', 'https', 'ftp', 'ftps'):
## FIXME: some warning or something?
## assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@property
def api_version(self):
if not hasattr(self, "_api_version"):
_api_version = None
metas = [x for x in self.parsed.findall(".//meta")
if x.get("name", "").lower() == "api-version"]
if metas:
try:
_api_version = int(metas[0].get("value", None))
except (TypeError, ValueError):
_api_version = None
self._api_version = _api_version
return self._api_version
@property
def base_url(self):
if not hasattr(self, "_base_url"):
base = self.parsed.find(".//base")
if base is not None and base.get("href"):
self._base_url = base.get("href")
else:
self._base_url = self.url
return self._base_url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self.base_url, href))
# Determine if this link is internal. If that distinction
# doesn't make sense in this context, then we don't make
# any distinction.
internal = None
if self.api_version and self.api_version >= 2:
# Only api_versions >= 2 have a distinction between
# external and internal links
internal = bool(anchor.get("rel")
and "internal" in anchor.get("rel").split())
yield Link(url, self, internal=internal)
def rel_links(self):
for url in self.explicit_rel_links():
yield url
for url in self.scraped_rel_links():
yield url
def explicit_rel_links(self, rels=('homepage', 'download')):
"""Yields all links with the given relations"""
rels = set(rels)
for anchor in self.parsed.findall(".//a"):
if anchor.get("rel") and anchor.get("href"):
found_rels = set(anchor.get("rel").split())
# Determine the intersection between what rels were found and
# what rels were being looked for
if found_rels & rels:
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self.base_url, href))
yield Link(url, self, trusted=False)
def scraped_rel_links(self):
# Can we get rid of this horrible horrible method?
for regex in (self._homepage_re, self._download_re):
match = regex.search(self.content)
if not match:
continue
href_match = self._href_re.search(self.content, pos=match.end())
if not href_match:
continue
url = href_match.group(1) or href_match.group(2) or href_match.group(3)
if not url:
continue
url = self.clean_link(urlparse.urljoin(self.base_url, url))
yield Link(url, self, trusted=False, _deprecated_regex=True)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
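    # Illustrative example: a space (ord 32) is rewritten to "%20", so
    #   clean_link("http://host/a b") -> "http://host/a%20b"
    # while characters already allowed by _clean_re are left untouched.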
class Link(object):
def __init__(self, url, comes_from=None, internal=None, trusted=None,
_deprecated_regex=False):
self.url = url
self.comes_from = comes_from
self.internal = internal
self.trusted = trusted
self._deprecated_regex = _deprecated_regex
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
return self.url == other.url
def __ne__(self, other):
return self.url != other.url
def __lt__(self, other):
return self.url < other.url
def __le__(self, other):
return self.url <= other.url
def __gt__(self, other):
return self.url > other.url
def __ge__(self, other):
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urlparse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urlparse.urlsplit(self.url)[0]
@property
def path(self):
return urlparse.urlsplit(self.url)[2]
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url)
return urlparse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)')
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
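    # Illustrative example: for a URL ending in "#md5=0123abcd",
    # hash_name is "md5" and hash is "0123abcd"; egg_fragment extracts
    # the name from an "#egg=<name>" fragment in the same way.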
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def verifiable(self):
"""
Returns True if this link can be verified after download, False if it
cannot, and None if we cannot determine.
"""
trusted = self.trusted or getattr(self.comes_from, "trusted", None)
if trusted is not None and trusted:
# This link came from a trusted source. It *may* be verifiable but
# first we need to see if this page is operating under the new
# API version.
try:
api_version = getattr(self.comes_from, "api_version", None)
api_version = int(api_version)
except (ValueError, TypeError):
api_version = None
if api_version is None or api_version <= 1:
                # This link is either trusted itself, or it came from a
                # trusted source; however, the page is not operating under
                # API version 2, so we can't make any claims about whether
                # it's safe or not.
return
if self.hash:
# This link came from a trusted source and it has a hash, so we
# can consider it safe.
return True
else:
# This link came from a trusted source, using the new API
# version, and it does not have a hash. It is NOT verifiable
return False
elif trusted is not None:
# This link came from an untrusted source and we cannot trust it
return False
# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
INSTALLED_VERSION = Link(Inf)
def get_requirement_from_url(url):
"""Get a requirement from the URL, if possible. This looks for #egg
in the URL"""
link = Link(url)
egg_info = link.egg_fragment
if not egg_info:
egg_info = splitext(link.filename)[0]
return package_to_requirement(egg_info)
def package_to_requirement(package_name):
"""Translate a name like Foo-1.2 to Foo==1.3"""
match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
if match:
name = match.group(1)
version = match.group(2)
else:
name = package_name
version = ''
if version:
return '%s==%s' % (name, version)
else:
return name
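# Illustrative examples:
#   package_to_requirement("Foo-1.2") -> "Foo==1.2"
#   package_to_requirement("Foo")     -> "Foo"
#   get_requirement_from_url("http://host/Foo-1.2.tar.gz#egg=Foo") -> "Foo"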
|
sbbic/core | refs/heads/master | wizards/com/sun/star/wizards/web/data/CGPublish.py | 9 | #
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
import traceback
from ...common.ConfigGroup import ConfigGroup
'''
A class which describes the publishing arguments
in a session.
Each session can contain different publishers, which are configured
through such a CGPublish object.
'''
class CGPublish(ConfigGroup):
def __init__(self):
self.cp_Publish = bool()
self.cp_URL = str()
self.cp_Username = str()
self.password = str()
self.overwriteApproved = bool()
self.url = str()
def setURL(self, path):
try:
self.cp_URL = self.root.getFileAccess().getURL(path)
self.overwriteApproved = False
except Exception as ex:
            traceback.print_exc()
def getURL(self):
try:
return self.root.getFileAccess().getPath(self.cp_URL, None)
except Exception as e:
            traceback.print_exc()
return ""
|
inoue0124/TensorFlow_Keras | refs/heads/master | chapter4/leaky_relu_tensorflow.py | 1 | import numpy as np
import tensorflow as tf
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
np.random.seed(0)
tf.set_random_seed(123)
'''
Generate data
'''
mnist = datasets.fetch_mldata('MNIST original', data_home='.')
n = len(mnist.data)
N = 10000 # use a part of the mnist
train_size = 0.8
indices = np.random.permutation(range(n))[:N] # choose random indices up to N
print (indices)
X = mnist.data[indices]
y = mnist.target[indices]
Y = np.eye(10)[y.astype(int)] # convert to 1-of-K
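# e.g. a label of 3 maps to the one-hot row [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]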
X_train, X_test, Y_train, Y_test =\
train_test_split(X, Y, train_size=train_size)
'''
Set the model
'''
n_in = len(X[0]) # 784
n_hidden = 200
n_out = len(Y[0]) # 10
def lrelu(x, alpha=0.01):
return tf.maximum(alpha*x,x)
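# Element-wise, lrelu computes max(alpha*x, x): positive inputs pass through
# unchanged, negative inputs are scaled by alpha (e.g. -5.0 -> -0.05 with the
# default alpha=0.01).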
x = tf.placeholder(tf.float32, shape=[None, n_in])
t = tf.placeholder(tf.float32, shape=[None, n_out])
W0 = tf.Variable(tf.truncated_normal([n_in, n_hidden], stddev=0.01))
b0 = tf.Variable(tf.zeros([n_hidden]))
h0 = lrelu(tf.matmul(x,W0) + b0)
W1 = tf.Variable(tf.truncated_normal([n_hidden, n_hidden], stddev=0.01))
b1 = tf.Variable(tf.zeros([n_hidden]))
h1 = lrelu(tf.matmul(h0,W1) + b1)
W2 = tf.Variable(tf.truncated_normal([n_hidden, n_hidden], stddev=0.01))
b2 = tf.Variable(tf.zeros([n_hidden]))
h2 = lrelu(tf.matmul(h1,W2) + b2)
W3 = tf.Variable(tf.truncated_normal([n_hidden, n_hidden], stddev=0.01))
b3 = tf.Variable(tf.zeros([n_hidden]))
h3 = lrelu(tf.matmul(h2,W3) + b3)
W4 = tf.Variable(tf.truncated_normal([n_hidden, n_out], stddev=0.01))
b4 = tf.Variable(tf.zeros([n_out]))
y = tf.nn.softmax(tf.matmul(h3,W4) + b4)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(t*tf.log(y),reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(t,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
'''
train setting
'''
epochs = 50
batch_size = 200
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
n_batches = int(N * train_size) // batch_size
for epoch in range(epochs):
X_, Y_ = shuffle(X_train, Y_train)
for i in range(n_batches):
start = i * batch_size
end = start + batch_size
sess.run(train_step, feed_dict={x:X_[start:end],t:Y_[start:end]})
loss = cross_entropy.eval(session=sess,feed_dict={x:X_,t:Y_})
acc = accuracy.eval(session=sess, feed_dict={x:X_,t:Y_})
print('epoch:', epoch, ' loss:', loss, ' accuracy:', acc)
'''
evaluation of the model
'''
accuracy_rate = accuracy.eval(session=sess, feed_dict={x:X_test,t:Y_test})
print('accuracy: ', accuracy_rate)
|
SelvorWhim/competitive | refs/heads/master | Codewars/Kebabize.py | 1 | def capital_to_kebab(c):
return "-" + c.lower() if c.isupper() else c if c.isalpha() else ""
def kebabize(s):
return "".join(capital_to_kebab(c) for c in s).lstrip("-")
|
Jusedawg/SickRage | refs/heads/develop | lib/sqlalchemy/dialects/mysql/zxjdbc.py | 78 | # mysql/zxjdbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+zxjdbc
:name: zxjdbc for Jython
:dbapi: zxjdbc
:connectstring: mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/<database>
:driverurl: http://dev.mysql.com/downloads/connector/j/
Character Sets
--------------
SQLAlchemy zxjdbc dialects pass unicode straight through to the
zxjdbc/JDBC layer. To allow multiple character sets to be sent from the
MySQL Connector/J JDBC driver, by default SQLAlchemy sets its
``characterEncoding`` connection property to ``UTF-8``. It may be
overriden via a ``create_engine`` URL parameter.
"""
import re
from ... import types as sqltypes, util
from ...connectors.zxJDBC import ZxJDBCConnector
from .base import BIT, MySQLDialect, MySQLExecutionContext
class _ZxJDBCBit(BIT):
def result_processor(self, dialect, coltype):
"""Converts boolean or byte arrays from MySQL Connector/J to longs."""
def process(value):
if value is None:
return value
if isinstance(value, bool):
return int(value)
v = 0
for i in value:
v = v << 8 | (i & 0xff)
value = v
return value
return process
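        # Illustrative example: a JDBC byte array of [0x01, 0x00] is folded
        # into the integer 256 (0x0100); a Python bool is simply converted
        # to 0 or 1.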
class MySQLExecutionContext_zxjdbc(MySQLExecutionContext):
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT LAST_INSERT_ID()")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
jdbc_db_name = 'mysql'
jdbc_driver_name = 'com.mysql.jdbc.Driver'
execution_ctx_cls = MySQLExecutionContext_zxjdbc
colspecs = util.update_copy(
MySQLDialect.colspecs,
{
sqltypes.Time: sqltypes.Time,
BIT: _ZxJDBCBit
}
)
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If it's decided that issuing that sort of SQL leaves you SOL, then
# this can prefer the driver value.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs))
for key in ('character_set_connection', 'character_set'):
if opts.get(key, None):
return opts[key]
util.warn("Could not detect the connection character set. Assuming latin1.")
return 'latin1'
def _driver_kwargs(self):
"""return kw arg dict to be sent to connect()."""
return dict(characterEncoding='UTF-8', yearIsDateType='false')
def _extract_error_code(self, exception):
# e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist
# [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' ()
m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.args))
        # search() returns None when the pattern is absent, so guard
        # before extracting the error code.
        if m:
            return int(m.group(1))
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = []
r = re.compile('[.\-]')
for n in r.split(dbapi_con.dbversion):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
dialect = MySQLDialect_zxjdbc
|
laurent-george/bokeh | refs/heads/master | bokeh/util/string.py | 43 | """ Functions useful for string manipulations or encoding.
"""
from __future__ import absolute_import
def encode_utf8(u):
""" Encode a UTF-8 string to a sequence of bytes.
Args:
u (str) : the string to encode
Returns:
bytes
"""
import sys
if sys.version_info[0] == 2:
u = u.encode('utf-8')
return u
def decode_utf8(u):
""" Decode a sequence of bytes to a UTF-8 string
Args:
u (str) : the bytes to decode
Returns:
UTF-8 string
"""
import sys
if sys.version_info[0] == 2:
u = u.decode('utf-8')
return u
def nice_join(seq, sep=", "):
""" Join together sequences of strings into English-friendly phrases using
the conjunction ``or`` when appropriate.
Args:
seq (seq[str]) : a sequence of strings to nicely join
sep (str, optional) : a sequence delimiter to use (default: ", ")
Returns:
a joined string
Examples:
>>> nice_join(["a", "b", "c"])
'a, b or c'
"""
seq = [str(x) for x in seq]
if len(seq) <= 1:
return sep.join(seq)
else:
return "%s or %s" % (sep.join(seq[:-1]), seq[-1]) |
apocalypsebg/odoo | refs/heads/8.0 | addons/base_report_designer/plugin/openerp_report_designer/bin/script/AddAttachment.py | 384 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import os
import uno
import unohelper
import xmlrpclib
import base64
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.tools import *
from LoginTest import *
from lib.rpc import *
database="test"
uid = 3
class AddAttachment(unohelper.Base, XJobExecutor ):
Kind = {
'PDF' : 'pdf',
'OpenOffice': 'sxw',
}
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
LoginTest()
if not loginstatus and __name__=="package":
exit(1)
self.aSearchResult = []
desktop=getDesktop()
oDoc2 = desktop.getCurrentComponent()
docinfo=oDoc2.getDocumentInfo()
global passwd
self.password = passwd
global url
self.sock=RPCSession(url)
if docinfo.getUserFieldValue(2) <> "" and docinfo.getUserFieldValue(3) <> "":
self.win = DBModalDialog(60, 50, 180, 70, "Add Attachment to Server")
self.win.addFixedText("lblResourceType", 2 , 5, 100, 10, "Select Appropriate Resource Type:")
self.win.addComboListBox("lstResourceType", -2, 25, 176, 15,True)
self.win.addButton('btnOkWithoutInformation', -2 , -5, 25 , 15,'OK' ,actionListenerProc = self.btnOkWithoutInformation_clicked )
else:
self.win = DBModalDialog(60, 50, 180, 190, "Add Attachment to Server")
self.win.addFixedText("lblModuleName",2 , 9, 42, 20, "Select Module:")
self.win.addComboListBox("lstmodel", -2, 5, 134, 15,True)
self.lstModel = self.win.getControl( "lstmodel" )
self.dModel = {}
# Open a new connexion to the server
ids = self.sock.execute(database, uid, self.password, 'ir.module.module', 'search', [('name','=','base_report_model'),('state', '=', 'installed')])
if not len(ids):
# If the module 'base_report_model' is not installed, use the default model
self.dModel = {
"Partner":'res.partner',
}
else:
ids =self.sock.execute(database, uid, self.password, 'base.report.model' , 'search', [])
res = self.sock.execute(database, uid, self.password, 'base.report.model' , 'read', ids, ['name','model_id'])
models = self.sock.execute(database, uid, self.password, 'ir.model' , 'read', map(lambda x:x['model_id'][0], res), ['model'])
models = dict(map(lambda x:(x['id'],x['model']), models))
self.dModel = dict(map(lambda x: (x['name'],models[x['model_id'][0]]), res))
for item in self.dModel.keys():
self.lstModel.addItem(item, self.lstModel.getItemCount())
self.win.addFixedText("lblSearchName",2 , 25, 60, 10, "Enter Search String:")
self.win.addEdit("txtSearchName", 2, 35, 149, 15,)
self.win.addButton('btnSearch', -2 , 35, 25 , 15,'Search' ,actionListenerProc = self.btnSearch_clicked )
self.win.addFixedText("lblSearchRecord", 2 , 55, 60, 10, "Search Result:")
self.win.addComboListBox("lstResource", -2, 65, 176, 70, False )
self.lstResource = self.win.getControl( "lstResource" )
self.win.addFixedText("lblResourceType", 2 , 137, 100, 20, "Select Appropriate Resource Type:")
self.win.addComboListBox("lstResourceType", -2, 147, 176, 15,True )
self.win.addButton('btnOkWithInformation', -2 , -5, 25 , 15,'OK' ,actionListenerProc = self.btnOkWithInformation_clicked )
self.lstResourceType = self.win.getControl( "lstResourceType" )
for kind in self.Kind.keys():
self.lstResourceType.addItem( kind, self.lstResourceType.getItemCount() )
self.win.addButton('btnCancel', -2 - 27 , -5 , 30 , 15, 'Cancel' ,actionListenerProc = self.btnCancel_clicked )
self.win.doModalDialog("lstResourceType", self.Kind.keys()[0])
def btnSearch_clicked(self, oActionEvent):
modelSelectedItem = self.win.getListBoxSelectedItem("lstmodel")
if modelSelectedItem == "":
return
desktop=getDesktop()
oDoc2 = desktop.getCurrentComponent()
docinfo=oDoc2.getDocumentInfo()
self.aSearchResult =self.sock.execute( database, uid, self.password, self.dModel[modelSelectedItem], 'name_search', self.win.getEditText("txtSearchName"))
self.win.removeListBoxItems("lstResource", 0, self.win.getListBoxItemCount("lstResource"))
if self.aSearchResult == []:
ErrorDialog("No search result found.", "", "Search Error.")
return
for result in self.aSearchResult:
self.lstResource.addItem(result[1],result[0])
def _send_attachment(self, name, data, res_model, res_id):
desktop = getDesktop()
oDoc2 = desktop.getCurrentComponent()
docinfo = oDoc2.getDocumentInfo()
params = {
'name': name,
'datas': base64.encodestring( data ),
'datas_fname': name,
'res_model' : res_model,
'res_id' : int(res_id),
}
return self.sock.execute( database, uid, self.password, 'ir.attachment', 'create', params )
def send_attachment(self, model, resource_id):
desktop = getDesktop()
oDoc2 = desktop.getCurrentComponent()
docinfo = oDoc2.getDocumentInfo()
if oDoc2.getURL() == "":
ErrorDialog("You should save your file.", "", "Saving Error.")
return None
url = oDoc2.getURL()
if self.Kind[self.win.getListBoxSelectedItem("lstResourceType")] == "pdf":
url = self.doc2pdf(url[7:])
if url == None:
ErrorDialog( "Problem in creating PDF.", "", "PDF Error.")
return None
url = url[7:]
data = read_data_from_file( get_absolute_file_path( url ) )
return self._send_attachment( os.path.basename( url ), data, model, resource_id )
def btnOkWithoutInformation_clicked(self, oActionEvent):
desktop = getDesktop()
oDoc2 = desktop.getCurrentComponent()
docinfo = oDoc2.getDocumentInfo()
if self.win.getListBoxSelectedItem("lstResourceType") == "":
ErrorDialog("You have to select a resource type.", "", "Selection Error." )
return
res = self.send_attachment( docinfo.getUserFieldValue(3), docinfo.getUserFieldValue(2) )
self.win.endExecute()
def btnOkWithInformation_clicked(self, oActionEvent):
if self.win.getListBoxSelectedItem("lstResourceType") == "":
ErrorDialog( "You have to select a resource type.", "", "Selection Error." )
return
if self.win.getListBoxSelectedItem("lstResource") == "" or self.win.getListBoxSelectedItem("lstmodel") == "":
ErrorDialog("You have to select Model and Resource.", "", "Selection Error.")
return
resourceid = None
for s in self.aSearchResult:
if s[1] == self.win.getListBoxSelectedItem("lstResource"):
resourceid = s[0]
break
if resourceid == None:
ErrorDialog("No resource is selected.", "", "Resource Error." )
return
res = self.send_attachment( self.dModel[self.win.getListBoxSelectedItem('lstmodel')], resourceid )
self.win.endExecute()
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
def doc2pdf(self, strFile):
oDoc = None
strFilterSubName = ''
strUrl = convertToURL( strFile )
desktop = getDesktop()
oDoc = desktop.loadComponentFromURL( strUrl, "_blank", 0, Array(self._MakePropertyValue("Hidden",True)))
if oDoc:
strFilterSubName = ""
# select appropriate filter
if oDoc.supportsService("com.sun.star.presentation.PresentationDocument"):
strFilterSubName = "impress_pdf_Export"
elif oDoc.supportsService("com.sun.star.sheet.SpreadsheetDocument"):
strFilterSubName = "calc_pdf_Export"
elif oDoc.supportsService("com.sun.star.text.WebDocument"):
strFilterSubName = "writer_web_pdf_Export"
elif oDoc.supportsService("com.sun.star.text.GlobalDocument"):
strFilterSubName = "writer_globaldocument_pdf_Export"
elif oDoc.supportsService("com.sun.star.text.TextDocument"):
strFilterSubName = "writer_pdf_Export"
elif oDoc.supportsService("com.sun.star.drawing.DrawingDocument"):
strFilterSubName = "draw_pdf_Export"
elif oDoc.supportsService("com.sun.star.formula.FormulaProperties"):
strFilterSubName = "math_pdf_Export"
elif oDoc.supportsService("com.sun.star.chart.ChartDocument"):
strFilterSubName = "chart_pdf_Export"
else:
pass
filename = len(strFilterSubName) > 0 and convertToURL( os.path.splitext( strFile )[0] + ".pdf" ) or None
if len(strFilterSubName) > 0:
oDoc.storeToURL( filename, Array(self._MakePropertyValue("FilterName", strFilterSubName ),self._MakePropertyValue("CompressMode", "1" )))
oDoc.close(True)
# Can be None if len(strFilterSubName) <= 0
return filename
def _MakePropertyValue(self, cName="", uValue=u""):
oPropertyValue = createUnoStruct( "com.sun.star.beans.PropertyValue" )
if cName:
oPropertyValue.Name = cName
if uValue:
oPropertyValue.Value = uValue
return oPropertyValue
if __name__<>"package" and __name__=="__main__":
AddAttachment(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( AddAttachment, "org.openoffice.openerp.report.addattachment", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
vmax-feihu/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/admin_scripts/management/commands/base_command.py | 131 | from optparse import make_option
from django.core.management.base import BaseCommand
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--option_a','-a', action='store', dest='option_a', default='1'),
make_option('--option_b','-b', action='store', dest='option_b', default='2'),
make_option('--option_c','-c', action='store', dest='option_c', default='3'),
)
help = 'Test basic commands'
requires_model_validation = False
args = '[labels ...]'
def handle(self, *labels, **options):
print('EXECUTE:BaseCommand labels=%s, options=%s' % (labels, sorted(options.items())))
|
PHSCRC/phsled | refs/heads/master | scrollText.py | 1 | #!/usr/bin/env python
import time, sys
from Adafruit_LEDBackpack import LEDBackpack
from LEDLetterValues import *
from timeit import default_timer
grids = [LEDBackpack(address=i) for i in range(0x70, 0x74)]
wait_time = float(sys.argv[2] if len(sys.argv) > 2 else raw_input("Wait time: "))
text = sys.argv[1] if len(sys.argv) > 1 else raw_input("What should I scroll: ")
printcon = textTo2D(text)
print "Press CTRL+C to exit"
def main():
scrolled = 0
while True:
start = default_timer()
for y, v in enumerate(printcon):
buffers = [0x00, 0x00, 0x00, 0x00]
for x, i in enumerate(v):
if i:
a = x - scrolled
if a >= 0 and a < len(grids) * 16:
buffers[a // 16] = buffers[a // 16] | 1 << (a % 16)
for i, grid in enumerate(grids):
grid.setBufferRow(y, buffers[i], update=False)
for i in grids:
i.writeDisplay()
final = default_timer()-start
if final <= wait_time:
time.sleep(wait_time - final)
scrolled += 1
if scrolled >= len(printcon[0]) / 2 + 6:
scrolled = 0
if "--once" in sys.argv:
exit(0)
if __name__ == "__main__":
try:
main()
except BaseException:
for i in grids:
for y in range(8):
i.setBufferRow(y, 0x00, update=False)
i.writeDisplay()
|
sunlightlabs/openstates | refs/heads/master | scrapers/hi/events.py | 2 | from utils import LXMLMixin
import datetime as dt
from openstates.scrape import Scraper, Event
from .utils import get_short_codes
from requests import HTTPError
import pytz
URL = "http://www.capitol.hawaii.gov/upcominghearings.aspx"
TIMEZONE = pytz.timezone("Pacific/Honolulu")
class HIEventScraper(Scraper, LXMLMixin):
seen_hearings = []
chambers = {"lower": "House", "upper": "Senate", "joint": "Joint"}
def get_related_bills(self, href):
ret = []
try:
page = self.lxmlize(href)
except HTTPError:
return ret
bills = page.xpath(".//a[contains(@href, 'Bills')]")
for bill in bills:
try:
row = next(bill.iterancestors(tag="tr"))
except StopIteration:
continue
tds = row.xpath("./td")
descr = tds[1].text_content()
for i in ["\r\n", "\xa0"]:
descr = descr.replace(i, "")
ret.append(
{
"bill_id": bill.text_content(),
"type": "consideration",
"descr": descr,
}
)
return ret
def scrape(self):
get_short_codes(self)
page = self.lxmlize(URL)
table = page.xpath("//table[@id='ctl00_ContentPlaceHolderCol1_GridView1']")[0]
for event in table.xpath(".//tr")[1:]:
tds = event.xpath("./td")
committee = tds[0].text_content().strip()
if self.short_ids.get(committee):
descr = "{} {}".format(
self.chambers[self.short_ids[committee]["chamber"]],
self.short_ids[committee]["name"],
)
else:
descr = [x.text_content() for x in tds[1].xpath(".//span")]
if len(descr) != 1:
raise Exception
descr = descr[0].replace(".", "").strip()
when = tds[2].text_content().strip()
where = tds[3].text_content().strip()
notice = tds[4].xpath(".//a")[0]
notice_href = notice.attrib["href"]
notice_name = notice.text
# the listing page shows the same hearing in multiple rows.
# combine these -- get_related_bills() will take care of adding the bills
# and descriptions
if notice_href in self.seen_hearings:
continue
else:
self.seen_hearings.append(notice_href)
when = dt.datetime.strptime(when, "%m/%d/%Y %I:%M %p")
when = TIMEZONE.localize(when)
event = Event(
name=descr,
start_date=when,
classification="committee-meeting",
description=descr,
location_name=where,
)
if "/" in committee:
committees = committee.split("/")
else:
committees = [committee]
for committee in committees:
if "INFO" not in committee and committee in self.short_ids:
committee = "{} {}".format(
self.chambers[self.short_ids[committee]["chamber"]],
self.short_ids[committee]["name"],
)
event.add_committee(committee, note="host")
event.add_source(URL)
event.add_document(notice_name, notice_href, media_type="text/html")
for bill in self.get_related_bills(notice_href):
a = event.add_agenda_item(description=bill["descr"].strip())
a.add_bill(bill["bill_id"], note=bill["type"])
yield event
|
servo/servo | refs/heads/master | tests/wpt/web-platform-tests/resource-timing/resources/gzip_xml.py | 7 | import gzip as gzip_module
import os
from six import BytesIO
from wptserve.utils import isomorphic_decode
def main(request, response):
dir_path = os.path.dirname(os.path.realpath(isomorphic_decode(__file__)))
file_path = os.path.join(dir_path, u'resource_timing_test0.xml')
f = open(file_path, u'rb')
output = f.read()
out = BytesIO()
with gzip_module.GzipFile(fileobj=out, mode="w") as f:
f.write(output)
output = out.getvalue()
headers = [(b"Content-type", b"text/plain"),
(b"Content-Encoding", b"gzip"),
(b"Content-Length", len(output))]
return headers, output
|
baylee-d/cos.io | refs/heads/develop | common/blocks/collapsebox.py | 1 | from wagtail.wagtailcore.blocks import RichTextBlock, CharBlock, ListBlock, \
StructBlock
class CollapseEntryBlock(StructBlock):
title = CharBlock()
content = RichTextBlock()
class Meta:
form_template = 'common/block_forms/collapse_entry.html'
template = 'common/blocks/collapse_entry.html'
class CollapseBoxListBlock(ListBlock):
def __init__(self, **kwargs):
return super(CollapseBoxListBlock, self).__init__(
CollapseEntryBlock(), **kwargs)
class CollapseBoxBlock(StructBlock):
title = CharBlock()
list = CollapseBoxListBlock()
class Meta:
template = 'common/blocks/collapse_box_block.html'
icon = 'list-ul'
|
bealdav/OCB | refs/heads/patch-1 | addons/mrp/wizard/stock_move.py | 78 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class stock_move_consume(osv.osv_memory):
_name = "stock.move.consume"
_description = "Consume Products"
_columns = {
'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'location_id': fields.many2one('stock.location', 'Location', required=True),
'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot'),
}
#TOFIX: product_uom should not have different category of default UOM of product. Qty should be convert into UOM of original move line before going in consume and scrap
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(stock_move_consume, self).default_get(cr, uid, fields, context=context)
move = self.pool.get('stock.move').browse(cr, uid, context['active_id'], context=context)
if 'product_id' in fields:
res.update({'product_id': move.product_id.id})
if 'product_uom' in fields:
res.update({'product_uom': move.product_uom.id})
if 'product_qty' in fields:
res.update({'product_qty': move.product_qty})
if 'location_id' in fields:
res.update({'location_id': move.location_id.id})
return res
def do_move_consume(self, cr, uid, ids, context=None):
if context is None:
context = {}
move_obj = self.pool.get('stock.move')
move_ids = context['active_ids']
for data in self.browse(cr, uid, ids, context=context):
move_obj.action_consume(cr, uid, move_ids,
data.product_qty, data.location_id.id, restrict_lot_id=data.restrict_lot_id.id,
context=context)
return {'type': 'ir.actions.act_window_close'}
|
petrus-v/odoo | refs/heads/8.0 | addons/sale/report/invoice_report.py | 336 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class account_invoice_report(osv.osv):
_inherit = 'account.invoice.report'
_columns = {
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
_depends = {
'account.invoice': ['section_id'],
}
def _select(self):
return super(account_invoice_report, self)._select() + ", sub.section_id as section_id"
def _sub_select(self):
return super(account_invoice_report, self)._sub_select() + ", ai.section_id as section_id"
def _group_by(self):
return super(account_invoice_report, self)._group_by() + ", ai.section_id"
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tos-kamiya/pyrem_torq | refs/heads/master | src/test/test_pyrem_torq_treeseq.py | 1 | import sys, os
import unittest
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from pyrem_torq.treeseq import *
class TestTorqTreeseq(unittest.TestCase):
def testRemoveStrattrs(self):
seq = [ 'a', 1, 'b', 2, 'c' ]
self.assertEquals(seq_remove_strattrs(seq), [ 'a', 'b', 'c' ])
seq = [ 'a', [ 'B', 1, 'b' ], 2, 'c' ]
self.assertEquals(seq_remove_strattrs(seq), [ 'a', [ 'B', 'b' ], 'c' ])
def testEncloseStrattrs(self):
seq = [ 'a', 1, 'b', 2, 'c' ]
self.assertEquals(seq_enclose_strattrs(seq), [ 'a', ( 1, 'b' ), ( 2, 'c' ) ])
seq = [ 'a', [ 'B', 1, 'b' ], 2, 'c' ]
self.assertEquals(seq_enclose_strattrs(seq), [ 'a', [ 'B', ( 1, 'b' ) ], ( 2, 'c' ) ])
def testEncloseStrattrsToIllegalData(self):
seq = [ 'a', 1, 'b', 'c' ]
with self.assertRaises(IndexError):
seq_enclose_strattrs(seq)
seq = [ 'a', [ 'B', 1, 'b' ], 'c' ]
with self.assertRaises(IndexError):
seq_enclose_strattrs(seq)
def testDiscloseStrattrs(self):
seq = [ 'a', ( 1, 'b' ), ( 2, 'c' ) ]
self.assertEquals(seq_disclose_strattrs(seq), [ 'a', 1, 'b', 2, 'c' ])
seq = [ 'a', [ 'B', ( 1, 'b' ) ], ( 2, 'c' ) ]
self.assertEquals(seq_disclose_strattrs(seq), [ 'a', [ 'B', 1, 'b' ], 2, 'c' ])
def testDiscloseStrattrsToIllegalData(self):
seq = [ 'a', ( 1, 'b' ), 'c' ]
with self.assertRaises(TypeError):
seq_disclose_strattrs(seq)
seq = [ 'a', [ 'B', ( 1, 'b' ) ], 'c' ]
with self.assertRaises(TypeError):
seq_disclose_strattrs(seq)
def testSplitAndMergeStrattrs(self):
seq = [ 'a', 1, 'b', 2, 'c' ]
atrSeq, strSeq = seq_split_strattrs(seq)
self.assertEquals(strSeq, [ 'a', 'b', 'c' ])
self.assertEquals(atrSeq, [ 'a', 1, 2 ])
mergedSeq = seq_merge_strattrs(atrSeq, strSeq)
self.assertEquals(mergedSeq, seq)
seq = [ 'a', [ 'B', 1, 'b' ], 2, 'c' ]
atrSeq, strSeq = seq_split_strattrs(seq)
self.assertEquals(strSeq, [ 'a', [ 'B', 'b' ], 'c' ])
self.assertEquals(atrSeq, [ 'a', [ 'B', 1 ], 2 ])
mergedSeq = seq_merge_strattrs(atrSeq, strSeq)
self.assertEquals(mergedSeq, seq)
#def TestSuite(TestTorqTreeseq):
# return unittest.makeSuite(TestTorqTreeseq)
if __name__ == '__main__':
unittest.main()
|
noahwilliamsson/micropython | refs/heads/wpa-enterprise | tests/basics/fun2.py | 119 | # calling a function from a function
def f(x):
print(x + 1)
def g(x):
f(2 * x)
f(4 * x)
g(3)
|
DARKPOP/external_chromium_org | refs/heads/dark-5.1 | chrome/common/extensions/docs/server2/fail_on_access_file_system.py | 121 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from file_system import FileSystem
class FailOnAccessFileSystem(FileSystem):
# All this needs to do is implement GetIdentity. All other methods will
# automatically fail with NotImplementedErrors.
def GetIdentity(self):
return '42'
|
forbidden-ali/LaZagne | refs/heads/master | Linux/src/softwares/chats/jitsi.py | 11 | from base64 import b64decode
import hashlib, os, re
import binascii, array
from Crypto.Cipher import AES
from config.header import Header
from config.constant import *
from config.write_output import print_debug, print_output
from config.moduleInfo import ModuleInfo
# From https://github.com/mitsuhiko/python-pbkdf2
from pbkdf2 import pbkdf2_bin
class Jitsi(ModuleInfo):
def __init__(self):
options = {'command': '-j', 'action': 'store_true', 'dest': 'jitsi', 'help': 'jitsi'}
suboptions = [{'command': '-ma', 'action': 'store', 'dest': 'master_pwd', 'help': 'enter the master password manually', 'title': 'Advanced jitsi option'}]
ModuleInfo.__init__(self, 'jitsi', 'chats', options, suboptions)
self.keylen = 32
self.iterations = 1024
self.padding = '\f'
self.account_id = ''
self.master_password_used = False
self.masterpass = ' '
def get_salt(self):
salt_array = [12, 10, 15, 14, 11, 14, 14, 15]
salt = array.array('b', salt_array)
hexsalt = binascii.hexlify(salt)
return binascii.unhexlify(hexsalt)
def get_path(self):
directory = '~/.jitsi'
directory = os.path.expanduser(directory) + os.sep + 'sip-communicator.properties'
if os.path.exists(directory):
return directory
else:
return 'JITSI_NOT_EXISTS'
def get_info(self, file_properties):
values = {}
f = open(file_properties,'r')
line = f.readline()
cpt = 0
pwdFound = []
while line:
if 'ACCOUNT_UID' in line:
m = re.match(r"(.*)ACCOUNT_UID=(.*$)",line)
if m:
# password found
if cpt > 0:
pwdFound.append(values)
cpt = 0
values = {}
values['Account id'] = m.group(2)
cpt += 1
if 'ENCRYPTED_PASSWORD' in line:
m = re.match(r"(.*)ENCRYPTED_PASSWORD=(.*$)",line)
if m:
values['Password'] = self.decrypt_password(m.group(2)).replace('\x06', '')
cpt += 1
if 'credentialsstorage.MASTER' in line:
m = re.match(r"(.*)credentialsstorage.MASTER=(.*$)",line)
if m:
values['Masterpass used'] = True
self.master_password_used = True
line = f.readline()
if len(values) != 0:
pwdFound.append(values)
# print the results
print_output('Jitsi', pwdFound)
f.close()
def decrypt_password(self, encrypted_pass):
salt = self.get_salt()
if self.master_password_used and constant.jitsi_masterpass:
self.masterpass = constant.jitsi_masterpass
elif self.master_password_used and not constant.jitsi_masterpass:
            return '[!] A master password is used, so the password cannot be decrypted. Provide the master password using the -ma option'
# --- Decrypting the password ---
# generate hash
secret = pbkdf2_bin(bytes(self.masterpass), salt, self.iterations, self.keylen, hashfunc=hashlib.sha1)
# decrypt password
cipher = AES.new(secret)
plaintext = cipher.decrypt(b64decode(encrypted_pass)).rstrip(self.padding)
return plaintext
# main function
def run(self):
# print the title
Header().title_info('Jitsi')
file_properties = self.get_path()
if file_properties == 'JITSI_NOT_EXISTS':
print_debug('INFO', 'Jitsi not installed.')
else:
self.get_info(file_properties)
|
40223244/cdb-2 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/logging/handlers.py | 736 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import errno, logging, socket, os, pickle, struct, time, re
from codecs import BOM_UTF8
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
try:
import threading
except ImportError: #pragma: no cover
threading = None
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
def __init__(self, filename, mode, encoding=None, delay=False):
"""
Use the specified filename for streamed logging
"""
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.mode = mode
self.encoding = encoding
self.namer = None
self.rotator = None
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
def rotation_filename(self, default_name):
"""
Modify the filename of a log file when rotating.
This is provided so that a custom filename can be provided.
The default implementation calls the 'namer' attribute of the
handler, if it's callable, passing the default name to
it. If the attribute isn't callable (the default is None), the name
is returned unchanged.
:param default_name: The default name for the log file.
"""
if not callable(self.namer):
result = default_name
else:
result = self.namer(default_name)
return result
def rotate(self, source, dest):
"""
When rotating, rotate the current log.
The default implementation calls the 'rotator' attribute of the
handler, if it's callable, passing the source and dest arguments to
it. If the attribute isn't callable (the default is None), the source
is simply renamed to the destination.
:param source: The source filename. This is normally the base
filename, e.g. 'test.log'
:param dest: The destination filename. This is normally
what the source is rotated to, e.g. 'test.log.1'.
"""
if not callable(self.rotator):
# Issue 18940: A file may not have been created if delay is True.
if os.path.exists(source):
os.rename(source, dest)
else:
self.rotator(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
self.maxBytes = maxBytes
self.backupCount = backupCount
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
i + 1))
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.rotation_filename(self.baseFilename + ".1")
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if not self.delay:
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
# Calculate the real rollover interval, which is just the number of
# seconds between rollovers. Also set the filename suffix used when
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch, re.ASCII)
self.interval = self.interval * interval # multiply by units requested
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
result = currentTime + r
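            # Worked example (illustrative): at 18:30:00 local time,
            # r = 86400 - ((18 * 60 + 30) * 60 + 0) = 19800 seconds,
            # so the first rollover is scheduled 5.5 hours from now, at midnight.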
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
if self.when.startswith('W'):
day = t[6] # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
result = newRolloverAt
return result
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
return 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
result.sort()
if len(result) < self.backupCount:
result = []
else:
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.dev, self.ino = -1, -1
self._statstream()
def _statstream(self):
if self.stream:
sres = os.fstat(self.stream.fileno())
self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def emit(self, record):
"""
Emit a record.
First check if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
# Reduce the chance of race conditions by stat'ing by path only
# once and then fstat'ing our new fd if we opened a new log stream.
# See issue #14632: Thanks to John Mulligan for the problem report
# and patch.
try:
# stat the file by path, checking for existence
sres = os.stat(self.baseFilename)
except OSError as err:
if err.errno == errno.ENOENT:
sres = None
else:
raise
# compare file system stat with that of our stream file handle
if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
if self.stream is not None:
# we have an open file handle, clean it up
self.stream.flush()
self.stream.close()
# open a new file handle and get new stat info from that fd
self.stream = self._open()
self._statstream()
logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
When the attribute *closeOnError* is set to True - if a socket error
occurs, the socket is silently closed and then reopened on the next
logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
self.sock = None
self.closeOnError = False
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(s, 'settimeout'):
s.settimeout(timeout)
try:
s.connect((self.host, self.port))
return s
except socket.error:
s.close()
raise
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = True
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except socket.error:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
if hasattr(self.sock, "sendall"):
self.sock.sendall(s)
else: #pragma: no cover
sentsofar = 0
left = len(s)
while left > 0:
sent = self.sock.send(s[sentsofar:])
sentsofar = sentsofar + sent
left = left - sent
except socket.error: #pragma: no cover
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
# just to get traceback text into record.exc_text ...
dummy = self.format(record)
# See issue #14436: If msg or args are objects, they may not be
# available on the receiving end. So we convert the msg % args
# to a string, save it as msg and zap the args.
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
d['exc_info'] = None
s = pickle.dumps(d, 1)
slen = struct.pack(">L", len(s))
return slen + s
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
if self.sock:
self.sock.close()
self.sock = None
logging.Handler.close(self)
finally:
self.release()
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = False
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, (self.host, self.port))
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self.unixsocket = True
self._connect_unixsocket(address)
else:
self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_INET, socktype)
if socktype == socket.SOCK_STREAM:
self.socket.connect(address)
self.socktype = socktype
self.formatter = None
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except socket.error:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except socket.error:
self.socket.close()
raise
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority
def close (self):
"""
Closes the socket.
"""
self.acquire()
try:
self.socket.close()
logging.Handler.close(self)
finally:
self.release()
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
ident = '' # prepended to all messages
append_nul = True # some old syslog daemons expect a NUL terminator
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
msg = self.format(record)
if self.ident:
msg = self.ident + msg
if self.append_nul:
msg += '\000'
"""
We need to convert record level to lowercase, maybe this will
change in the future.
"""
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message is a string. Convert to bytes as required by RFC 5424
msg = msg.encode('utf-8')
msg = prio + msg
try:
if self.unixsocket:
try:
self.socket.send(msg)
except socket.error:
self.socket.close()
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=None, secure=None, timeout=5.0):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, tuple):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, tuple):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
self.timeout = timeout
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.utils import formatdate
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
",".join(self.toaddrs),
self.getSubject(record),
formatdate(), msg)
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET", secure=False, credentials=None):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
Contributed by Franz Glasner.
"""
return record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
try:
import http.client, urllib.parse
host = self.host
if self.secure:
h = http.client.HTTPSConnection(host)
else:
h = http.client.HTTPConnection(host)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port from host, if present
i = host.find(":")
if i >= 0:
host = host[:i]
h.putheader("Host", host)
if self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
if self.credentials:
import base64
                s = ('%s:%s' % self.credentials).encode('utf-8')
                s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
h.putheader('Authorization', s)
h.endheaders()
if self.method == "POST":
h.send(data.encode('utf-8'))
h.getresponse() #can't do anything with the result
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
class BufferingHandler(logging.Handler):
"""
A handler class which buffers logging records in memory. Whenever each
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
try:
self.buffer = []
finally:
self.release()
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
self.flush()
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
self.acquire()
try:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer = []
finally:
self.release()
def close(self):
"""
Flush, set the target to None and lose the buffer.
"""
self.flush()
self.acquire()
try:
self.target = None
BufferingHandler.close(self)
finally:
self.release()
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
This code is new in Python 3.2, but this class can be copy pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
if threading:
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._stop = threading.Event()
self._thread = None
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start()
        def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue.
while True:
try:
record = self.dequeue(False)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
This is used to enqueue the sentinel record.
The base implementation uses put_nowait. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self._stop.set()
self.enqueue_sentinel()
self._thread.join()
self._thread = None
|
zhjunlang/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/chardet/mbcharsetprober.py | 2923 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
|
atsolakid/edx-platform | refs/heads/master | lms/djangoapps/mobile_api/test_milestones.py | 30 | """
Milestone related tests for the mobile_api
"""
from mock import patch
from courseware.tests.helpers import get_request_for_user
from courseware.tests.test_entrance_exam import answer_entrance_exam_problem, add_entrance_exam_milestone
from util.milestones_helpers import (
add_prerequisite_course,
fulfill_course_milestone,
seed_milestone_relationship_types,
)
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
class MobileAPIMilestonesMixin(object):
"""
Tests the Mobile API decorators for milestones.
The two milestones currently supported in these tests are entrance exams and
pre-requisite courses. If either of these milestones are unfulfilled,
the mobile api will appropriately block content until the milestone is
fulfilled.
"""
MILESTONE_MESSAGE = {
'developer_message':
'Cannot access content with unfulfilled pre-requisites or unpassed entrance exam.'
}
ALLOW_ACCESS_TO_MILESTONE_COURSE = False # pylint: disable=invalid-name
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True})
def test_unfulfilled_prerequisite_course(self):
""" Tests the case for an unfulfilled pre-requisite course """
self._add_prerequisite_course()
self.init_course_access()
self._verify_unfulfilled_milestone_response()
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True})
def test_unfulfilled_prerequisite_course_for_staff(self):
self._add_prerequisite_course()
self.user.is_staff = True
self.user.save()
self.init_course_access()
self.api_response()
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True})
def test_fulfilled_prerequisite_course(self):
"""
Tests the case when a user fulfills existing pre-requisite course
"""
self._add_prerequisite_course()
add_prerequisite_course(self.course.id, self.prereq_course.id)
fulfill_course_milestone(self.prereq_course.id, self.user)
self.init_course_access()
self.api_response()
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True})
def test_unpassed_entrance_exam(self):
"""
Tests the case where the user has not passed the entrance exam
"""
self._add_entrance_exam()
self.init_course_access()
self._verify_unfulfilled_milestone_response()
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True})
def test_unpassed_entrance_exam_for_staff(self):
self._add_entrance_exam()
self.user.is_staff = True
self.user.save()
self.init_course_access()
self.api_response()
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True})
def test_passed_entrance_exam(self):
"""
Tests access when user has passed the entrance exam
"""
self._add_entrance_exam()
self._pass_entrance_exam()
self.init_course_access()
self.api_response()
def _add_entrance_exam(self):
""" Sets up entrance exam """
seed_milestone_relationship_types()
self.course.entrance_exam_enabled = True
self.entrance_exam = ItemFactory.create( # pylint: disable=attribute-defined-outside-init
parent=self.course,
category="chapter",
display_name="Entrance Exam Chapter",
is_entrance_exam=True,
in_entrance_exam=True
)
self.problem_1 = ItemFactory.create( # pylint: disable=attribute-defined-outside-init
parent=self.entrance_exam,
category='problem',
display_name="The Only Exam Problem",
graded=True,
in_entrance_exam=True
)
add_entrance_exam_milestone(self.course, self.entrance_exam)
self.course.entrance_exam_minimum_score_pct = 0.50
self.course.entrance_exam_id = unicode(self.entrance_exam.location)
modulestore().update_item(self.course, self.user.id)
def _add_prerequisite_course(self):
""" Helper method to set up the prerequisite course """
seed_milestone_relationship_types()
self.prereq_course = CourseFactory.create() # pylint: disable=attribute-defined-outside-init
add_prerequisite_course(self.course.id, self.prereq_course.id)
def _pass_entrance_exam(self):
""" Helper function to pass the entrance exam """
request = get_request_for_user(self.user)
answer_entrance_exam_problem(self.course, request, self.problem_1)
def _verify_unfulfilled_milestone_response(self):
"""
Verifies the response depending on ALLOW_ACCESS_TO_MILESTONE_COURSE
Since different endpoints will have different behaviours towards milestones,
setting ALLOW_ACCESS_TO_MILESTONE_COURSE (default is False) to True, will
not return a 204. For example, when getting a list of courses a user is
enrolled in, although a user may have unfulfilled milestones, the course
should still show up in the course enrollments list.
"""
if self.ALLOW_ACCESS_TO_MILESTONE_COURSE:
self.api_response()
else:
response = self.api_response(expected_response_code=204)
self.assertEqual(response.data, self.MILESTONE_MESSAGE)
|
tdegrunt/or-tools | refs/heads/master | examples/python/secret_santa2.py | 34 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Secret Santa problem II in Google CP Solver.
From Maple Primes: 'Secret Santa Graph Theory'
http://www.mapleprimes.com/blog/jpmay/secretsantagraphtheory
'''
Every year my extended family does a 'secret santa' gift exchange.
Each person draws another person at random and then gets a gift for
them. At first, none of my siblings were married, and so the draw was
completely random. Then, as people got married, we added the restriction
that spouses should not draw each others names. This restriction meant
that we moved from using slips of paper on a hat to using a simple
computer program to choose names. Then people began to complain when
they would get the same person two years in a row, so the program was
modified to keep some history and avoid giving anyone a name in their
recent history. This year, not everyone was participating, and so after
removing names, and limiting the number of exclusions to four per person,
I had data something like this:
Name: Spouse, Recent Picks
Noah: Ava. Ella, Evan, Ryan, John
Ava: Noah, Evan, Mia, John, Ryan
Ryan: Mia, Ella, Ava, Lily, Evan
Mia: Ryan, Ava, Ella, Lily, Evan
Ella: John, Lily, Evan, Mia, Ava
John: Ella, Noah, Lily, Ryan, Ava
Lily: Evan, John, Mia, Ava, Ella
Evan: Lily, Mia, John, Ryan, Noah
'''
Note: I interpret this as the following three constraints:
1) One cannot be a Secret Santa of one's spouse
2) One cannot be a Secret Santa for somebody two years in a row
3) Optimization: maximize the time since the last time
This model also handles single persons, something the original
problem doesn't mention.
Compare with the following models:
* Google CP Solver: http://www.hakank.org/google_or_tools/secret_santa.py
* MiniZinc: http://www.hakank.org/minizinc/secret_santa2.mzn
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
def main(single=0):
# Create the solver.
solver = pywrapcp.Solver('Secret Santa problem II')
#
# data
#
#
# The matrix version of earlier rounds.
# M means that no earlier Santa has been assigned.
  # Note: Ryan and Mia have the same recipient for years 3 and 4,
  # and Ella and John have the same for year 4.
# This seems to be caused by modification of
# original data.
#
n_no_single = 8
M = n_no_single + 1
rounds_no_single = [
# N A R M El J L Ev
[0, M, 3, M, 1, 4, M, 2], # Noah
[M, 0, 4, 2, M, 3, M, 1], # Ava
[M, 2, 0, M, 1, M, 3, 4], # Ryan
[M, 1, M, 0, 2, M, 3, 4], # Mia
[M, 4, M, 3, 0, M, 1, 2], # Ella
[1, 4, 3, M, M, 0, 2, M], # John
[M, 3, M, 2, 4, 1, 0, M], # Lily
[4, M, 3, 1, M, 2, M, 0] # Evan
]
#
# Rounds with a single person (fake data)
#
n_with_single = 9
M = n_with_single + 1
rounds_single = [
# N A R M El J L Ev S
[0, M, 3, M, 1, 4, M, 2, 2], # Noah
[M, 0, 4, 2, M, 3, M, 1, 1], # Ava
[M, 2, 0, M, 1, M, 3, 4, 4], # Ryan
[M, 1, M, 0, 2, M, 3, 4, 3], # Mia
[M, 4, M, 3, 0, M, 1, 2, M], # Ella
[1, 4, 3, M, M, 0, 2, M, M], # John
[M, 3, M, 2, 4, 1, 0, M, M], # Lily
[4, M, 3, 1, M, 2, M, 0, M], # Evan
[1, 2, 3, 4, M, 2, M, M, 0] # Single
]
if single == 1:
n = n_with_single
Noah, Ava, Ryan, Mia, Ella, John, Lily, Evan, Single = range(n)
rounds = rounds_single
else:
n = n_no_single
Noah, Ava, Ryan, Mia, Ella, John, Lily, Evan = range(n)
rounds = rounds_no_single
M = n + 1
persons = ['Noah', 'Ava', 'Ryan', 'Mia', 'Ella',
'John', 'Lily', 'Evan', 'Single']
spouses = [
Ava, # Noah
Noah, # Ava
      Mia,  # Ryan
Ryan, # Mia
John, # Ella
Ella, # John
Evan, # Lily
Lily, # Evan
-1 # Single has no spouse
]
#
# declare variables
#
santas = [solver.IntVar(0, n - 1, 'santas[%i]' % i)
for i in range(n)]
santa_distance = [solver.IntVar(0, M, 'santa_distance[%i]' % i)
for i in range(n)]
# total of 'distance', to maximize
z = solver.IntVar(0, n * n * n, 'z')
#
# constraints
#
solver.Add(solver.AllDifferent(santas))
solver.Add(z == solver.Sum(santa_distance))
  # One cannot be one's own Secret Santa
  # (i.e. ensure that there are no fixed points in the array.)
for i in range(n):
solver.Add(santas[i] != i)
  # no Santa for one's spouse
for i in range(n):
if spouses[i] > -1:
solver.Add(santas[i] != spouses[i])
# optimize 'distance' to earlier rounds:
for i in range(n):
solver.Add(santa_distance[i] ==
solver.Element(rounds[i], santas[i]))
# cannot be a Secret Santa for the same person
# two years in a row.
for i in range(n):
for j in range(n):
if rounds[i][j] == 1:
solver.Add(santas[i] != j)
# objective
objective = solver.Maximize(z, 1)
#
# solution and search
#
db = solver.Phase(santas,
solver.CHOOSE_MIN_SIZE_LOWEST_MIN,
solver.ASSIGN_CENTER_VALUE)
solver.NewSearch(db, [objective])
num_solutions = 0
while solver.NextSolution():
num_solutions += 1
print 'total distances:', z.Value()
print 'santas:', [santas[i].Value() for i in range(n)]
for i in range(n):
print '%s\tis a Santa to %s (distance %i)' % \
(persons[i],
persons[santas[i].Value()],
santa_distance[i].Value())
# print 'distance:', [santa_distance[i].Value()
# for i in range(n)]
print
print 'num_solutions:', num_solutions
print 'failures:', solver.Failures()
print 'branches:', solver.Branches()
print 'WallTime:', solver.WallTime(), 'ms'
single = 0
if __name__ == '__main__':
print 'Secret Santas without single'
main(single)
print '\nSecret Santas with single:'
single = 1
main(single)
|
feigames/Odoo | refs/heads/master | addons/base_gengo/wizard/__init__.py | 434 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_gengo_translations
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mensler/ansible | refs/heads/devel | lib/ansible/modules/cloud/profitbricks/profitbricks_nic.py | 69 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: profitbricks_nic
short_description: Create or Remove a NIC.
description:
    - This module allows you to create or remove a NIC. This module has a dependency on profitbricks >= 1.0.0
version_added: "2.0"
options:
datacenter:
description:
- The datacenter in which to operate.
required: true
server:
description:
- The server name or ID.
required: true
name:
description:
- The name or ID of the NIC. This is only required on deletes, but not on create.
required: true
lan:
description:
- The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create.
required: true
subscription_user:
description:
- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
required: false
subscription_password:
description:
      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
required: false
wait:
description:
- wait for the operation to complete before returning
required: false
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
state:
description:
- Indicate desired state of the resource
required: false
default: 'present'
choices: ["present", "absent"]
requirements: [ "profitbricks" ]
author: Matt Baldwin (baldwin@stackpointcloud.com)
'''
EXAMPLES = '''
# Create a NIC
- profitbricks_nic:
datacenter: Tardis One
server: node002
lan: 2
wait_timeout: 500
state: present
# Remove a NIC
- profitbricks_nic:
datacenter: Tardis One
server: node002
name: 7341c2454f
wait_timeout: 500
state: absent
'''
import re
import uuid
import time
HAS_PB_SDK = True
try:
from profitbricks.client import ProfitBricksService, NIC
except ImportError:
HAS_PB_SDK = False
uuid_match = re.compile(
'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
if not promise:
return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
time.sleep(5)
operation_result = profitbricks.get_request(
request_id=promise['requestId'],
status=True)
if operation_result['metadata']['status'] == "DONE":
return
elif operation_result['metadata']['status'] == "FAILED":
raise Exception(
'Request failed to complete ' + msg + ' "' + str(
promise['requestId']) + '" to complete.')
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
def create_nic(module, profitbricks):
"""
Creates a NIC.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if the nic creates, false otherwise
"""
datacenter = module.params.get('datacenter')
server = module.params.get('server')
lan = module.params.get('lan')
name = module.params.get('name')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
# Locate UUID for Datacenter
if not (uuid_match.match(datacenter)):
datacenter_list = profitbricks.list_datacenters()
for d in datacenter_list['items']:
dc = profitbricks.get_datacenter(d['id'])
if datacenter == dc['properties']['name']:
datacenter = d['id']
break
# Locate UUID for Server
if not (uuid_match.match(server)):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
server = s['id']
break
try:
n = NIC(
name=name,
lan=lan
)
nic_response = profitbricks.create_nic(datacenter, server, n)
if wait:
_wait_for_completion(profitbricks, nic_response,
wait_timeout, "create_nic")
return nic_response
except Exception as e:
module.fail_json(msg="failed to create the NIC: %s" % str(e))
def delete_nic(module, profitbricks):
"""
Removes a NIC
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if the NIC was removed, false otherwise
"""
datacenter = module.params.get('datacenter')
server = module.params.get('server')
name = module.params.get('name')
# Locate UUID for Datacenter
if not (uuid_match.match(datacenter)):
datacenter_list = profitbricks.list_datacenters()
for d in datacenter_list['items']:
dc = profitbricks.get_datacenter(d['id'])
if datacenter == dc['properties']['name']:
datacenter = d['id']
break
# Locate UUID for Server
server_found = False
if not (uuid_match.match(server)):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
server_found = True
server = s['id']
break
if not server_found:
return False
# Locate UUID for NIC
nic_found = False
if not (uuid_match.match(name)):
nic_list = profitbricks.list_nics(datacenter, server)
for n in nic_list['items']:
if name == n['properties']['name']:
nic_found = True
name = n['id']
break
if not nic_found:
return False
try:
nic_response = profitbricks.delete_nic(datacenter, server, name)
return nic_response
except Exception as e:
module.fail_json(msg="failed to remove the NIC: %s" % str(e))
def main():
module = AnsibleModule(
argument_spec=dict(
datacenter=dict(),
server=dict(),
name=dict(default=str(uuid.uuid4()).replace('-','')[:10]),
lan=dict(),
subscription_user=dict(),
subscription_password=dict(no_log=True),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=600),
state=dict(default='present'),
)
)
if not HAS_PB_SDK:
module.fail_json(msg='profitbricks required for this module')
if not module.params.get('subscription_user'):
module.fail_json(msg='subscription_user parameter is required')
if not module.params.get('subscription_password'):
module.fail_json(msg='subscription_password parameter is required')
if not module.params.get('datacenter'):
module.fail_json(msg='datacenter parameter is required')
if not module.params.get('server'):
module.fail_json(msg='server parameter is required')
subscription_user = module.params.get('subscription_user')
subscription_password = module.params.get('subscription_password')
profitbricks = ProfitBricksService(
username=subscription_user,
password=subscription_password)
state = module.params.get('state')
if state == 'absent':
if not module.params.get('name'):
module.fail_json(msg='name parameter is required')
try:
(changed) = delete_nic(module, profitbricks)
module.exit_json(changed=changed)
except Exception as e:
module.fail_json(msg='failed to set nic state: %s' % str(e))
elif state == 'present':
if not module.params.get('lan'):
module.fail_json(msg='lan parameter is required')
try:
(nic_dict) = create_nic(module, profitbricks)
module.exit_json(nics=nic_dict)
except Exception as e:
module.fail_json(msg='failed to set nic state: %s' % str(e))
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
TwinkleChawla/nova | refs/heads/master | nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py | 146 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from sqlalchemy import Index, MetaData, Table
from nova.i18n import _LI
LOG = logging.getLogger(__name__)
def _get_deleted_expire_index(table):
members = sorted(['deleted', 'expire'])
for idx in table.indexes:
if sorted(idx.columns.keys()) == members:
return idx
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
reservations = Table('reservations', meta, autoload=True)
if _get_deleted_expire_index(reservations):
LOG.info(_LI('Skipped adding reservations_deleted_expire_idx '
'because an equivalent index already exists.'))
return
# Based on expire_reservations query
# from: nova/db/sqlalchemy/api.py
index = Index('reservations_deleted_expire_idx',
reservations.c.deleted, reservations.c.expire)
index.create(migrate_engine)
|
da1z/intellij-community | refs/heads/master | python/testData/mover/commentIntoCompound_afterDown.py | 83 |
def f():
#comment
if True:
a = 1
else:
a = 2
|
invenfantasy/kubernetes | refs/heads/master | examples/cluster-dns/images/frontend/client.py | 504 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import requests
import socket
from urlparse import urlparse
def CheckServiceAddress(address):
hostname = urlparse(address).hostname
service_address = socket.gethostbyname(hostname)
print service_address
def GetServerResponse(address):
print 'Send request to:', address
response = requests.get(address)
print response
print response.content
def Main():
parser = argparse.ArgumentParser()
parser.add_argument('address')
args = parser.parse_args()
CheckServiceAddress(args.address)
GetServerResponse(args.address)
if __name__ == "__main__":
Main()
|
code-for-india/sahana_shelter_worldbank | refs/heads/hackathon | modules/s3/s3codecs/pdf.py | 1 | # -*- coding: utf-8 -*-
"""
S3 Adobe PDF codec
@copyright: 2011-13 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3RL_PDF"]
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from copy import deepcopy
import os
from gluon import *
from gluon import current
from gluon.storage import Storage
from gluon.contenttype import contenttype
from gluon.languages import lazyT
from ..s3codec import S3Codec
from ..s3utils import s3_unicode
# Import the specialist libraries
try:
from PIL import Image
from PIL import ImageOps
from PIL import ImageStat
PILImported = True
except(ImportError):
try:
import Image
import ImageOps
import ImageStat
PILImported = True
except(ImportError):
PILImported = False
try:
from reportlab.lib.enums import TA_CENTER, TA_RIGHT
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfgen import canvas
from reportlab.lib.fonts import tt2ps
from reportlab.rl_config import canvas_basefontname as _baseFontName
from reportlab.platypus import BaseDocTemplate, SimpleDocTemplate, PageTemplate
from reportlab.platypus.frames import Frame
from reportlab.platypus import Spacer, PageBreak, FrameBreak, Paragraph
from reportlab.platypus import Table, TableStyle
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
from reportlab.lib.units import cm
from reportlab.lib import colors
from reportlab.lib.colors import Color
from reportlab.lib.pagesizes import A4, LETTER, landscape, portrait
from reportlab.platypus.flowables import Flowable
reportLabImported = True
except ImportError:
reportLabImported = False
BaseDocTemplate = object
inch = 72.0
canvas = Storage()
canvas.Canvas = None
PDF_WIDTH = 0
PDF_HEIGHT = 1
# =============================================================================
class S3RL_PDF(S3Codec):
"""
Simple Report Labs PDF format codec
"""
def __init__(self):
"""
Constructor
"""
# Error codes
T = current.T
self.ERROR = Storage(
            PIL_ERROR = "PIL (Python Imaging Library) not installed, images cannot be embedded in the PDF report",
RL_ERROR = "Python needs the ReportLab module installed for PDF export"
)
# -------------------------------------------------------------------------
def encode(self, resource, **attr):
"""
Export data as a PDF document
@param resource: the resource
@param attr: dictionary of keyword arguments, in s3_rest_controller
passed through from the calling controller
@keyword request: the S3Request
@keyword method: "read" to not include a list view when no
component is specified
@keyword list_fields: fields to include in lists
@keyword pdf_componentname: enforce this component
@keyword pdf_groupby: how to group the results
@keyword pdf_orderby: how to sort rows (within any level of grouping)
@keyword pdf_callback: callback to be used rather than request
@keyword pdf_title: the title of the report
@keyword pdf_filename: the filename for the report
@keyword rheader: HTML page header (override by pdf_header)
@keyword rfooter: HTML page footer (override by pdf_footer)
@keyword pdf_header: callback to generate the HTML header
(overrides rheader)
@keyword pdf_footer: callback to generate the HTML footer,
or static HTML (overrides rfooter)
@keyword pdf_header_padding: add this amount of space between
the header and the body
@keyword pdf_footer_padding: add this amount of space between
the body and the footer
@keyword pdf_hide_comments: don't show the comments in a table
@keyword pdf_table_autogrow: Indicates that a table should grow to
fill the available space. Valid values:
H - Horizontal
V - Vertical
B - Both
@keyword pdf_paper_alignment: Portrait (default) or Landscape
@keyword use_colour: True to add colour to the cells. default False
"""
if not PILImported:
current.session.warning = self.ERROR.PIL_ERROR
if not reportLabImported:
current.session.error = self.ERROR.RL_ERROR
redirect(URL(extension=""))
# Settings
r = self.r = attr.get("request", None)
self.list_fields = attr.get("list_fields")
self.pdf_groupby = attr.get("pdf_groupby")
self.pdf_orderby = attr.get("pdf_orderby")
self.pdf_hide_comments = attr.get("pdf_hide_comments")
self.table_autogrow = attr.get("pdf_table_autogrow")
self.pdf_header_padding = attr.get("pdf_header_padding", 0)
self.pdf_footer_padding = attr.get("pdf_footer_padding", 0)
# Get the title & filename
now = current.request.now.isoformat()[:19].replace("T", " ")
title = attr.get("pdf_title")
if title == None:
title = "Report"
docTitle = "%s %s" % (title, now)
self.filename = attr.get("pdf_filename")
if self.filename == None:
self.filename = "%s_%s.pdf" % (title, now)
# Get the Doc Template
paper_size = attr.get("paper_size")
pdf_paper_alignment = attr.get("pdf_paper_alignment", "Portrait")
doc = EdenDocTemplate(title=docTitle,
paper_size = paper_size,
paper_alignment = pdf_paper_alignment)
# Get the header
header_flowable = None
header = attr.get("pdf_header")
if not header:
header = attr.get("rheader")
if header:
header_flowable = self.get_html_flowable(header,
doc.printable_width)
if self.pdf_header_padding:
header_flowable.append(Spacer(1, self.pdf_header_padding))
# Get the footer
footer_flowable = None
footer = attr.get("pdf_footer")
if not footer:
footer = attr.get("rfooter")
if footer:
footer_flowable = self.get_html_flowable(footer,
doc.printable_width)
if self.pdf_footer_padding:
footer_flowable.append(Spacer(1, self.pdf_footer_padding))
# Build report template
# Get data for the body of the text
data = None
body_flowable = None
doc.calc_body_size(header_flowable, footer_flowable)
callback = attr.get("pdf_callback")
pdf_componentname = attr.get("pdf_componentname", None)
if callback:
# Get the document body from the callback
body_flowable = self.get_html_flowable(callback(r),
doc.printable_width)
elif pdf_componentname: # and resource.parent is None:
# Enforce a particular component
resource = current.s3db.resource(r.tablename,
components = [pdf_componentname],
id = r.id)
if pdf_componentname in resource.components:
component = resource.components[pdf_componentname]
body_flowable = self.get_resource_flowable(component, doc)
elif r.component or attr.get("method", "list") != "read":
# Use the requested resource
body_flowable = self.get_resource_flowable(resource, doc)
styleSheet = getSampleStyleSheet()
style = styleSheet["Normal"]
style.fontName = "Helvetica"
style.fontSize = 9
if not body_flowable:
body_flowable = [Paragraph("", style)]
self.normalstyle = style
# Build the PDF
doc.build(header_flowable,
body_flowable,
footer_flowable,
)
# Return the generated PDF
response = current.response
if response:
disposition = "attachment; filename=\"%s\"" % self.filename
response.headers["Content-Type"] = contenttype(".pdf")
response.headers["Content-disposition"] = disposition
doc.output.seek(0)
return doc.output.read()
# -------------------------------------------------------------------------
def get_html_flowable(self, rules, printable_width):
"""
Function to convert the rules passed in to a flowable.
The rules (for example) could be an rHeader callback
"""
if callable(rules):
# Callback to produce the HTML (e.g. rheader)
r = self.r
# Switch to HTML representation
representation = r.representation
r.representation = "html"
try:
html = rules(r)
except:
html = ""
r.representation = representation
else:
# Static HTML
html = rules
parser = S3html2pdf(pageWidth = printable_width,
exclude_class_list=["tabs"])
result = parser.parse(html)
return result
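    # Illustrative sketch (added; not in the original module): `rules` may be
    # either a callable such as an rheader function, or static web2py HTML,
    # for example
    #
    #   flowables = self.get_html_flowable(DIV(H4("Summary"), P("...")),
    #                                      doc.printable_width)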
# -------------------------------------------------------------------------
def get_resource_flowable(self, resource, doc):
"""
            Get a list of fields: if the list_fields attribute is provided,
            use it to extract the fields that are required; otherwise use
            the list of readable fields.
"""
fields = self.list_fields
if fields:
list_fields = [f for f in fields if f != "id"]
else:
list_fields = [f.name for f in resource.readable_fields()
if f.type != "id" and
f.name != "comments" or
not self.pdf_hide_comments]
vars = Storage(current.request.vars)
vars["iColumns"] = len(list_fields)
filter, orderby, left = resource.datatable_filter(list_fields, vars)
resource.add_filter(filter)
result = resource.select(list_fields,
left=left,
limit=None,
count=True,
getids=True,
orderby=orderby,
represent=True,
show_links=False)
# Now generate the PDF table
pdf_table = S3PDFTable(doc,
result["rfields"],
result["rows"],
groupby = self.pdf_groupby,
autogrow = self.table_autogrow,
body_height = doc.body_height,
).build()
return pdf_table
# =============================================================================
class EdenDocTemplate(BaseDocTemplate):
"""
The standard document template for eden reports
It allows for the following page templates:
1) First Page
2) Even Page
3) Odd Page
4) Landscape Page
"""
def __init__(self,
title = "Sahana Eden",
margin = (0.5 * inch, # top
0.3 * inch, # left
0.5 * inch, # bottom
0.3 * inch), # right
margin_inside = 0.0 * inch, # used for odd even pages
paper_size = None,
paper_alignment = "Portrait"):
"""
Set up the standard page templates
"""
self.output = StringIO()
self.defaultPage = paper_alignment
if paper_size:
self.paper_size = paper_size
else:
if current.deployment_settings.get_paper_size() == "Letter":
self.paper_size = LETTER
else:
self.paper_size = A4
self.topMargin = margin[0]
self.leftMargin = margin[1]
self.bottomMargin = margin[2]
self.rightMargin = margin[3]
self.insideMargin = margin_inside
BaseDocTemplate.__init__(self,
self.output,
title = title,
leftMargin = self.leftMargin,
rightMargin = self.rightMargin,
topMargin = self.topMargin,
bottomMargin = self.bottomMargin,
)
self.MINIMUM_MARGIN_SIZE = 0.2 * inch
self.body_flowable = None
self._calc()
# -------------------------------------------------------------------------
def get_flowable_size(self, flowable):
"""
Function to return the size a flowable will require
"""
if not flowable:
return (0, 0)
if not isinstance(flowable, list):
flowable = [flowable]
w = 0
h = 0
for f in flowable:
if f:
size = f.wrap(self.printable_width,
self.printable_height)
if size[0] > w:
w = size[PDF_WIDTH]
h += size[PDF_HEIGHT]
return (w, h)
# -------------------------------------------------------------------------
def _calc(self):
if self.defaultPage == "Landscape":
self.pagesize = landscape(self.paper_size)
else:
self.pagesize = portrait(self.paper_size)
BaseDocTemplate._calc(self)
self.height = self.pagesize[PDF_HEIGHT]
self.width = self.pagesize[PDF_WIDTH]
self.printable_width = self.width - \
self.leftMargin - \
self.rightMargin - \
self.insideMargin
self.printable_height = self.height - \
self.topMargin - \
self.bottomMargin
# -------------------------------------------------------------------------
def calc_body_size(self,
header_flowable,
footer_flowable,
):
"""
Helper function to calculate the various sizes of the page
"""
self._calc() # in case we changed margins sizes etc
self.height = self.pagesize[PDF_HEIGHT]
self.width = self.pagesize[PDF_WIDTH]
self.printable_width = self.width - \
self.leftMargin - \
self.rightMargin - \
self.insideMargin
self.printable_height = self.height - \
self.topMargin - \
self.bottomMargin
header_size = self.get_flowable_size(header_flowable)
footer_size = self.get_flowable_size(footer_flowable)
self.header_height = header_size[PDF_HEIGHT]
self.footer_height = footer_size[PDF_HEIGHT]
self.body_height = self.printable_height - \
self.header_height - \
self.footer_height
# -------------------------------------------------------------------------
def build(self,
header_flowable,
body_flowable,
footer_flowable,
canvasmaker=canvas.Canvas):
"""
Build the document using the flowables.
Set up the page templates that the document can use
"""
self.header_flowable = header_flowable
self.body_flowable = body_flowable
self.footer_flowable = footer_flowable
self.calc_body_size(header_flowable,
footer_flowable,
)
showBoundary = 0 # for debugging set to 1, otherwise 0
body_frame = Frame(self.leftMargin,
self.bottomMargin + self.footer_height,
self.printable_width,
self.body_height,
leftPadding = 0,
bottomPadding = 0,
rightPadding = 0,
topPadding = 0,
id = "body",
showBoundary = showBoundary
)
self.body_frame = body_frame
self.normalPage = PageTemplate(id = "Normal",
frames = [body_frame,],
onPage = self.add_page_decorators,
pagesize = self.pagesize
)
# @todo set these page templates up
#self.evenPage = PageTemplate(id="even",
# frames=frame_list,
# onPage=self.onEvenPage,
# pagesize=self.pagesize
# )
#self.oddPage = PageTemplate(id="odd",
# frames=frame_list,
# onPage=self.onOddPage,
# pagesize=self.pagesize
# )
self.landscapePage = PageTemplate(id="Landscape",
frames = [body_frame,],
onPage=self.add_page_decorators,
pagesize=landscape(self.pagesize)
)
if self.defaultPage == "Landscape":
self.addPageTemplates(self.landscapePage)
else:
self.addPageTemplates(self.normalPage)
BaseDocTemplate.build(self, self.body_flowable, canvasmaker=canvasmaker)
# -------------------------------------------------------------------------
def add_page_decorators(self, canvas, doc):
"""
"""
if self.header_flowable:
top = self.bottomMargin + self.printable_height
for flow in self.header_flowable:
height = self.get_flowable_size(flow)[PDF_HEIGHT]
bottom = top - height
flow.drawOn(canvas,
self.leftMargin,
bottom
)
top = bottom
if self.footer_flowable:
top = self.bottomMargin + self.footer_height
for flow in self.footer_flowable:
height = self.get_flowable_size(flow)[PDF_HEIGHT]
bottom = top - height
flow.drawOn(canvas,
self.leftMargin,
bottom
)
top = bottom
# -------------------------------------------------------------------------
def addParagraph(self, text, style=None, append=True):
"""
Method to create a paragraph that may be inserted into the document
@param text: The text for the paragraph
@param append: If True then the paragraph will be stored in the
document flow ready for generating the pdf.
@return The paragraph
This method can return the paragraph rather than inserting into the
document. This is useful if the paragraph needs to be first
inserted in another flowable, before being added to the document.
An example of when this is useful is when large amounts of text
(such as a comment) are added to a cell of a table.
"""
if text != "":
if style == None:
styleSheet = getSampleStyleSheet()
style = styleSheet["Normal"]
para = Paragraph(text, style)
if append and self.body_flowable:
self.body_flowable.append(para)
return para
return ""
# -------------------------------------------------------------------------
def cellStyle(self, style, cell):
"""
Add special styles to the text in a cell
"""
if style == "*GREY":
return [("TEXTCOLOR", cell, cell, colors.lightgrey)]
elif style == "*RED":
return [("TEXTCOLOR", cell, cell, colors.red)]
return []
# -------------------------------------------------------------------------
def addCellStyling(self, table, style):
"""
Add special styles to the text in a table
"""
row = 0
for line in table:
col = 0
for cell in line:
try:
if cell.startswith("*"):
(instruction,sep,text) = cell.partition(" ")
style += self.cellStyle(instruction, (col, row))
table[row][col] = text
except:
pass
col += 1
row += 1
return (table, style)
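    # Example of the convention handled above (illustrative only): a cell
    # containing the string "*RED overdue" is rewritten to "overdue" and a
    # ("TEXTCOLOR", (col, row), (col, row), colors.red) command is appended
    # to the table style, so that cell's text is rendered in red.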
# =============================================================================
class S3PDFTable(object):
"""
Class to build a table that can then be placed in a pdf document
        The table will be formatted so that it fits on the page. This class
        doesn't need to be called directly. Rather, see S3PDF.addTable()
"""
def __init__(self,
document,
rfields,
raw_data,
groupby = None,
hide_comments = False,
autogrow = False,
body_height = 0,
):
"""
Method to create a table object
@param document: A S3PDF object
@param raw_data: A list of rows
@param rfields: A list of field selectors
@param groupby: A field name that is to be used as a sub-group
All the records that share the same pdf_groupby value
will be clustered together
@param hide_comments: Any comment field will be hidden
"""
if current.deployment_settings.get_paper_size() == "Letter":
self.paper_size = LETTER
else:
self.paper_size = A4
self.pdf = document
# @todo: Change the code to use raw_data directly rather than this
# conversion to an ordered list of values
self.rfields = rfields
rdata = []
rappend = rdata.append
for row in raw_data:
data = []
dappend = data.append
for selector in rfields:
value = row[selector.colname]
# Try to convert Web2py elements to ReportLab equivalents
dvalue = None
while True:
if isinstance(value, (basestring, lazyT)):
dvalue = value
elif isinstance(value, IMG):
dvalue = S3html2pdf.parse_img(value, selector.field.uploadfolder)
if dvalue:
dvalue = dvalue[0]
elif isinstance(value, DIV):
if len(value.components) > 0:
value = value.components[0]
continue
else:
dvalue = s3_unicode(value)
else:
dvalue = s3_unicode(value)
break
dappend(dvalue)
rdata.append(data)
self.raw_data = rdata
self.labels = [selector.label for selector in self.rfields]
self.list_fields = [selector.fname for selector in self.rfields]
self.pdf_groupby = groupby
self.hideComments = hide_comments
self.autogrow = autogrow
self.body_height = body_height
self.data = []
self.subheadingList = []
self.subheadingLevel = {}
self.pages = []
self.colWidths = []
self.newColWidth = [] # @todo: remove this (but see presentation)
self.rowHeights = []
self.style = None
# Temp document to test the table size, default to A4 portrait
# @todo: use custom template
# @todo: set pagesize for pdf component not whole document
self.tempDoc = EdenDocTemplate()
#self.tempDoc.setPageTemplates(self.pdf.pageHeader,
# self.pdf.pageFooter)
#self.tempDoc.pagesize = portrait(self.paper_size)
# Set up style constants
self.headerColour = Color(0.73, 0.76, 1)
self.oddColour = Color(0.92, 0.92, 1)
self.evenColour = Color(0.83, 0.84, 1)
self.MIN_COMMENT_COL_WIDTH = 200
self.fontsize = 12
# -------------------------------------------------------------------------
def build(self):
"""
Method to build the table.
@return: A list of Table objects. Normally this will be a list with
just one table object, but if the table needs to be split
across columns then one object per page will be created.
"""
if self.pdf_groupby:
data = self.group_data()
data = [self.labels] + data
elif self.raw_data != None:
data = [self.labels] + self.raw_data
# Only build the table if we have some data
if not data or not (data[0]):
return None
endCol = len(self.labels) - 1
rowCnt = len(data)
self.style = self.tableStyle(0, rowCnt, endCol)
tempTable = Table(data,
repeatRows=1,
style=self.style,
hAlign="LEFT"
)
self.data = data
self.tempDoc.build(None, [tempTable], None)
self.newColWidth = [tempTable._colWidths]
self.rowHeights = [tempTable._rowHeights]
self.pages.append(data)
if not self.tweakDoc(tempTable):
# Need to split the table
self.pages = self.splitTable(tempTable)
return self.presentation()
# -------------------------------------------------------------------------
def group_data(self):
"""
"""
groups = self.pdf_groupby.split(",")
newData = []
data = self.raw_data
level = 0
list_fields = self.list_fields
for field in groups:
level += 1
field = field.strip()
# Find the location of field in list_fields
i = 0
rowlength = len(list_fields)
while i < rowlength:
if list_fields[i] == field:
break
i += 1
list_fields = list_fields[0:i] + list_fields[i + 1:]
labels = self.labels[0:i] + self.labels[i + 1:]
self.labels = labels
currentGroup = None
r = 0
for row in data:
if r + 1 in self.subheadingList:
newData.append(row)
r += 1
else:
try:
group = row[i]
if group != currentGroup:
line = [group]
newData.append(line)
r += 1
currentGroup = group
self.subheadingList.append(r)
self.subheadingLevel[r] = level
# All existing subheadings after this point need to
# be shuffled down one place.
for x in range (len(self.subheadingList)):
if self.subheadingList[x] > r:
posn = self.subheadingList[x]
self.subheadingList[x] = posn + 1
oldlevel = self.subheadingLevel[posn]
del self.subheadingLevel[posn]
self.subheadingLevel[posn + 1] = oldlevel
line = row[0:i] + row[i + 1:]
newData.append(line)
r += 1
except:
newData.append(row)
r += 1
data = newData
newData = []
self.list_fields = list_fields
return data
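    # Worked example (illustrative): grouping the rows
    #   [["North", "Site A", 10], ["North", "Site B", 4], ["South", "Site C", 7]]
    # by the first column produces
    #   [["North"], ["Site A", 10], ["Site B", 4], ["South"], ["Site C", 7]]
    # with the positions of the "North"/"South" rows recorded in
    # subheadingList so that tableStyle() can span and emphasise them.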
# -------------------------------------------------------------------------
def presentation(self):
"""
            This will convert the S3PDFTable object into a format that can
            be added to a S3PDF document object.
This is only used internally but could be used to generate a copy
of a previously generated table
"""
# Build the tables
content = []
currentPage = 0
totalPagesAcross = len(self.newColWidth)
if self.autogrow == "H" or self.autogrow == "B":
printable_width = self.pdf.printable_width
# Expand the columns to use all the available space
newColWidth = []
for cols in self.newColWidth:
col_width = 0
for col in cols:
col_width += col
if col_width < printable_width:
surplus = printable_width - col_width
proportion = surplus / col_width
newcols = []
for col in cols:
newcols.append(col + col * proportion)
newColWidth.append(newcols)
self.newColWidth = newColWidth
startRow = 0
for page in self.pages:
if page == []:
currentPage += 1
continue
colWidths = self.newColWidth[currentPage % totalPagesAcross]
if self.autogrow == "V" or self.autogrow == "B":
row_height = self.rowHeights[0][0]
rows = len(page)
if self.body_height > row_height * rows:
rowCnt = int(self.body_height/row_height)
extra_rows = rowCnt - rows
if extra_rows:
cells = len(colWidths)
row = [""] * cells
extra = [row] * extra_rows
page = page + extra
endCol = len(colWidths) - 1
rowCnt = len(page)
self.style = self.tableStyle(startRow, rowCnt, endCol)
(page, self.style) = self.pdf.addCellStyling(page, self.style)
p = Table(page, repeatRows=1,
style=self.style,
hAlign="LEFT",
colWidths=colWidths,
emptyTableAction="indicate"
)
content.append(p)
# Add a page break, except for the last page.
if currentPage + 1 < len(self.pages):
content.append(PageBreak())
currentPage += 1
if currentPage % totalPagesAcross == 0:
startRow += rowCnt - 1 # Don't include the heading
return content
# -------------------------------------------------------------------------
def getAvailableMarginSpace(self):
"""
Internally used method to calculate the amount of space available
on the width of a page.
"""
_pdf = self.pdf
availableMarginSpace = _pdf.leftMargin \
+ _pdf.rightMargin \
- 2 * _pdf.MINIMUM_MARGIN_SIZE
return availableMarginSpace
# -------------------------------------------------------------------------
def tweakMargin(self, tableWidth):
"""
Internally used method to adjust the document margins so that the
table will fit into the available space
"""
availableMarginSpace = self.getAvailableMarginSpace()
currentOverlap = tableWidth - self.tempDoc.printable_width
endCol = len(self.labels) - 1
rowCnt = len(self.data)
# Check margins
if currentOverlap < availableMarginSpace:
_pdf = self.pdf
_pdf.leftMargin -= currentOverlap / 2
_pdf.rightMargin -= currentOverlap / 2
return True
return False
# -------------------------------------------------------------------------
def tweakFont(self, tableWidth, newFontSize, colWidths):
"""
Internally used method to adjust the font size used so that the
table will fit into the available space on the page.
"""
# Check font
adjustedWidth = tableWidth * newFontSize / self.fontsize
if (adjustedWidth - self.tempDoc.printable_width) < self.getAvailableMarginSpace():
for i in range(len(colWidths)):
colWidths[i] *= float(newFontSize) / float(self.fontsize)
self.newColWidth = [colWidths]
self.fontsize = newFontSize
return self.tweakMargin(adjustedWidth)
return False
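    # Worked example (illustrative): with self.fontsize == 12, a table that
    # measured 600pt wide re-checked at newFontSize == 10 gives
    #   adjustedWidth = 600 * 10 / 12 = 500pt
    # and each column width is scaled by 10/12 before retrying tweakMargin().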
# -------------------------------------------------------------------------
def minorTweaks(self, tableWidth, colWidths):
"""
Internally used method to tweak the formatting so that the table
will fit into the available space on the page.
"""
if self.tweakMargin(tableWidth):
return True
originalFont = self.fontsize
if self.tweakFont(tableWidth, originalFont -1, colWidths):
return True
if self.tweakFont(tableWidth, originalFont -2, colWidths):
return True
if self.tweakFont(tableWidth, originalFont -3, colWidths):
return True
return False
# -------------------------------------------------------------------------
def tweakDoc(self, table):
"""
Internally used method to adjust the table so that it will fit
into the available space on the page.
@return: True if it is able to perform minor adjustments and have
the table fit in the page. False means that the table will need to
be split across the columns.
"""
tableWidth = 0
for colWidth in table._colWidths:
tableWidth += colWidth
colWidths = table._colWidths
#print "Doc size %s x %s Table width %s" % (self.tempDoc.printable_width, self.tempDoc.height, total)
if tableWidth > self.tempDoc.printable_width:
# self.pdf.setMargins(0.5*inch, 0.5*inch)
# First massage any comment column by putting it in a paragraph
colNo = 0
for label in self.labels:
# Wrap comments in a paragraph
if label.lower() == "comments":
currentWidth = table._colWidths[colNo]
# print "%s %s" % (colNo, currentWidth)
if currentWidth > self.MIN_COMMENT_COL_WIDTH:
for i in range(1, len(self.data)): # skip the heading
try:
comments = self.data[i][colNo]
if comments:
comments = self.pdf.addParagraph(comments, append=False)
self.data[i][colNo] = comments
except IndexError:
pass
colWidths[colNo] = self.MIN_COMMENT_COL_WIDTH
tableWidth += self.MIN_COMMENT_COL_WIDTH - currentWidth
colNo += 1
if not self.minorTweaks(tableWidth, colWidths):
self.tempDoc.defaultPage = "Landscape"
self.tempDoc._calc()
self.pdf.defaultPage = "Landscape"
self.pdf._calc()
return self.minorTweaks(tableWidth, colWidths)
return True
# -------------------------------------------------------------------------
def splitTable(self, tempTable):
"""
Internally used method to split the table across columns so that it
will fit into the available space on the page.
"""
colWidths = tempTable._colWidths
rowHeights = tempTable._rowHeights
total = 0
colNo = 0
colSplit = []
newColWidth = []
pageColWidth = []
for colW in colWidths:
if colNo > 0 and total + colW > self.tempDoc.printable_width:
# Split before this column...
colSplit.append(colNo)
newColWidth.append(pageColWidth)
# ...and put it on a new page
pageColWidth = [colW]
total = colW
else:
# Append this column to the current page
pageColWidth.append(colW)
total += colW
colNo += 1
colSplit.append(len(colWidths))
newColWidth.append(pageColWidth)
self.newColWidth = newColWidth
total = 0
rowNo = 0
lastKnownHeight = 20 # Not all row heights get calculated.
rowSplit = []
for rowH in rowHeights:
if rowH == None:
rowH = lastKnownHeight
else:
lastKnownHeight = rowH
if total + rowH > self.body_height:
rowSplit.append(rowNo)
total = 2 * rowH # 2* is needed to take into account the repeated header row
else:
total += rowH
rowNo += 1
rowSplit.append(rowNo)
# Build the pages of data
pages = []
startRow = 1 # Skip the first row (the heading) because we'll generate our own
for endRow in rowSplit:
startCol = 0
for endCol in colSplit:
page = []
pappend = page.append
label = []
lappend = label.append
for colIndex in range(startCol, endCol):
try:
lappend(self.labels[colIndex])
except IndexError:
lappend("")
pappend(label)
for rowIndex in range(startRow, endRow):
line = []
lappend = line.append
for colIndex in range(startCol, endCol):
try:
lappend(self.data[rowIndex][colIndex])
except IndexError: # No data to add.
# If this is the first column of a subheading row then repeat the subheading
if len(line) == 0 and rowIndex in self.subheadingList:
try:
lappend(self.data[rowIndex][0])
except IndexError:
lappend("")
else:
lappend("")
pappend(line)
pages.append(page)
startCol = endCol
startRow = endRow
return pages
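    # Worked example (illustrative): with printable_width == 500 and column
    # widths [200, 250, 150], the running total reaches 450 after the second
    # column, so the third column (450 + 150 > 500) is pushed onto the next
    # page across: colSplit becomes [2, 3] and newColWidth
    # [[200, 250], [150]]. Rows are split against body_height the same way,
    # with the header row repeated on every page.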
# -------------------------------------------------------------------------
def tableStyle(self, startRow, rowCnt, endCol, colour_required=False):
"""
Internally used method to assign a style to the table
@param startRow: The row from the data that the first data row in
the table refers to. When a table is split the first row in the
table (ignoring the label header row) will not always be the first row
in the data. This is needed to align the two. Currently this parameter
is used to identify sub headings and give them an emphasised styling
@param rowCnt: The number of rows in the table
@param endCol: The last column in the table
@todo: replace endCol with -1
(should work but need to test with a split table)
"""
style = [("FONTNAME", (0, 0), (-1, -1), "Helvetica"),
("FONTSIZE", (0, 0), (-1, -1), self.fontsize),
("VALIGN", (0, 0), (-1, -1), "TOP"),
("LINEBELOW", (0, 0), (endCol, 0), 1, Color(0, 0, 0)),
("FONTNAME", (0, 0), (endCol, 0), "Helvetica-Bold"),
]
sappend = style.append
if colour_required:
sappend(("BACKGROUND", (0, 0), (endCol, 0), self.headerColour))
else:
sappend(("BACKGROUND", (0, 0), (-1, 0), colors.lightgrey))
sappend(("INNERGRID", (0, 0), (-1, -1), 0.2, colors.lightgrey))
if self.pdf_groupby != None:
sappend(("LEFTPADDING", (0, 0), (-1, -1), 20))
rowColourCnt = 0 # used to alternate the colours correctly when we have subheadings
for i in range(rowCnt):
# If subheading
if startRow + i in self.subheadingList:
level = self.subheadingLevel[startRow + i]
if colour_required:
sappend(("BACKGROUND", (0, i), (endCol, i),
self.headerColour))
sappend(("FONTNAME", (0, i), (endCol, i), "Helvetica-Bold"))
sappend(("SPAN", (0, i), (endCol, i)))
sappend(("LEFTPADDING", (0, i), (endCol, i), 6 * level))
elif i > 0:
if colour_required:
if rowColourCnt % 2 == 0:
sappend(("BACKGROUND", (0, i), (endCol, i),
self.evenColour))
rowColourCnt += 1
else:
sappend(("BACKGROUND", (0, i), (endCol, i),
self.oddColour))
rowColourCnt += 1
sappend(("BOX", (0, 0), (-1, -1), 1, Color(0, 0, 0)))
return style
# =============================================================================
class S3html2pdf():
def __init__(self,
pageWidth,
exclude_class_list = []):
"""
            Parser that takes HTML in the form of web2py helper objects
            and converts it into ReportLab flowables for the PDF
"""
self.exclude_class_list = exclude_class_list
self.pageWidth = pageWidth
self.fontsize = 10
styleSheet = getSampleStyleSheet()
self.plainstyle = styleSheet["Normal"]
self.plainstyle.fontName = "Helvetica"
self.plainstyle.fontSize = 9
self.boldstyle = deepcopy(styleSheet["Normal"])
self.boldstyle.fontName = "Helvetica-Bold"
self.boldstyle.fontSize = 10
self.titlestyle = deepcopy(styleSheet["Normal"])
self.titlestyle.fontName = "Helvetica-Bold"
self.titlestyle.fontSize = 16
self.normalstyle = self.plainstyle
# To add more pdf styles define the style above (just like the titlestyle)
# Then add the style and the name to the lookup dict below
# These can then be added to the html in the code as follows:
# TD("Waybill", _class="pdf_title")
self.style_lookup = {"pdf_title": self.titlestyle}
# -------------------------------------------------------------------------
def parse(self, html):
"""
"""
result = self.select_tag(html)
return result
# -------------------------------------------------------------------------
def select_tag(self, html, title=False):
"""
"""
if self.exclude_tag(html):
return None
if isinstance(html, TABLE):
return self.parse_table(html)
elif isinstance(html, A):
return self.parse_a(html)
elif isinstance(html, P):
return self.parse_p(html)
elif isinstance(html, IMG):
return S3html2pdf.parse_img(html)
elif isinstance(html, DIV):
return self.parse_div(html)
elif (isinstance(html, basestring) or isinstance(html, lazyT)):
if title:
para = [Paragraph(html, self.boldstyle)]
else:
para = [Paragraph(html, self.normalstyle)]
self.normalstyle = self.plainstyle
return para
return None
# -------------------------------------------------------------------------
def exclude_tag(self, html):
"""
"""
try:
if html.attributes["_class"] in self.exclude_class_list:
return True
if html.attributes["_class"] in self.style_lookup:
self.normalstyle = self.style_lookup[html.attributes["_class"]]
except:
pass
return False
# -------------------------------------------------------------------------
def parse_div(self, html):
"""
Parses a DIV element and converts it into a format for ReportLab
@param html: the DIV element to convert
@return: a list containing text that ReportLab can use
"""
content = []
select_tag = self.select_tag
for component in html.components:
result = select_tag(component)
if result != None:
content += result
if content == []:
return None
return content
# -------------------------------------------------------------------------
def parse_a(self, html):
"""
Parses an A element and converts it into a format for ReportLab
@param html: the A element to convert
@return: a list containing text that ReportLab can use
"""
content = []
select_tag = self.select_tag
for component in html.components:
result = select_tag(component)
if result != None:
content += result
if content == []:
return None
return content
# -------------------------------------------------------------------------
@staticmethod
def parse_img(html, uploadfolder=None):
"""
Parses an IMG element and converts it into an Image for ReportLab
@param html: the IMG element to convert
@param uploadfolder: an optional uploadfolder in which to find the file
@return: a list containing an Image that ReportLab can use
@note: The `src` attribute of the image must either
point to a static resource, directly to a file, or to an upload.
"""
from reportlab.platypus import Image
I = None
if "_src" in html.attributes:
src = html.attributes["_src"]
if uploadfolder:
src = src.rsplit("/", 1)
src = os.path.join(uploadfolder, src[1])
elif src.startswith("/%s/static" % current.request.application):
src = src.split("/%s/" % current.request.application)[-1]
src = os.path.join(current.request.folder, src)
else:
src = src.rsplit("/", 1)
src = os.path.join(current.request.folder, "uploads/", src[1])
if os.path.exists(src):
I = Image(src)
if not I:
return None
iwidth = I.drawWidth
iheight = I.drawHeight
# @todo: extract the number from a 60px value
# if "_height" in html.attributes:
# height = int(html.attributes["_height"]) * inch / 80.0
# width = iwidth * (height / iheight)
# elif "_width" in html.attributes:
# width = int(html.attributes["_width"]) * inch / 80.0
# height = iheight * (width / iwidth)
# else:
# height = 1.0 * inch
# width = iwidth * (height / iheight)
height = 1.0 * inch
width = iwidth * (height / iheight)
I.drawHeight = height
I.drawWidth = width
return [I]
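    # Illustrative usage (the image path below is a hypothetical placeholder):
    #
    #   S3html2pdf.parse_img(IMG(_src="/eden/static/img/blank.png"))
    #
    # returns a one-element list holding a ReportLab Image scaled to a height
    # of one inch (width in proportion), or None if the file cannot be found.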
def parse_p(self, html):
"""
Parses a P element and converts it into a format for ReportLab
@param html: the P element to convert
@return: a list containing text that ReportLab can use
"""
content = []
select_tag = self.select_tag
for component in html.components:
result = select_tag(component)
if result != None:
content += result
if content == []:
return None
return content
# -------------------------------------------------------------------------
def parse_table(self, html):
"""
Parses a TABLE element and converts it into a format for ReportLab
@param html: the TABLE element to convert
@return: a list containing text that ReportLab can use
"""
style = [("FONTSIZE", (0, 0), (-1, -1), self.fontsize),
("VALIGN", (0, 0), (-1, -1), "TOP"),
("FONTNAME", (0, 0), (-1, -1), "Helvetica"),
("GRID", (0, 0), (-1, -1), 0.5, colors.grey),
]
content = []
cappend = content.append
rowCnt = 0
result = None
exclude_tag = self.exclude_tag
parse_tr = self.parse_tr
for component in html.components:
if exclude_tag(component):
continue
if isinstance(component, TR):
result = parse_tr(component, style, rowCnt)
rowCnt += 1
if result != None:
cappend(result)
if content == []:
return None
table = Table(content,
style=style,
hAlign="LEFT",
vAlign="Top",
)
cw = table._colWidths
return [table]
# -------------------------------------------------------------------------
def parse_tr (self, html, style, rowCnt):
"""
Parses a TR element and converts it into a format for ReportLab
@param html: the TR element to convert
@return: a list containing text that ReportLab can use
"""
row = []
rappend = row.append
sappend = style.append
colCnt = 0
exclude_tag = self.exclude_tag
select_tag = self.select_tag
for component in html.components:
if isinstance(component, (TH, TD)):
if exclude_tag(component):
continue
colspan = component.attributes.get("_colspan", 1)
if component.components == []:
rappend("")
else:
for detail in component.components:
result = select_tag(detail, title=isinstance(component, TH))
if result != None:
rappend(result)
if isinstance(component, TH):
sappend(("BACKGROUND", (colCnt, rowCnt), (colCnt, rowCnt), colors.lightgrey))
sappend(("FONTNAME", (colCnt, rowCnt), (colCnt, rowCnt), "Helvetica-Bold"))
if colspan > 1:
for i in xrange(1, colspan):
rappend("")
sappend(("SPAN", (colCnt, rowCnt), (colCnt + colspan - 1, rowCnt)))
colCnt += colspan
else:
colCnt += 1
if row == []:
return None
return row
# END =========================================================================
|
anthcp/cdap | refs/heads/develop | cdap-docs/integrations/source/conf.py | 1 | # -*- coding: utf-8 -*-
import sys
import os
# Import the common config file
# Note that paths in the common config are interpreted as if they were
# in the location of this file
sys.path.insert(0, os.path.abspath('../../_common'))
from common_conf import *
# Override the common config
html_short_title_toc = manuals_dict["integrations"]
html_short_title = u'CDAP %s' % html_short_title_toc
html_context = {"html_short_title_toc":html_short_title_toc}
# Remove this guide from the mapping, as it would otherwise fail because it has been deleted by clean
intersphinx_mapping.pop("integrations", None) |
lanyuwen/openthread | refs/heads/master | tools/harness-automation/cases_R140/leader_9_2_19.py | 9 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Leader_9_2_19(HarnessCase):
role = HarnessCase.ROLE_LEADER
case = '9 2 19'
golden_devices_required = 1
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
|
mrquim/repository.mrquim | refs/heads/master | repo/script.module.schism.common/lib/js2py/constructors/jsobject.py | 29 | from js2py.base import *
#todo Double check everything is OK
@Js
def Object():
val = arguments.get('0')
if val.is_null() or val.is_undefined():
return PyJsObject(prototype=ObjectPrototype)
return val.to_object()
@Js
def object_constructor():
if len(arguments):
val = arguments.get('0')
if val.TYPE=='Object':
            #Implementation dependent, but mine will simply return :)
return val
elif val.TYPE in ['Number', 'String', 'Boolean']:
return val.to_object()
return PyJsObject(prototype=ObjectPrototype)
Object.create = object_constructor
class ObjectMethods:
def getPrototypeOf(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.getPrototypeOf called on non-object')
return null if obj.prototype is None else obj.prototype
def getOwnPropertyDescriptor (obj, prop):
if not obj.is_object():
raise MakeError('TypeError', 'Object.getOwnPropertyDescriptor called on non-object')
        return obj.own.get(prop.to_string().value) # will return undefined if we don't have this prop
def getOwnPropertyNames(obj):
if not obj.is_object():
            raise MakeError('TypeError', 'Object.getOwnPropertyNames called on non-object')
return obj.own.keys()
def create(obj):
if not (obj.is_object() or obj.is_null()):
raise MakeError('TypeError', 'Object prototype may only be an Object or null')
temp = PyJsObject(prototype=(None if obj.is_null() else obj))
if len(arguments)>1 and not arguments[1].is_undefined():
ObjectMethods.defineProperties.__func__(temp, arguments[1])
return temp
def defineProperty(obj, prop, attrs):
if not obj.is_object():
raise MakeError('TypeError', 'Object.defineProperty called on non-object')
name = prop.to_string().value
if not obj.define_own_property(name, ToPropertyDescriptor(attrs)):
raise MakeError('TypeError', 'Cannot redefine property: %s' % name)
return obj
def defineProperties(obj, properties):
if not obj.is_object():
raise MakeError('TypeError', 'Object.defineProperties called on non-object')
props = properties.to_object()
for name in props:
desc = ToPropertyDescriptor(props.get(name.value))
if not obj.define_own_property(name.value, desc):
raise MakeError('TypeError', 'Failed to define own property: %s'%name.value)
return obj
def seal(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.seal called on non-object')
for desc in obj.own.values():
desc['configurable'] = False
obj.extensible = False
return obj
def freeze(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.freeze called on non-object')
for desc in obj.own.values():
desc['configurable'] = False
if is_data_descriptor(desc):
desc['writable'] = False
obj.extensible = False
return obj
def preventExtensions(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.preventExtensions on non-object')
obj.extensible = False
return obj
def isSealed(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.isSealed called on non-object')
if obj.extensible:
return False
for desc in obj.own.values():
if desc['configurable']:
return False
return True
def isFrozen(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.isFrozen called on non-object')
if obj.extensible:
return False
for desc in obj.own.values():
if desc['configurable']:
return False
if is_data_descriptor(desc) and desc['writable']:
return False
return True
def isExtensible(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.isExtensible called on non-object')
return obj.extensible
def keys(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.keys called on non-object')
return [e for e,d in obj.own.iteritems() if d.get('enumerable')]
# add methods attached to Object constructor
fill_prototype(Object, ObjectMethods, default_attrs)
# add constructor to prototype
fill_in_props(ObjectPrototype, {'constructor':Object}, default_attrs)
# add prototype property to the constructor.
Object.define_own_property('prototype', {'value': ObjectPrototype,
'enumerable': False,
'writable': False,
'configurable': False})
# some utility functions:
def ToPropertyDescriptor(obj): # page 38 (50 absolute)
if obj.TYPE!='Object':
raise MakeError('TypeError', 'Can\'t convert non-object to property descriptor')
desc = {}
if obj.has_property('enumerable'):
desc['enumerable'] = obj.get('enumerable').to_boolean().value
if obj.has_property('configurable'):
desc['configurable'] = obj.get('configurable').to_boolean().value
if obj.has_property('value'):
desc['value'] = obj.get('value')
if obj.has_property('writable'):
desc['writable'] = obj.get('writable').to_boolean().value
if obj.has_property('get'):
cand = obj.get('get')
if not (cand.is_undefined() or cand.is_callable()):
raise MakeError('TypeError', 'Invalid getter (it has to be a function or undefined)')
desc['get'] = cand
if obj.has_property('set'):
cand = obj.get('set')
if not (cand.is_undefined() or cand.is_callable()):
raise MakeError('TypeError', 'Invalid setter (it has to be a function or undefined)')
desc['set'] = cand
if ('get' in desc or 'set' in desc) and ('value' in desc or 'writable' in desc):
raise MakeError('TypeError', 'Invalid property. A property cannot both have accessors and be writable or have a value.')
return desc
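# Illustrative sketch (not part of the original module): converting the JS
# object {value: 5, writable: true, enumerable: false} yields a Python dict
# along the lines of
#   {'value': <PyJs Number 5>, 'writable': True, 'enumerable': False}
# while mixing accessors ('get'/'set') with 'value'/'writable' raises a
# TypeError, as the spec requires.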
|
Immortalin/python-for-android | refs/heads/master | python3-alpha/python3-src/Doc/includes/mp_webserver.py | 48 | #
# Example where a pool of http servers share a single listening socket
#
# On Windows this module depends on the ability to pickle a socket
# object so that the worker processes can inherit a copy of the server
# object. (We import `multiprocessing.reduction` to enable this pickling.)
#
# Not sure if we should synchronize access to `socket.accept()` method by
# using a process-shared lock -- does not seem to be necessary.
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
import os
import sys
from multiprocessing import Process, current_process, freeze_support
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
if sys.platform == 'win32':
    import multiprocessing.reduction # make sockets picklable/inheritable
def note(format, *args):
sys.stderr.write('[%s]\t%s\n' % (current_process().name, format % args))
class RequestHandler(SimpleHTTPRequestHandler):
# we override log_message() to show which process is handling the request
def log_message(self, format, *args):
note(format, *args)
def serve_forever(server):
note('starting server')
try:
server.serve_forever()
except KeyboardInterrupt:
pass
def runpool(address, number_of_processes):
# create a single server object -- children will each inherit a copy
server = HTTPServer(address, RequestHandler)
# create child processes to act as workers
for i in range(number_of_processes - 1):
Process(target=serve_forever, args=(server,)).start()
# main process also acts as a worker
serve_forever(server)
def test():
DIR = os.path.join(os.path.dirname(__file__), '..')
ADDRESS = ('localhost', 8000)
NUMBER_OF_PROCESSES = 4
print('Serving at http://%s:%d using %d worker processes' % \
(ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES))
print('To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32'])
os.chdir(DIR)
runpool(ADDRESS, NUMBER_OF_PROCESSES)
if __name__ == '__main__':
freeze_support()
test()
|
tndatacommons/tndata_backend | refs/heads/master | tndata_backend/config/settings/base.py | 2 | """ Django settings file for this project.
This file contains settings that are usable for both a production and a
development environment. You'll need to export the appropriate values as
environment variables, however. The following environment variables should
be set prior to running the project:
* DEBUG -- 1 or 0, defines whether or not we're in debug mode.
* STAGING -- 1 or 0, defines whether or not we're in a staging environment.
* SECRET_KEY -- string to use for django's secret key
* ADMIN_NAME -- Name of the admin user.
* ADMIN_EMAIL -- Email of the admin user.
* MANAGER_NAME -- Name of a Manager.
* MANAGER_EMAIL -- Email of a Manager
* DEFAULT_EMAIL -- Default email address for transactional email.
* EMAIL_SUBJECT_PREFIX -- prefix for your emails
* EMAIL_HOST -- host of your smtp server
* EMAIL_HOST_USER -- smtp user
* EMAIL_HOST_PASSWORD -- email password
* EMAIL_USE_TLS -- whether or not to use TLS
* EMAIL_USE_SSL -- whether or not to use SSL
* EMAIL_PORT -- smtp server port
* ALLOWED_HOSTS -- semicolon-separated string of allowed hosts, e.g.
"localhost;127.0.0.1;.example.com"
* SITE_DOMAIN -- fully qualified domain name for your site, e.g. "example.com"
* HAYSTACK_URL -- connection to haystack; e.g. "http://127.0.0.1:9200/"
* HAYSTACK_INDEX_NAME -- index name to use for haystack
* GCM_API_KEY -- API key for google cloud messaging
* GCM_IOS_API_KEY -- API key for google cloud messaging (for iOS)
* GOOGLE_OAUTH_CLIENT_ID -- API key for OAuth with Google
* GOOGLE_OAUTH_CLIENT_SECRET -- Secret for OAuth with Google
* ANDROID_OAUTH_CLIENT_ID -- Same as `GOOGLE_OAUTH_CLIENT_ID`, but the ID we're
using on the Android app.
* IOS_OAUTH_CLIENT_ID -- Same as `GOOGLE_OAUTH_CLIENT_ID`, but the ID we're
using on the iOS app.
* APNS_CERT_PATH -- Path the the Apple Certificate for APNS
* DB_NAME -- Database name
* DB_USER -- Database user
* DB_PASSWORD -- database password
* DB_HOST -- database host
* DB_PORT -- database port
* REDIS_PASSWORD -- Redis password
* REDIS_PORT -- Redis port
* REDIS_HOST -- Redis host, e.g. "127.0.0.1"
* REDIS_CACHE_DB -- The redis DB to use for the cache.
* REDIS_METRICS_DB -- The redis DB to use for metrics.
* REDIS_RQ_DB -- The redis DB to use for rq task queues.
- prod / rq --> 0
- prod / cache --> 1
- prod / metrics --> 2
- staging / cache --> 3
- staging / metrics --> 4
- staging / rq --> 5
* PLAY_APP_URL -- Link to the downloadable app on the play store.
* IOS_APP_URL -- Link to the downloadable app on the apple app store.
* SLACK_API_TOKEN -- slack api token
* SLACK_CHANNEL -- chanel in which you want slack to post e.g. "#general"
* SLACK_USERNAME -- username that will be used for posts to slack
* MEDIA_ROOT -- path to your media uploads (only for local dev if AWS is not used)
* AWS_USER -- AWS user
* AWS_STORAGE_BUCKET_NAME -- S3 bucket name
* AWS_ACCESS_KEY_ID -- AWS access key
* AWS_SECRET_ACCESS_KEY -- AWS secret
* CRONLOG_KEY -- A secret value for us to use the cronlog logger DB.
"""
from ipaddress import IPv4Network, IPv4Address
import os
import sys
class CIDRS(list):
"""Use the ipaddress module to create lists of ip networks that we check
against.
    e.g. INTERNAL_IPS = CIDRS(['127.0.0.1', '192.168.0.0/16'])
Inspired by https://djangosnippets.org/snippets/1862/
"""
def __init__(self, cidrs):
self.cidrs = []
for cidr in cidrs:
self.cidrs.append(IPv4Network(cidr))
def __contains__(self, ip):
# in dev, we get this weird input, where IP is "b''".
# this hack is a fix for that.
if len(ip) < 7:
return True
return any([IPv4Address(ip) in net for net in self.cidrs])
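# Illustrative usage of CIDRS (not part of the original settings):
#
#   INTERNAL_IPS = CIDRS(['127.0.0.1', '10.0.0.0/8'])
#   '10.1.2.3' in INTERNAL_IPS   # True
#   '8.8.8.8' in INTERNAL_IPS    # False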
SECRET_KEY = os.environ.get('SECRET_KEY')
DEBUG = bool(int(os.environ.get('DEBUG', 1)))
STAGING = bool(int(os.environ.get('STAGING', 0)))
TESTING = sys.argv[1:2] == ['test']
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Admins & Managers for the site.
ADMINS = [(os.environ.get('ADMIN_NAME'), os.environ.get('ADMIN_EMAIL'))]
MANAGERS = ADMINS + [(os.environ.get('ADMIN_NAME'), os.environ.get('ADMIN_EMAIL'))]
# Email
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_EMAIL')
SERVER_EMAIL = DEFAULT_FROM_EMAIL
EMAIL_SUBJECT_PREFIX = os.environ.get('EMAIL_SUBJECT_PREFIX')
if os.environ.get('EMAIL_HOST'):
# 3rd-party email delivery.
EMAIL_HOST = os.environ.get('EMAIL_HOST')
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS = bool(int(os.environ.get('EMAIL_USE_TLS', 1)))
EMAIL_USE_SSL = bool(int(os.environ.get('EMAIL_USE_SSL', 0)))
EMAIL_PORT = os.environ.get('EMAIL_PORT')
else:
# Local email delivery
EMAIL_HOST = 'localhost'
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_PORT = 1025
# The site's FQDN and URL. Used for building links in email.
SITE_DOMAIN = os.environ.get('SITE_DOMAIN')
if DEBUG:
SITE_URL = "http://{0}".format(SITE_DOMAIN)
else:
SITE_URL = "https://{0}".format(SITE_DOMAIN)
# The environment variable for allowed hosts should be a ;-separated string
# of domains and/or ip addresses, e.g. "localhost;127.0.0.1;example.com"
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '').split(";")
# NOTE: this is the production setting. It uses the cached.Loader.
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'OPTIONS': {
'debug': DEBUG,
'loaders': [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
],
'context_processors': (
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"utils.context_processors.staging",
"utils.context_processors.site_domain",
"utils.context_processors.google_client_id",
),
},
},
]
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd-party apps
# 'axes',
'badgify',
'badgify_api',
'channels',
'corsheaders',
'crispy_forms',
'crispy_forms_foundation',
'dashboard',
'django_extensions',
'django_rq',
'haystack',
'jsonfield',
'recurrence',
'redis_metrics',
'rest_framework',
'rest_framework.authtoken',
'storages',
'staticflatpages',
'waffle',
# custom apps
'chat',
'goals',
'notifications',
'officehours',
'questions',
'rewards',
'survey',
'userprofile',
'utils',
)
# cronlog
CRONLOG_KEY = os.environ.get('CRONLOG_KEY')
# django-haystack settings
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': os.environ.get('HAYSTACK_URL'),
'INDEX_NAME': os.environ.get('HAYSTACK_INDEX_NAME'),
},
}
# Google OAuth
GOOGLE_OAUTH_CLIENT_ID = os.environ.get('GOOGLE_OAUTH_CLIENT_ID')
GOOGLE_OAUTH_CLIENT_SECRET = os.environ.get('GOOGLE_OAUTH_CLIENT_SECRET')
ANDROID_OAUTH_CLIENT_ID = os.environ.get('ANDROID_OAUTH_CLIENT_ID')
IOS_OAUTH_CLIENT_ID = os.environ.get('IOS_OAUTH_CLIENT_ID')
# Settings for Google Cloud Messaging.
GCM = {
'API_KEY': os.environ.get('GCM_API_KEY'),
'IOS_API_KEY': os.environ.get('GCM_IOS_API_KEY'),
}
# Settings for APNS
APNS_CERT_PATH = os.environ.get('APNS_CERT_PATH')
AUTHENTICATION_BACKENDS = (
'utils.backends.EmailAuthenticationBackend',
'utils.backends.EmailAndTokenBackend',
'django.contrib.auth.backends.ModelBackend',
)
MIDDLEWARE_CLASSES = (
'utils.middleware.IgnoreRequestMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'waffle.middleware.WaffleMiddleware',
'utils.middleware.TimezoneMiddleware',
'utils.middleware.ResponseForbiddenMiddleware',
'utils.middleware.APIMetricsMiddleware',
'staticflatpages.middleware.StaticFlatpageFallbackMiddleware',
'utils.middleware.DebugMedia404Middleware',
)
ROOT_URLCONF = 'config.urls'
WSGI_APPLICATION = 'config.wsgi.application'
# Local Database settings.
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASSWORD'),
'HOST': os.environ.get('DB_HOST'),
'PORT': os.environ.get('DB_PORT'),
}
}
# Caching with a redis backend
CACHE_TIMEOUT = 60 * 5 # 5-minute cache timeout
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD')
REDIS_PORT = os.environ.get('REDIS_PORT')
REDIS_HOST = os.environ.get('REDIS_HOST')
REDIS_CACHE_DB = int(os.environ.get('REDIS_CACHE_DB'))
REDIS_CACHE_URL = 'redis://:{password}@{host}:{port}/{db}'.format(
password=REDIS_PASSWORD,
host=REDIS_HOST,
port=REDIS_PORT,
db=REDIS_CACHE_DB
)
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": REDIS_CACHE_URL,
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"SOCKET_CONNECT_TIMEOUT": 5, # in seconds
"SOCKET_TIMEOUT": 5, # in seconds
"IGNORE_EXCEPTIONS": not DEBUG, # True in production
},
'TIMEOUT': CACHE_TIMEOUT,
}
}
if DEBUG:
# For development, we can use a dummy or a local-memory cache.
CACHES = {
'default': {
#'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'snowflake'
}
}
# django-redis-metrics: http://django-redis-metrics.readthedocs.org/en/latest/
REDIS_METRICS_DB = int(os.environ.get('REDIS_METRICS_DB'))
REDIS_METRICS = {
'HOST': REDIS_HOST,
'PORT': REDIS_PORT,
'DB': REDIS_METRICS_DB,
'PASSWORD': REDIS_PASSWORD,
'SOCKET_TIMEOUT': None,
'SOCKET_CONNECTION_POOL': None,
'MIN_GRANULARITY': 'daily',
'MAX_GRANULARITY': 'yearly',
'MONDAY_FIRST_DAY_OF_WEEK': False,
}
# Use the Redis cache as a session backend: https://goo.gl/U0xajQ
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
SESSION_CACHE_ALIAS = "default"
# channels config
REDIS_CHANNELS_URL = 'redis://:{password}@{host}:{port}/0'.format(
password=REDIS_PASSWORD,
host=REDIS_HOST,
port=REDIS_PORT,
)
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgi_redis.RedisChannelLayer",
"CONFIG": {
"hosts": [(REDIS_CHANNELS_URL)],
},
"ROUTING": "config.routing.channel_routing",
},
# "default": {
# "BACKEND": "asgiref.inmemory.ChannelLayer",
# "ROUTING": "config.routing.channel_routing",
# },
}
# django.contrib.auth settings.
LOGIN_URL = 'login' # Named url pattern for the built-in auth
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = '/'
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
TIME_FORMAT = "g:ia e" # 5:30pm CDT
DATE_FORMAT = "N j, Y" # Jan 3, 2015
DATETIME_FORMAT = "N j, Y g:iaO e" # Jan. 3, 2015 5:30pm+200 CDT
SHORT_DATE_FORMAT = "m/d/Y" # 01/03/2015
SHORT_DATETIME_FORMAT = "H:iO" # 17:30+200
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Messages tags: Updated to represent Foundation alert classes.
from django.contrib.messages import constants as message_constants
MESSAGE_TAGS = {
message_constants.DEBUG: 'debug secondary',
message_constants.INFO: 'info',
message_constants.SUCCESS: 'success',
message_constants.WARNING: 'warning',
message_constants.ERROR: 'error alert'
}
# Rainbow-tests
TEST_RUNNER = 'rainbowtests.test.runner.RainbowDiscoverRunner'
RAINBOWTESTS_HIGHLIGHT_PATH = '/vagrant/tndata_backend/'
RAINBOWTESTS_SHOW_MESSAGES = False
# django-axes
# AXES_LOGIN_FAILURE_LIMIT = 1
# AXES_LOCK_OUT_AT_FAILURE = False # Don't lock accounts.
# AXES_VERBOSE = True
# AXES_USERNAME_FORM_FIELD = 'email'
# rq & django_rq config, See:
# - http://python-rq.org/docs/workers/
# - https://github.com/ui/django-rq
# NOTE: To run the worker, do: python manage.py rqworker default
REDIS_RQ_DB = int(os.environ.get('REDIS_RQ_DB'))
RQ_QUEUES = {
'default': {
'HOST': REDIS_HOST,
'PORT': REDIS_PORT,
'DB': REDIS_RQ_DB,
'PASSWORD': REDIS_PASSWORD,
'DEFAULT_TIMEOUT': 360,
'ASYNC': False if DEBUG else True
},
}
# Crispy forms
CRISPY_TEMPLATE_PACK = 'foundation-5'
CRISPY_ALLOWED_TEMPLATE_PACKS = ('uni_form', 'foundation-5', 'mdl')
# Django Rest Framework
REST_FRAMEWORK = {
'PAGE_SIZE': 25,
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'utils.api.BrowsableAPIRendererWithoutForms',
),
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.AnonRateThrottle',
'rest_framework.throttling.UserRateThrottle'
),
'DEFAULT_THROTTLE_RATES': {
'anon': '1000/day',
'user': '10000/day'
},
'VERSION_PARAM': 'version',
'DEFAULT_VERSION': '2',
'ALLOWED_VERSIONS': ['1', '2'],
'DEFAULT_VERSIONING_CLASS': 'utils.api.DefaultQueryParamVersioning',
'DATETIME_FORMAT': '%Y-%m-%d %H:%M:%S%z', # 2015-04-28 03:47:25+0000
}
# Play Store Link for the mobile app.
# https://developers.google.com/api-client-library/python/start/get_started
PLAY_APP_URL = os.environ.get('PLAY_APP_URL')
IOS_APP_URL = os.environ.get('IOS_APP_URL')
# django-cors-headers: https://github.com/ottoyiu/django-cors-headers/
CORS_ORIGIN_ALLOW_ALL = False
CORS_URLS_REGEX = r'^/api/.*$'
CORS_ORIGIN_WHITELIST = (
'app.tndata.org',
'staging.tndata.org',
'tndata.ngrok.io',
'brad.ngrok.io',
'localhost',
'127.0.0.1',
)
# Ignore these bad host headers; This circumvents the SuspiciousOperation
# exceptions that would otherwise get raised.
IGNORE_BAD_HOST_HEADERS = [
'proxyradar.com', # Stupid check.proxyradar.com/azenv.php
]
# Slack tokens: https://api.slack.com/web
SLACK_API_TOKEN = os.environ.get('SLACK_API_TOKEN')
SLACK_CHANNEL = os.environ.get('SLACK_CHANNEL')
SLACK_USERNAME = os.environ.get('SLACK_USERNAME')
# Media Uploads, default
MEDIA_ROOT = os.environ.get('MEDIA_ROOT')
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = os.path.join(BASE_DIR, 'collected_static_files')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
# Amazon S3 & django-storages config
AWS_USER = os.environ.get('AWS_USER')
AWS_HEADERS = { # http://developer.yahoo.com/performance/rules.html#expires
'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',
'Cache-Control': 'max-age=94608000',
}
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_BUCKET_NAME = AWS_STORAGE_BUCKET_NAME # for sync_s3
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
SYNC_S3_PREFIX = 'media' # only include our media files when using sync_s3
# Tell django-storages that when coming up with the URL for an item in S3
# storage, keep it simple - just use this domain plus the path. (If this isn't
# set, things get complicated). This controls how the `static` template tag
# from `staticfiles` gets expanded, if you're using it.
#
# We also use it in the next setting.
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
# Tell the staticfiles app to use S3Boto storage when writing the collected
# static files (when you run `collectstatic`).
STATICFILES_LOCATION = 'static'
STATICFILES_STORAGE = 'utils.storages.StaticStorage'
# This is used by the `static` template tag from `static`, if you're using that.
# Or if anything else refers directly to STATIC_URL. So it's safest to always
# set it.
MEDIAFILES_LOCATION = 'media'
STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, STATICFILES_LOCATION)
MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)
DEFAULT_FILE_STORAGE = 'utils.storages.MediaStorage'
# Additional Goal app Settings
PROGRESS_HISTORY_DAYS = 30 # Number of days back to generate progress history
# django-querycount settings
QUERYCOUNT = {
'THRESHOLDS': {
'MEDIUM': 50,
'HIGH': 200,
'MIN_TIME_TO_LOG': 0,
'MIN_QUERY_COUNT_TO_LOG': 0
},
'IGNORE_PATTERNS': [r'^/static', r'^/media', r'^/admin'],
'DISPLAY_DUPLICATES': 1,
}
# Settings for DEBUG / local development
# --------------------------------------
if DEBUG:
INSTALLED_APPS = INSTALLED_APPS + (
'debug_toolbar',
'querycount',
)
# django-cors-headers: https://github.com/ottoyiu/django-cors-headers/
CORS_ORIGIN_ALLOW_ALL = True
# debug_toolbar
# -------------
DEBUG_TOOLBAR_PATCH_SETTINGS = False # Do not adjust settings automatically
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
# XXX: This panel is _really_ slow if you have a node_modules directory
# XXX: buried in your static files folders somewhere.
#'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
MIDDLEWARE_CLASSES = (
'querycount.middleware.QueryCountMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
) + MIDDLEWARE_CLASSES
INTERNAL_IPS = CIDRS(['127.0.0.1', '192.168.0.0/16', '10.0.0.0/16'])
# Just like production, but without the cached template loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
# Disable AWS/S3 (for when working on js/css locally)
# ---------------------------------------------------
STATIC_ROOT = "collected_static_files"
STATIC_URL = "/static/"
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.StaticFilesStorage"
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
MEDIA_ROOT = "/webapps/tndata_backend/uploads/"
MEDIA_URL = "/media/"
DEFAULT_FILE_STORAGE = "django.core.files.storage.FileSystemStorage"
# Logging Config
# --------------
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'django': {
'format': 'django: %(message)s',
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
# 'logging.handlers.SysLogHandler': {
# 'level': 'DEBUG',
# 'class': 'logging.handlers.SysLogHandler',
# 'facility': 'local7',
# 'formatter': 'django',
# },
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'slack-error': {
'level': 'ERROR',
'api_key': SLACK_API_TOKEN,
'class': 'slacker_log_handler.SlackerLogHandler',
'channel': '#logs',
'username': SITE_DOMAIN,
},
'slack-info': {
'level': 'INFO',
'api_key': SLACK_API_TOKEN,
'class': 'slacker_log_handler.SlackerLogHandler',
'channel': '#logs',
'username': SITE_DOMAIN,
},
},
'loggers': {
# 'loggly_logs': {
# 'handlers': ['logging.handlers.SysLogHandler'],
# 'propagate': True,
# 'format': 'django: %(message)s',
# 'level': 'DEBUG',
# },
'django.request': {
'handlers': ['mail_admins', 'slack-error', 'slack-info'],
'level': 'ERROR',
'propagate': True,
},
}
}
if DEBUG:
# No logging in dev
LOGGING = {}
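# Environment variables read in this portion of the settings module (all via
# os.environ):
#   database:  DB_NAME, DB_USER, DB_PASSWORD, DB_HOST, DB_PORT
#   redis:     REDIS_HOST, REDIS_PORT, REDIS_PASSWORD, REDIS_CACHE_DB,
#              REDIS_METRICS_DB, REDIS_RQ_DB
#   mobile:    PLAY_APP_URL, IOS_APP_URL
#   slack:     SLACK_API_TOKEN, SLACK_CHANNEL, SLACK_USERNAME
#   media/S3:  MEDIA_ROOT, AWS_USER, AWS_STORAGE_BUCKET_NAME,
#              AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY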
|
duanhjlt/gyp | refs/heads/master | test/mac/gyptest-framework-headers.py | 344 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that mac_framework_headers works properly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
# TODO(thakis): Make this work with ninja, make. http://crbug.com/129013
test = TestGyp.TestGyp(formats=['xcode'])
CHDIR = 'framework-headers'
test.run_gyp('test.gyp', chdir=CHDIR)
# Test that headers are installed for frameworks
test.build('test.gyp', 'test_framework_headers_framework', chdir=CHDIR)
test.built_file_must_exist(
'TestFramework.framework/Versions/A/TestFramework', chdir=CHDIR)
test.built_file_must_exist(
'TestFramework.framework/Versions/A/Headers/myframework.h', chdir=CHDIR)
# Test that headers are installed for static libraries.
test.build('test.gyp', 'test_framework_headers_static', chdir=CHDIR)
test.built_file_must_exist('libTestLibrary.a', chdir=CHDIR)
test.built_file_must_exist('include/myframework.h', chdir=CHDIR)
test.pass_test()
|
opencord/voltha | refs/heads/master | voltha/adapters/tellabs_olt/tellabs_resource_manager.py | 1 | # Copyright 2018-present Tellabs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from voltha.adapters.openolt.openolt import OpenOltResourceMgr, OpenOltPlatform
class TellabsResourceManager(OpenOltResourceMgr):
def __init__(self, device_id, host_and_port, extra_args, device_info):
super(TellabsResourceManager, self).__init__(device_id, host_and_port, extra_args, device_info)
@property
def max_uni_id_per_onu(self):
return 3 # OpenOltPlatform.MAX_UNIS_PER_ONU-1 |
sgargan/ansible | refs/heads/devel | v2/test/plugins/__init__.py | 7690 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
sstocker46/OctoPrint | refs/heads/master | tests/util/__init__.py | 47 | # coding=utf-8
"""
Unit tests for ``octoprint.util``.
"""
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
|
fangeugene/the-blue-alliance | refs/heads/master | datafeeds/usfirst_event_offseason_list_parser.py | 1 | from datetime import datetime
import urlparse
import logging
from consts.event_type import EventType
from datafeeds.parser_base import ParserBase
from helpers.event_helper import EventHelper
class UsfirstEventOffseasonListParser(ParserBase):
@classmethod
def parse(self, html):
"""
Parse the list of events from USFIRST. This provides us with basic
information about events and is how we first discover them.
"""
from BeautifulSoup import BeautifulSoup
events = list()
soup = BeautifulSoup(html,
convertEntities=BeautifulSoup.HTML_ENTITIES)
for table in soup.findAll('table'):
trs = table.find('tbody').findAll('tr')
for tr in trs:
tds = tr.findAll('td')
event = dict()
for td in tds:
if td.get('class') and td["class"].count('views-field-title') > 0:
event["first_eid"] = td.a["href"].split("/")[-1]
event["name"] = " ".join(td.a.text.split(" ")[:-1])
event["state_prov"] = str(td.a.text.split(" ")[-1]).translate(None, "()")
for span in td.findAll('span'):
if span["class"].count("date-display-start") > 0:
event["start_date"] = datetime.strptime(span["content"][:10], "%Y-%m-%d")
if span["class"].count("date-display-end") > 0:
event["end_date"] = datetime.strptime(span["content"][:10], "%Y-%m-%d")
if span["class"].count("date-display-single") > 0:
event["start_date"] = datetime.strptime(span["content"][:10], "%Y-%m-%d")
event["end_date"] = datetime.strptime(span["content"][:10], "%Y-%m-%d")
event["event_type_enum"] = EventType.OFFSEASON
events.append(event)
return events, False
|
lfsimoes/beam_paco__gtoc5 | refs/heads/master | paco/paco.py | 1 | #
# Copyright (c) 2017 Luis F. Simoes (github: @lfsimoes)
#
# Licensed under the MIT License. See the LICENSE file for details.
from collections import deque
import numpy as np
# ==================================== ## ==================================== #
class tsp_path(object):
"""
Handler for Travelling Salesman Problem (TSP) solutions built by P-ACO.
Implements tour construction, evaluation, and heuristic estimates.
To solve different combinatorial problems, create a new class exposing the
same interface.
"""
# indication of whether edge costs/weights are symmetric
# (a transition between nodes A and B is as good as a transition in the
# opposite direction, and the pheromone matrix should value both as such)
symmetric = True
# indication of whether the path handler, via the `.tabu()` method, allows
# for nodes already visited in a path to be revisited
allows_revisits = False
def __init__(self, dist_matrix, random_state=None):
# weights matrix with distances between cities
self.distances = np.array(dist_matrix)
assert self.distances.shape[0] == self.distances.shape[1], \
'non-square weights matrix'
# default heuristic values for TSP problems: inverse of city distances
# (assumes distance values are greater than 1.0)
self.weights = self.distances.copy()
# temporarily set diagonal to 1.0 (prevent divisions by 0 below)
self.weights.ravel()[::self.weights.shape[1]+1] = 1.0
self.weights = 1.0 / self.weights
# set diagonal to 0.0
self.weights.ravel()[::self.weights.shape[1]+1] = 0.0
self.nr_nodes = self.distances.shape[0]
self.random = np.random if random_state is None else random_state
def initialize(self, aco):
"ACO is starting a new run. Reset all run state variables."
pass
def heuristic(self, ant_path):
"Heuristic used to estimate the quality of node transitions."
return self.weights[ant_path[-1]]
def start(self):
"Start a new path through the graph."
# path starts at a randomly chosen node/city
return [self.random.choice(self.nr_nodes)]
def tabu(self, ant_path):
"List of nodes to exclude from consideration as future nodes to visit."
# revisits are forbidden in TSP, so nodes already visited are now tabu
return self.get_nodes(ant_path)
def add_node(self, ant_path, node):
"Extend an ant's path with a new visited node."
ant_path.append(node)
def get_nodes(self, ant_path):
"Get the list of nodes visited so far along the ant's path."
return ant_path
def get_links(self, ant_path):
"Get an iterator over node transitions performed along an ant's path."
path_nodes = self.get_nodes(ant_path)
for ij in zip(path_nodes[:-1], path_nodes[1:]):
yield ij
# link the last node back to the first one
yield path_nodes[-1], path_nodes[0]
def stop(self, ant_path, force_stop=False):
"Indicate whether an ant's path should be terminated."
# A TSP tour has ended when all nodes have been visited.
# If force_stop==True, a signal is then being sent that an incomplete
# path is being forcibly terminated. This can be used to trigger
# eventual clean up operations.
return (len(ant_path) == self.nr_nodes) or force_stop
def evaluate(self, ant_path):
"Cost function used to evaluate an ant's path through the graph."
# TSP evaluation: total distance travelled (cumulative path length)
cost = 0.0
for (i, j) in self.get_links(ant_path):
cost += self.distances[i, j]
return cost
def sort(self, evaluated_paths, r=None):
"""
Given a list of `evaluated_paths` (a list of (cost, ant_path) tuples),
return a list with the top `r` paths (or all, if unspecified), sorted by
decreasing order of quality (increasing order of total distance
travelled).
"""
if r == 1:
return [min(evaluated_paths, key=lambda i:i[0])]
return sorted(evaluated_paths, key=lambda i:i[0])[:r]
def copy(self, ant_path):
"Create a copy of a given ant path."
return ant_path.copy()
# ==================================== ## ==================================== #
class paco(object):
"""
Population-based Ant Colony Optimization (P-ACO).
Introduced by Michael Guntsch & Martin Middendorf (2002-2004).
References
==========
[1] http://dx.doi.org/10.1007/3-540-46004-7_8
[2] http://dx.doi.org/10.1007/3-540-45724-0_10
[3] http://dx.doi.org/10.1007/3-540-36970-8_33
[4] http://d-nb.info/1013929756
[5] http://iridia.ulb.ac.be/IridiaTrSeries/link/IridiaTr2011-006.pdf
http://iridia.ulb.ac.be/supp/IridiaSupp2011-010/
"""
def __init__(self, nr_nodes, path_handler, pop_size=3, ants_per_gen=25,
pher_init=None, pher_max=1.0, alpha=1., beta=5.,
prob_greedy=0.9, use_elitism=True, random_state=None,
**kwargs):
# handler for solutions built by this P-ACO instance
self.path = path_handler
# number of combinatorial elements being assembled into sequences
self.nr_nodes = nr_nodes
# number of "champion" ants logged in the pheromone matrix (k)
self.pop_size = pop_size
# number of ants spawned per generation (m)
self.ants_per_gen = ants_per_gen
# minimum/initial pheromone concentration on an edge (\tau_{init})
# (implements the convention of having rows/columns of initial
# pheromone values summing to 1.0)
self.pher_min = pher_init
if self.pher_min is None:
non_zero_cols = nr_nodes - (0 if self.path.allows_revisits else 1)
self.pher_min = 1.0 / non_zero_cols
# maximum pheromone concentration on an edge (\tau_{max})
self.pher_max = pher_max
        # amount of pheromone one ant lays down on an edge of the graph
self.pher_incr = (self.pher_max - self.pher_min) / self.pop_size
# in symmetric problems ants lay the same total amount of pheromone, but
# split along both directions (ij and ji). NOTE: total pheromone in a
# link may then range in [pher_min, pher_min + pop_size * pher_incr],
# and not in [pher_min, pher_max / 2] as indicated in [1] and [4].
self.pher_incr /= (2.0 if self.path.symmetric else 1.0)
# exponents indicating the relative importance of pheromone (alpha)
# and heuristic (beta) contributions to nodes' selection probabilities
assert alpha > 0.0 or beta > 0.0, \
'At least one of `alpha`/`beta` must be defined.'
self.alpha = alpha
self.beta = beta
        # probability of an ant greedily/deterministically choosing the next
# node to visit (q_0)
self.prob_greedy = prob_greedy
# Indication of whether one slot in the population is reserved for the
# best solution seen so far. Elitism implemented as specified in [2].
self.use_elitism = bool(use_elitism)
self.random = np.random if random_state is None else random_state
self._ph = np.zeros(self.nr_nodes)
self.initialize()
def initialize(self):
"Reset all run state variables, and prepare to start a new run."
# full paths taken by the ants that have deposited pheromones
pop_len = self.pop_size - (1 if self.use_elitism else 0)
self.population = deque(maxlen=pop_len)
# Edges out from each given node along which ants have previously
# deposited pheromones. Example: self.popul_pheromone[i] = [j,k,j]
# indicates 3 ants have previously visited node i, two of which
# moved on to j, while a third moved on to k.
self.popul_pheromone = [deque() for i in range(self.nr_nodes)]
if self.use_elitism:
self.elite = deque(maxlen=1)
self.elite_pheromone = [deque() for i in range(self.nr_nodes)]
self.nr_gen = 0
self.generation = None
self.best = None
self.path.initialize(self)
def pheromone(self, ant_path=None, current_node=None):
"""
Obtain the pheromone contribution to the probability distribution by
which a successor node for the current `ant_path` is to be chosen.
Produces the pheromone matrix row containing all pheromones deposited by
previous ants, in their transitions from the node presently occupied by
the considered ant.
Enforces tabus: nodes the path handler indicates should be excluded from
consideration as successor from `ant_path` receive a probability of 0.0.
May alternatively be called by specifying only the `current_node`.
"""
if current_node is None:
current_node = self.path.get_nodes(ant_path)[-1]
tabu = self.path.tabu(ant_path)
else:
assert ant_path is None, 'Redundant arguments given.'
tabu = [] if self.path.allows_revisits else [current_node]
# ph = np.zeros(self.nr_nodes) + self.pher_min
ph = self._ph
ph.fill(self.pher_min)
for s in self.popul_pheromone[current_node]:
ph[s] += self.pher_incr
if self.use_elitism:
for s in self.elite_pheromone[current_node]:
ph[s] += self.pher_incr
# give a 0.0 pheromone value to nodes that should be excluded from
# consideration in the choice of successor node
ph[list(tabu)] = 0.0
return ph
def pheromone_matrix(self):
"""
Generates the full pheromone matrix, by stacking the rows produced
in calls to .pheromone().
"""
rows = [
self.pheromone(current_node=i).copy()
for i in range(self.nr_nodes)
]
return np.vstack(rows)
def _get_links(self, ant_path):
"""
Get an iterator over the node transitions in a unit of information
stored in the population (by default: a single ant's path).
"""
return self.path.get_links(ant_path)
def lay_down_pheromone(self, ant_path, update_elite=False):
"Deposit pheromone along the path walked by an ant."
# pick the population that is to be updated (the main one, or the elite)
if update_elite:
population, pheromone = self.elite, self.elite_pheromone
else:
population, pheromone = self.population, self.popul_pheromone
# population behaves as a FIFO-queue: oldest ant is removed
# in case population size limit has been reached.
# Implements the "Age" population update strategy from P-ACO's papers.
if len(population) == population.maxlen:
ant_out = population.popleft()
for (i, j) in self._get_links(ant_out):
n = pheromone[i].popleft()
# assert n == j, 'removed unexpected pheromone'
if self.path.symmetric:
n = pheromone[j].popleft()
# assert n == i, 'removed unexpected pheromone'
# add new `ant_path`
population.append(ant_path)
for (i, j) in self._get_links(ant_path):
pheromone[i].append(j)
if self.path.symmetric:
pheromone[j].append(i)
def ant_walk(self):
"Create an ant, and have it travel the graph."
ant_path = self.path.start()
while not self.path.stop(ant_path):
p = None
if self.alpha > 0.0:
p = self.pheromone(ant_path)**self.alpha
if self.beta > 0.0:
b = self.path.heuristic(ant_path)**self.beta
p = b if p is None else (p * b)
if self.random.rand() < self.prob_greedy:
# greedy selection
next_node = np.argmax(p)
else:
# probabilistic selection
p /= p.sum()
next_node = self.random.choice(self.nr_nodes, p=p)
self.path.add_node(ant_path, next_node)
return ant_path
def build_generation(self):
'Have a "generation" of ants travel the graph.'
self.generation = []
for _ in range(self.ants_per_gen):
path = self.ant_walk()
cost = self.path.evaluate(path)
self.generation.append((cost, path))
self.process_generation()
def process_generation(self):
"""
Process the most recent generation of ant walks:
* identify the generation's most successful ant;
* have it lay down pheromones along the path it took;
* keep track of the best ant path seen so far (self.best);
* update the elitist solution (and its pheromones), if applicable.
"""
champion = self.path.sort(self.generation, r=1)[0]
if self.alpha > 0.0:
self.lay_down_pheromone(champion[1], update_elite=False)
if self.best is None:
self.best = champion
else:
self.best = self.path.sort([self.best, champion], r=1)[0]
# if self.best (best ant path seen so far) now holds the current
# generation's champion, then update the elitist solution.
        # In the current generation, the same ant path will then lay
# down pheromone both in the main population, and in the elite one.
# This is in agreement with the specification in [2].
if self.alpha > 0.0 and self.best is champion:
self.lay_down_pheromone(champion[1], update_elite=True)
def solve(self, nr_generations=10000, reinitialize=False):
"""
Solve the combinatorial problem. Over a span of multiple generations,
ants walk through the graph, depositing pheromones which then influence
the paths taken in subsequent walks.
"""
if reinitialize:
self.initialize()
for g in range(nr_generations):
self.nr_gen += 1
self.build_generation()
return self.best
# ==================================== ## ==================================== #
class beam_paco(paco):
"""
Beam P-ACO: hybridization of P-ACO with Beam Search.
"""
def __init__(self, *args, beam_width=None, branch_factor=None, **kwargs):
# `beam_width`, the number of solutions kept per path depth, is enforced
# via the number of `ants_per_gen`. Should the argument be specified
# with this alias, it's copied to `ants_per_gen`, possibly overwriting
# a redundant/inconsistent specification in it.
if beam_width is not None:
kwargs['ants_per_gen'] = beam_width
super(beam_paco, self).__init__(*args, **kwargs)
# nr. of successor nodes an ant should branch into per step of its path
# (defaults to 2 * pop_size, if unspecified, ensuring at least pop_size
# successors are generated without using pheromone information)
if branch_factor is None:
branch_factor = 2 * self.pop_size
self.branch_factor = branch_factor
def ant_walk(self, ant_path=None):
"""
Have an ant take a step in its path through the graph, towards multiple
successor nodes.
"""
if ant_path is None:
ant_path = self.path.start()
# build nodes' selection probability distribution
p = None
if self.alpha > 0.0:
p = self.pheromone(ant_path)**self.alpha
if self.beta > 0.0:
b = self.path.heuristic(ant_path)**self.beta
p = b if p is None else (p * b)
# select the `next_nodes` to branch into
nz = np.nonzero(p)[0]
if len(nz) <= self.branch_factor:
# if there are fewer than `branch_factor` nodes that can be branched
# into (for instance, if most nodes are tabu), then branch into all
# available ones, and skip computations below
next_nodes = nz
elif self.random.rand() < self.prob_greedy:
# greedy selection
# (identify indices into the `branch_factor` highest values in `p`)
next_nodes = np.argpartition(-p, self.branch_factor - 1)
next_nodes = next_nodes[:self.branch_factor]
else:
# probabilistic selection
p /= p.sum()
next_nodes = self.random.choice(
self.nr_nodes, size=self.branch_factor, replace=False, p=p)
# branch the ant's path into all successor nodes in `next_nodes`
complete, ongoing = [], []
for n in next_nodes:
ap = self.path.copy(ant_path)
self.path.add_node(ap, n)
(complete if self.path.stop(ap) else ongoing).append(ap)
return complete, ongoing
def build_generation(self):
"""
Have a "generation" of ants travel the graph.
Performs a full Beam Search, a constrained breadth-first search on a
tree of ant paths: each tree node is branched into `self.branch_factor`
successor nodes, and per tree depth only the `self.ants_per_gen` best
solutions (the beam's width) are kept and carried forward to the next
level. An ant path is here the succession of edges from the tree's root
down to a leaf node.
The generation's best solution is defined as the best ranked among the
longest produced paths (those that reached the greatest tree depth).
"""
# ongoing = [None] * self.ants_per_gen
# single root node; all paths start from the same initial conditions
ongoing = [None]
while ongoing != []:
# extend all the still ongoing paths, and split outcomes between
# completed paths, and those that should still be further extended.
complete, incomplete = [], []
for ant_path in ongoing:
c, o = self.ant_walk(ant_path)
complete.extend(c)
incomplete.extend(o)
# evaluate and sort the incomplete paths
incomplete = [(self.path.evaluate(p), p) for p in incomplete]
incomplete = self.path.sort(incomplete)
# select the best `ants_per_gen` paths out from those that are still
# incomplete, and discard the remaining ones
ongoing = [p for (c, p) in incomplete[:self.ants_per_gen]]
# signal to the path handler that paths being discarded should be
# forcibly stopped (trigger eventual clean up steps)
for (c, p) in incomplete[self.ants_per_gen:]:
self.path.stop(p, force_stop=True)
# # All paths have completed. Pick the `ants_per_gen` best among the
# # longest paths, and discard the remaining ones.
# complete = [(self.path.evaluate(p), p) for p in complete]
# self.generation = self.path.sort(complete, r=self.ants_per_gen)
# Define the generation's paths as being *all* the complete paths having
# the same maximal length. Does not prune down to at most `ants_per_gen`
# solutions. In the end, `.generation` may hold more, or even less than
# that number of solutions (if few feasible solutions reached the
# maximal observed length).
self.generation = [(self.path.evaluate(p), p) for p in complete]
self.process_generation()
# ==================================== ## ==================================== #
class _pareto_elite(object):
"""
Abstract class implementing a variant of elitism that tracks the full
set of non-dominated ant paths found to date.
The pheromone matrix is reset at the end of every generation from a
random subset of paths in the elite population.
Assumes a compatible path handler is being used, with an `.evaluate()`
method that produces multiple evaluations per path, and a `.sort()` method
that sorts solutions according to Pareto dominance.
Partially implements the specification in Sec. 3.1 of:
[3] http://dx.doi.org/10.1007/3-540-36970-8_33
"""
def __init__(self, *args, nr_elite_fronts=1, **kwargs):
super(_pareto_elite, self).__init__(*args, **kwargs)
# number of non-dominated fronts to keep in the elite
self.nr_elite_fronts = nr_elite_fronts
def initialize(self):
"Reset all run state variables, and prepare to start a new run."
# Pheromones will be exclusively determined by the elite population.
# The main population's variables are just kept for code compatibility.
self.population = None
self.popul_pheromone = [[] for i in range(self.nr_nodes)]
# Force the usage of Elitism
self.use_elitism = True
# Here, the elite population is unbounded in size, and stores evaluated
# paths (tuples containing both the evaluation and the path).
# This is in contrast to `class paco`, where it's a bounded population
# storing only paths (same as the main population).
self.elite = []
# Empty pheromone "matrix". To be defined later in .lay_down_pheromone()
self.elite_pheromone = self.popul_pheromone
# Given the elite population's size is now unbounded, the amount of
# pheromone deposition will instead be bounded via a limit on the
# number of memorized transitions out from each node.
self.node_pheromone_maxlen = self.pop_size * (
2 if self.path.symmetric else 1)
self.nr_gen = 0
self.generation = None
self.best = None
self.path.initialize(self)
def lay_down_pheromone(self):
"""
Reset the the pheromone matrix, using a random subset of paths in the
elite population.
"""
# Pheromone ("memory") for each node is implemented as a FIFO-queue: in
# case it already contains as many contributions as `self.pop_size`, the
# oldest deposited pheromone evaporates, making way for the new one.
self.elite_pheromone = [
deque(maxlen=self.node_pheromone_maxlen)
for i in range(self.nr_nodes)]
# The order in which paths are added to the pheromone matrix is
# determined from a permutation of the elite population
# (this differs from the specification in [3], where one path is chosen
# at random, and the remaining `self.pop_size - 1` ones added are then
# chosen so as to maximize similarity to the randomly chosen path).
for idx in self.random.permutation(len(self.elite)):
(quality, ant_path) = self.elite[idx]
for (i, j) in self._get_links(ant_path):
self.elite_pheromone[i].append(j)
if self.path.symmetric:
self.elite_pheromone[j].append(i)
#
# In asymmetric problems:
# if all paths visit all nodes, only the last `self.pop_size` processed
# ants paths will have deposited pheromones. However, in the case of
# variable sized ant paths, the pheromone matrix may now contain
# contributions from more than `self.pop_size` paths (in the limit, it
# may even contain contributions from all elite paths), while still only
# accumulating contributions from up to `self.pop_size` paths at the
# level of each individual node.
def process_generation(self):
"""
Process the most recent generation of ant walks:
* update the elite population (non-dominated paths seen so far);
* trigger a reset of the pheromone matrix, using the new elite.
"""
# Update the elite, to correspond to the first `self.nr_elite_fronts`
# fronts obtained by non-dominated sorting of the elite's union with the
# ant paths taken in the current generation.
# With `self.nr_elite_fronts == 1`, this will just be the Pareto front.
paths_union = self.elite + self.generation
self.elite = self.path.sort(paths_union, f=self.nr_elite_fronts)
# Update `self.best`, to contain the Pareto front of solutions found
if self.nr_elite_fronts == 1:
self.best = self.elite
else:
self.best = self.path.sort(self.elite, f=1)
if self.alpha > 0.0:
self.lay_down_pheromone()
# ==================================== ## ==================================== #
class paco_pareto(_pareto_elite, paco):
"Multi-objective P-ACO with Pareto elitism"
pass
class beam_paco_pareto(_pareto_elite, beam_paco):
"Multi-objective Beam P-ACO with Pareto elitism"
pass
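if __name__ == '__main__':
    # Minimal usage sketch: solve a small symmetric TSP instance with the
    # default P-ACO settings. The distance matrix below is illustrative;
    # beam_paco accepts the same arguments, plus beam_width/branch_factor.
    dist = [
        [0., 2., 9., 10.],
        [2., 0., 6., 4.],
        [9., 6., 0., 3.],
        [10., 4., 3., 0.],
    ]
    rng = np.random.RandomState(0)
    handler = tsp_path(dist, random_state=rng)
    solver = paco(nr_nodes=len(dist), path_handler=handler,
                  pop_size=3, ants_per_gen=10, random_state=rng)
    cost, tour = solver.solve(nr_generations=50)
    print('best tour %s with total length %.1f' % (tour, cost))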
|
RLReed/libdetran | refs/heads/master | src/python/pydetranutils/quad_plot.py | 2 | # This provides utilities for plotting things on a
# 1D or 2D mesh or a slice of a 3D mesh.
try :
import numpy as np
except ImportError :
print "Error importing Numpy"
try :
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.pyplot as plt
except ImportError :
print "Error importing matplotlib"
__detranuselatex__ = False
try :
import os
print "Checking for LaTeX for nicer plotting labels..."
if (os.system("latex")==0) :
from matplotlib import rc
rc('text', usetex=True)
rc('font', family='serif')
__detranuselatex__ = True
except ImportError :
print "Warning: LaTeX labels being skipped"
def plot_quadrature(quad) :
""" Plots a quadrature.
"""
try :
D = quad.dimension()
except :
print "Error getting quadrature dimension... maybe not a quadrature object?"
return
if D == 1 :
# Get the abscissa and weights
mu = np.asarray(quad.cosines(0))
wt = np.asarray(quad.weights())
# Plot
plt.plot(mu, wt, 'bo')
if __detranuselatex__ :
plt.xlabel('$\mu$')
else :
plt.xlabel('mu')
plt.ylabel('weight')
plt.show()
else :
# Get the abscissa and weights
mu = np.asarray(quad.cosines(0))
eta = np.asarray(quad.cosines(1))
xi = np.asarray(quad.cosines(2))
wt = np.asarray(quad.weights())
# Plot. Note, using my (old) version of matplotlib, the colors
# are not translated to the scatter plot. The sizes are, but
# it's not really enough. What's a good solution?
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(30, 60)
myplot = ax.scatter(mu,eta,xi,c=wt, s=100000*wt**2, marker='^')
labels = ['mu','eta','xi']
if __detranuselatex__ :
            labels = ['$\mu$', '$\eta$', '$\\xi$']
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
ax.set_zlabel(labels[2])
fig.colorbar(myplot)
plt.show()
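if __name__ == '__main__':
    # Minimal usage sketch with a stand-in quadrature object; a real detran
    # quadrature is expected to expose the same dimension()/cosines()/weights()
    # interface used by plot_quadrature above.
    class _GaussLegendreQuad(object):
        def __init__(self, n):
            # abscissa and weights of an n-point Gauss-Legendre rule
            self._mu, self._wt = np.polynomial.legendre.leggauss(n)
        def dimension(self):
            return 1
        def cosines(self, axis):
            return self._mu
        def weights(self):
            return self._wt
    plot_quadrature(_GaussLegendreQuad(8))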
|
borisz264/mod_seq | refs/heads/master | mod_seq_multi.py | 1 | import os, sys, subprocess
folder, threads = sys.argv[1:]
for file_name in os.listdir(folder):
full_path = os.path.join(folder, file_name)
print full_path
command_to_run = 'python mod_seq_main.py %s --threads %s' % (full_path, threads)
subprocess.Popen(command_to_run, shell=True).wait() |
valurhrafn/chrome-sync-server | refs/heads/master | google/protobuf/python/google/protobuf/service.py | 590 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""DEPRECATED: Declares the RPC service interfaces.
This module declares the abstract interfaces underlying proto2 RPC
services. These are intended to be independent of any particular RPC
implementation, so that proto2 services can be used on top of a variety
of implementations. Starting with version 2.3.0, RPC implementations should
not try to build on these, but should instead provide code generator plugins
which generate code specific to the particular RPC implementation. This way
the generated code can be more appropriate for the implementation in use
and can avoid unnecessary layers of indirection.
"""
__author__ = 'petar@google.com (Petar Petrov)'
class RpcException(Exception):
"""Exception raised on failed blocking RPC method call."""
pass
class Service(object):
"""Abstract base interface for protocol-buffer-based RPC services.
Services themselves are abstract classes (implemented either by servers or as
stubs), but they subclass this base interface. The methods of this
interface can be used to call the methods of the service without knowing
its exact type at compile time (analogous to the Message interface).
"""
def GetDescriptor():
"""Retrieves this service's descriptor."""
raise NotImplementedError
def CallMethod(self, method_descriptor, rpc_controller,
request, done):
"""Calls a method of the service specified by method_descriptor.
If "done" is None then the call is blocking and the response
message will be returned directly. Otherwise the call is asynchronous
and "done" will later be called with the response value.
In the blocking case, RpcException will be raised on error.
Preconditions:
* method_descriptor.service == GetDescriptor
    * request is of the exact same class as returned by
GetRequestClass(method).
* After the call has started, the request must not be modified.
* "rpc_controller" is of the correct type for the RPC implementation being
used by this Service. For stubs, the "correct type" depends on the
RpcChannel which the stub is using.
Postconditions:
* "done" will be called when the method is complete. This may be
before CallMethod() returns or it may be at some point in the future.
* If the RPC failed, the response value passed to "done" will be None.
Further details about the failure can be found by querying the
RpcController.
"""
raise NotImplementedError
def GetRequestClass(self, method_descriptor):
"""Returns the class of the request message for the specified method.
CallMethod() requires that the request is of a particular subclass of
Message. GetRequestClass() gets the default instance of this required
type.
Example:
method = service.GetDescriptor().FindMethodByName("Foo")
request = stub.GetRequestClass(method)()
request.ParseFromString(input)
service.CallMethod(method, request, callback)
"""
raise NotImplementedError
def GetResponseClass(self, method_descriptor):
"""Returns the class of the response message for the specified method.
This method isn't really needed, as the RpcChannel's CallMethod constructs
the response protocol message. It's provided anyway in case it is useful
for the caller to know the response type in advance.
"""
raise NotImplementedError
class RpcController(object):
"""An RpcController mediates a single method call.
The primary purpose of the controller is to provide a way to manipulate
settings specific to the RPC implementation and to find out about RPC-level
errors. The methods provided by the RpcController interface are intended
to be a "least common denominator" set of features which we expect all
implementations to support. Specific implementations may provide more
advanced features (e.g. deadline propagation).
"""
# Client-side methods below
def Reset(self):
"""Resets the RpcController to its initial state.
After the RpcController has been reset, it may be reused in
a new call. Must not be called while an RPC is in progress.
"""
raise NotImplementedError
def Failed(self):
"""Returns true if the call failed.
After a call has finished, returns true if the call failed. The possible
reasons for failure depend on the RPC implementation. Failed() must not
be called before a call has finished. If Failed() returns true, the
contents of the response message are undefined.
"""
raise NotImplementedError
def ErrorText(self):
"""If Failed is true, returns a human-readable description of the error."""
raise NotImplementedError
def StartCancel(self):
"""Initiate cancellation.
Advises the RPC system that the caller desires that the RPC call be
canceled. The RPC system may cancel it immediately, may wait awhile and
then cancel it, or may not even cancel the call at all. If the call is
canceled, the "done" callback will still be called and the RpcController
will indicate that the call failed at that time.
"""
raise NotImplementedError
# Server-side methods below
def SetFailed(self, reason):
"""Sets a failure reason.
Causes Failed() to return true on the client side. "reason" will be
incorporated into the message returned by ErrorText(). If you find
you need to return machine-readable information about failures, you
should incorporate it into your response protocol buffer and should
NOT call SetFailed().
"""
raise NotImplementedError
def IsCanceled(self):
"""Checks if the client cancelled the RPC.
If true, indicates that the client canceled the RPC, so the server may
as well give up on replying to it. The server should still call the
final "done" callback.
"""
raise NotImplementedError
def NotifyOnCancel(self, callback):
"""Sets a callback to invoke on cancel.
Asks that the given callback be called when the RPC is canceled. The
callback will always be called exactly once. If the RPC completes without
being canceled, the callback will be called after completion. If the RPC
has already been canceled when NotifyOnCancel() is called, the callback
will be called immediately.
NotifyOnCancel() must be called no more than once per request.
"""
raise NotImplementedError
class RpcChannel(object):
"""Abstract interface for an RPC channel.
An RpcChannel represents a communication line to a service which can be used
to call that service's methods. The service may be running on another
machine. Normally, you should not use an RpcChannel directly, but instead
construct a stub {@link Service} wrapping it. Example:
Example:
RpcChannel channel = rpcImpl.Channel("remotehost.example.com:1234")
RpcController controller = rpcImpl.Controller()
MyService service = MyService_Stub(channel)
service.MyMethod(controller, request, callback)
"""
def CallMethod(self, method_descriptor, rpc_controller,
request, response_class, done):
"""Calls the method identified by the descriptor.
Call the given method of the remote service. The signature of this
procedure looks the same as Service.CallMethod(), but the requirements
are less strict in one important way: the request object doesn't have to
be of any specific class as long as its descriptor is method.input_type.
"""
raise NotImplementedError
|
wolfskaempf/ga_statistics | refs/heads/master | lib/python2.7/site-packages/django/contrib/contenttypes/management.py | 476 | from django.apps import apps
from django.db import DEFAULT_DB_ALIAS, router
from django.utils import six
from django.utils.six.moves import input
def update_contenttypes(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, **kwargs):
"""
Creates content types for models in the given app, removing any model
entries that no longer have a matching model class.
"""
if not app_config.models_module:
return
try:
ContentType = apps.get_model('contenttypes', 'ContentType')
except LookupError:
return
if not router.allow_migrate_model(using, ContentType):
return
ContentType.objects.clear_cache()
app_label = app_config.label
app_models = {
model._meta.model_name: model
for model in app_config.get_models()}
if not app_models:
return
# Get all the content types
content_types = {
ct.model: ct
for ct in ContentType.objects.using(using).filter(app_label=app_label)
}
to_remove = [
ct
for (model_name, ct) in six.iteritems(content_types)
if model_name not in app_models
]
cts = [
ContentType(
app_label=app_label,
model=model_name,
)
for (model_name, model) in six.iteritems(app_models)
if model_name not in content_types
]
ContentType.objects.using(using).bulk_create(cts)
if verbosity >= 2:
for ct in cts:
print("Adding content type '%s | %s'" % (ct.app_label, ct.model))
# Confirm that the content type is stale before deletion.
if to_remove:
if interactive:
content_type_display = '\n'.join(
' %s | %s' % (ct.app_label, ct.model)
for ct in to_remove
)
ok_to_delete = input("""The following content types are stale and need to be deleted:
%s
Any objects related to these content types by a foreign key will also
be deleted. Are you sure you want to delete these content types?
If you're unsure, answer 'no'.
Type 'yes' to continue, or 'no' to cancel: """ % content_type_display)
else:
ok_to_delete = False
if ok_to_delete == 'yes':
for ct in to_remove:
if verbosity >= 2:
print("Deleting stale content type '%s | %s'" % (ct.app_label, ct.model))
ct.delete()
else:
if verbosity >= 2:
print("Stale content types remain.")
|
ahealy19/F-IDE-2016 | refs/heads/master | benchexec/tools/impara.py | 3 | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import xml.etree.ElementTree as ET
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool):
"""
Tool info for impara (https://github.com/bjowac/impara).
It always adds --xml-ui to the command-line arguments for easier parsing of the output.
"""
REQUIRED_PATHS = [
"impara"
]
def executable(self):
return util.find_executable('impara')
def version(self, executable):
return self._version_from_tool(executable)
def name(self):
return 'impara'
def cmdline(self, executable, options, tasks, propertyfile, rlimits):
if ("--xml-ui" not in options):
options = options + ["--xml-ui"]
self.options = options
return [executable] + options + tasks
def determine_result(self, returncode, returnsignal, output, isTimeout):
#an empty tag cannot be parsed into a tree
def sanitizeXML(s):
return s.replace("<>", "<emptyTag>") \
.replace("</>", "</emptyTag>")
if returnsignal == 0 and ((returncode == 0) or (returncode == 10)):
try:
tree = ET.fromstringlist(map(sanitizeXML, output))
status = tree.findtext('cprover-status')
if status is None:
def isErrorMessage(msg):
return msg.get('type', None) == 'ERROR'
messages = list(filter(isErrorMessage, tree.getiterator('message')))
if messages:
# for now, use only the first error message if there are several
msg = messages[0].findtext('text')
if msg == 'Out of memory':
status = 'OUT OF MEMORY'
elif msg:
status = 'ERROR ({0})'.format(msg)
else:
status = 'ERROR'
else:
status = 'INVALID OUTPUT'
elif status == "FAILURE":
assert returncode == 10
reason = tree.find('goto_trace').find('failure').findtext('reason')
if not reason:
reason = tree.find('goto_trace').find('failure').get('reason')
if 'unwinding assertion' in reason:
status = result.RESULT_UNKNOWN
else:
status = result.RESULT_FALSE_REACH
elif status == "SUCCESS":
assert returncode == 0
if "--no-unwinding-assertions" in self.options:
status = result.RESULT_UNKNOWN
else:
status = result.RESULT_TRUE_PROP
except Exception:
if isTimeout:
# in this case an exception is expected as the XML is invalid
status = 'TIMEOUT'
elif 'Minisat::OutOfMemoryException' in output:
status = 'OUT OF MEMORY'
else:
status = 'INVALID OUTPUT'
logging.exception("Error parsing impara output for returncode %d", returncode)
elif returncode == 64 and 'Usage error!' in output:
status = 'INVALID ARGUMENTS'
else:
status = result.RESULT_ERROR
return status
|
ryankanno/honolulu-makerfaire-raffle | refs/heads/master | hnlmakerfaire/uwsgi.py | 1 | import logging
from logging.handlers import RotatingFileHandler
from app import app
if __name__ == '__main__':
handler = RotatingFileHandler('/var/log/flask-hnlmakerfaire.log',
maxBytes=1024*1024*5, backupCount=30)
handler.setLevel(logging.DEBUG)
app.logger.addHandler(handler)
app.run()
|
jobiols/odoomrp-wip | refs/heads/8.0 | machine_manager_preventive/models/mrp_repair.py | 5 | # -*- coding: utf-8 -*-
# Copyright 2016 Daniel Campos - Avanzosc S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api
class MrpRepair(models.Model):
_inherit = 'mrp.repair'
preventive_operations = fields.Many2many(
comodel_name='preventive.machine.operation')
idmachine = fields.Many2one(comodel_name='machinery', string='Machine')
preventive = fields.Boolean(string='Is preventive')
@api.multi
def action_repair_end(self):
res = super(MrpRepair, self).action_repair_end()
self.mapped('preventive_operations').filtered(
lambda o: o.update_preventive ==
'after_repair')._next_action_update()
return res
|
pklaus/silhouette | refs/heads/master | src/silhouette.py | 1 | import time
import usb.core
import usb.util
from warnings import warn
from . import gpgl
class SilhouetteException(Exception):
pass
class Silhouette(object):
def __init__(self, **kw):
self.vendor_id = kw.get('vendor_id', 0x0b4d)
self.product_id = kw.get('product_id', None)
self.output_file = kw.get('output_file', None)
self.pos = (0, 0)
self._pressure = gpgl.Pressure()
self._speed = gpgl.Speed()
self._media = gpgl.Media()
self._offset = gpgl.Offset()
self._position = None
def usbscan(self):
args = {"find_all": True, "idVendor": self.vendor_id}
if self.product_id:
args["idProduct"] = self.product_id
devs = usb.core.find(**args)
devs = list(devs)
if not devs:
msg = "Can not find any devices with vendor_id == %s" % self.vendor_id
raise SilhouetteException(msg)
if len(devs) > 1:
msg = "There are multiple devices that match vendor_id == %s, using the first one in the list." % self.vendor_id
warn(msg)
return devs[0]
def connect(self):
if self.output_file:
self.init()
return
self.dev = self.usbscan()
if self.dev.is_kernel_driver_active(0):
self.dev.detach_kernel_driver(0)
usb.util.claim_interface(self.dev, 0)
self.dev.reset()
# set the active configuration. With no arguments, the first
# configuration will be the active one
print(self.dev)
self.dev.set_configuration()
# get an endpoint instance
cfg = self.dev.get_active_configuration()
intf = cfg[(0,0)]
self.ep_out = usb.util.find_descriptor(intf,
custom_match = lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_OUT)
assert self.ep_out is not None
self.ep_in = usb.util.find_descriptor(intf,
custom_match = lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_IN)
assert self.ep_in is not None
self.init()
def move(self, pos, rel=True):
pos = gpgl.Point(*list(pos))
if self._position == pos:
return
if rel:
rel_pos = pos - self._position
move = gpgl.RelativeMove(*rel_pos)
else:
move = gpgl.Move(*pos)
self.send(move)
self._position = gpgl.Point(*pos)
def get_position(self):
return self._position
def set_position(self, pos):
        if self._position is None:
self.move(pos, rel=False)
else:
self.move(pos)
position = property(get_position, set_position)
def draw(self, points):
cmd = gpgl.Draw(*points)
self.send(cmd)
self._position = cmd.points[-1]
def init(self):
self.write("\x1b\x04")
def set_offset(self, offset):
self._offset.offset = offset
self.send(self._offset)
def get_offset(self):
return self._offset.offset
offset = property(get_offset, set_offset)
def set_speed(self, speed):
self._speed.speed = speed
self.send(self._speed)
def get_speed(self):
return self._speed.speed
speed = property(get_speed, set_speed)
def set_media(self, media):
self._media.media = media
self.send(self._media)
def get_media(self):
return self._media.media
media = property(get_media, set_media)
def set_pressure(self, pressure):
self._pressure = gpgl.Pressure(pressure)
self.send(self._pressure)
def get_pressure(self):
return self._pressure.pressure
pressure = property(get_pressure, set_pressure)
def home(self):
self.send(gpgl.Home())
@property
def status(self):
if self.output_file:
return "ready"
reslen = self.ep_out.write("\x1b\x05")
resp = self.read(2)
resp = list(resp)
if len(resp) != 2:
raise ValueError("Bad response to status request")
(status_byte, magic_byte) = resp
if magic_byte != 0x3:
raise ValueError("Status magic byte does not equal 0x03 (0x%02x)" % resp[-1])
if status_byte == 0x30:
return "ready"
if status_byte == 0x31:
return "moving"
if status_byte == 0x32:
return "unloaded"
return "unknown"
@property
def ready(self):
return self.status == "ready"
@property
def moving(self):
return self.status == "moving"
@property
def unloaded(self):
return self.status == "unloaded"
@property
def version(self):
self.write("FG")
resp = self.read(1000)
resp = str.join('', map(chr, resp))
return resp
def wait(self):
while not self.ready:
time.sleep(.1)
def read(self, length=1):
if self.output_file:
return b''
info = self.ep_in.read(length)
return info
def write(self, msg):
msg = msg.encode()
if self.output_file:
self.output_file.write(msg)
return
bufsize = self.ep_out.wMaxPacketSize
idx = 0
#print str.join(' ', ["%s (0x%02x)" % (x, ord(x)) for x in msg])
while idx < len(msg):
submsg = msg[idx:idx + bufsize]
reslen = self.ep_out.write(submsg)
#print "[%s:%s] %s" % (idx, idx + bufsize, len(msg))
assert reslen == len(submsg), "%s != %s" % (reslen, len(submsg))
idx += bufsize
if idx < len(msg):
self.wait()
def send(self, *commands, **kw):
block = kw.get('block', True)
for cmd in commands:
self.write(cmd.encode())
if block:
self.wait()
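if __name__ == '__main__':
    # Minimal usage sketch; assumes a supported cutter is attached over USB
    # (alternatively, pass output_file=... to capture the generated commands).
    plotter = Silhouette()
    plotter.connect()
    plotter.speed = 10
    plotter.pressure = 5
    plotter.position = (0, 0)
    # trace a small square, then send the head back home
    plotter.draw([(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)])
    plotter.home()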
|
40223211/2015cd_midterm2 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/errno.py | 624 | """
This module makes available standard errno system symbols.
The value of each symbol is the corresponding integer value,
e.g., on most systems, errno.ENOENT equals the integer 2.
The dictionary errno.errorcode maps numeric codes to symbol names,
e.g., errno.errorcode[2] could be the string 'ENOENT'.
Symbols that are not relevant to the underlying system are not defined.
To map error codes to error messages, use the function os.strerror(),
e.g. os.strerror(2) could return 'No such file or directory'.
"""
errorcode= {1: 'EPERM', 2: 'ENOENT', 3: 'ESRCH', 4: 'EINTR', 5: 'EIO',
6: 'ENXIO', 7: 'E2BIG', 8: 'ENOEXEC', 9: 'EBADF', 10: 'ECHILD', 11: 'EAGAIN',
12: 'ENOMEM', 13: 'EACCES', 14: 'EFAULT', 15: 'ENOTBLK', 16: 'EBUSY',
17: 'EEXIST', 18: 'EXDEV', 19: 'ENODEV', 20: 'ENOTDIR', 21: 'EISDIR',
22: 'EINVAL', 23: 'ENFILE', 24: 'EMFILE', 25: 'ENOTTY', 26: 'ETXTBSY',
27: 'EFBIG', 28: 'ENOSPC', 29: 'ESPIPE', 30: 'EROFS', 31: 'EMLINK',
32: 'EPIPE', 33: 'EDOM', 34: 'ERANGE', 35: 'EDEADLOCK', 36: 'ENAMETOOLONG',
37: 'ENOLCK', 38: 'ENOSYS', 39: 'ENOTEMPTY', 40: 'ELOOP', 42: 'ENOMSG',
43: 'EIDRM', 44: 'ECHRNG', 45: 'EL2NSYNC', 46: 'EL3HLT', 47: 'EL3RST',
48: 'ELNRNG', 49: 'EUNATCH', 50: 'ENOCSI', 51: 'EL2HLT', 52: 'EBADE',
53: 'EBADR', 54: 'EXFULL', 55: 'ENOANO', 56: 'EBADRQC', 57: 'EBADSLT',
59: 'EBFONT', 60: 'ENOSTR', 61: 'ENODATA', 62: 'ETIME', 63: 'ENOSR',
64: 'ENONET', 65: 'ENOPKG', 66: 'EREMOTE', 67: 'ENOLINK', 68: 'EADV',
69: 'ESRMNT', 70: 'ECOMM', 71: 'EPROTO', 72: 'EMULTIHOP', 73: 'EDOTDOT',
74: 'EBADMSG', 75: 'EOVERFLOW', 76: 'ENOTUNIQ', 77: 'EBADFD', 78: 'EREMCHG',
79: 'ELIBACC', 80: 'ELIBBAD', 81: 'ELIBSCN', 82: 'ELIBMAX', 83: 'ELIBEXEC',
84: 'EILSEQ', 85: 'ERESTART', 86: 'ESTRPIPE', 87: 'EUSERS', 88: 'ENOTSOCK',
89: 'EDESTADDRREQ', 90: 'EMSGSIZE', 91: 'EPROTOTYPE', 92: 'ENOPROTOOPT',
93: 'EPROTONOSUPPORT', 94: 'ESOCKTNOSUPPORT', 95: 'ENOTSUP',
96: 'EPFNOSUPPORT', 97: 'EAFNOSUPPORT', 98: 'EADDRINUSE',
99: 'EADDRNOTAVAIL', 100: 'ENETDOWN', 101: 'ENETUNREACH', 102: 'ENETRESET',
103: 'ECONNABORTED', 104: 'ECONNRESET', 105: 'ENOBUFS', 106: 'EISCONN',
107: 'ENOTCONN', 108: 'ESHUTDOWN', 109: 'ETOOMANYREFS', 110: 'ETIMEDOUT',
111: 'ECONNREFUSED', 112: 'EHOSTDOWN', 113: 'EHOSTUNREACH', 114: 'EALREADY',
115: 'EINPROGRESS', 116: 'ESTALE', 117: 'EUCLEAN', 118: 'ENOTNAM',
119: 'ENAVAIL', 120: 'EISNAM', 121: 'EREMOTEIO', 122: 'EDQUOT',
123: 'ENOMEDIUM', 124: 'EMEDIUMTYPE', 125: 'ECANCELED', 126: 'ENOKEY',
127: 'EKEYEXPIRED', 128: 'EKEYREVOKED', 129: 'EKEYREJECTED',
130: 'EOWNERDEAD', 131: 'ENOTRECOVERABLE', 132: 'ERFKILL'}
EPERM=1
ENOENT=2
ESRCH=3
EINTR=4
EIO=5
ENXIO=6
E2BIG=7
ENOEXEC=8
EBADF=9
ECHILD=10
EAGAIN=11
ENOMEM=12
EACCES=13
EFAULT=14
ENOTBLK=15
EBUSY=16
EEXIST=17
EXDEV=18
ENODEV=19
ENOTDIR=20
EISDIR=21
EINVAL=22
ENFILE=23
EMFILE=24
ENOTTY=25
ETXTBSY=26
EFBIG=27
ENOSPC=28
ESPIPE=29
EROFS=30
EMLINK=31
EPIPE=32
EDOM=33
ERANGE=34
EDEADLOCK=35
ENAMETOOLONG=36
ENOLCK=37
ENOSYS=38
ENOTEMPTY=39
ELOOP=40
ENOMSG=42
EIDRM=43
ECHRNG=44
EL2NSYNC=45
EL3HLT=46
EL3RST=47
ELNRNG=48
EUNATCH=49
ENOCSI=50
EL2HLT=51
EBADE=52
EBADR=53
EXFULL=54
ENOANO=55
EBADRQC=56
EBADSLT=57
EBFONT=59
ENOSTR=60
ENODATA=61
ETIME=62
ENOSR=63
ENONET=64
ENOPKG=65
EREMOTE=66
ENOLINK=67
EADV=68
ESRMNT=69
ECOMM=70
EPROTO=71
EMULTIHOP=72
EDOTDOT=73
EBADMSG=74
EOVERFLOW=75
ENOTUNIQ=76
EBADFD=77
EREMCHG=78
ELIBACC=79
ELIBBAD=80
ELIBSCN=81
ELIBMAX=82
ELIBEXEC=83
EILSEQ=84
ERESTART=85
ESTRPIPE=86
EUSERS=87
ENOTSOCK=88
EDESTADDRREQ=89
EMSGSIZE=90
EPROTOTYPE=91
ENOPROTOOPT=92
EPROTONOSUPPORT=93
ESOCKTNOSUPPORT=94
ENOTSUP=95
EPFNOSUPPORT=96
EAFNOSUPPORT=97
EADDRINUSE=98
EADDRNOTAVAIL=99
ENETDOWN=100
ENETUNREACH=101
ENETRESET=102
ECONNABORTED=103
ECONNRESET=104
ENOBUFS=105
EISCONN=106
ENOTCONN=107
ESHUTDOWN=108
ETOOMANYREFS=109
ETIMEDOUT=110
ECONNREFUSED=111
EHOSTDOWN=112
EHOSTUNREACH=113
EALREADY=114
EINPROGRESS=115
ESTALE=116
EUCLEAN=117
ENOTNAM=118
ENAVAIL=119
EISNAM=120
EREMOTEIO=121
EDQUOT=122
ENOMEDIUM=123
EMEDIUMTYPE=124
ECANCELED=125
ENOKEY=126
EKEYEXPIRED=127
EKEYREVOKED=128
EKEYREJECTED=129
EOWNERDEAD=130
ENOTRECOVERABLE=131
ERFKILL=132
|
doronkatz/firefox-ios | refs/heads/master | scripts/xliff-to-strings.py | 5 | #!/usr/bin/env python
#
# xliff-export.py l10n-repository export-directory
#
# Convert the l10n repository from the following format:
#
# en/firefox-ios.xliff
# fr/firefox-ios.xliff
#
# To the following format:
#
# Client/en-US.lproj/Localizable.strings
# Client/fr.lproj/Localizable.strings
# ShareTo/en-US.lproj/ShareTo.strings
# ShareTo/fr.lproj/ShareTo.strings
# SendTo/en-US.lproj/SendTo.strings
# SendTo/fr.lproj/SendTo.strings
#
# For any Info.plist file in the xliff, we generate a InfoPlist.strings.
#
import glob
import os
import sys
from lxml import etree
NS = {'x':'urn:oasis:names:tc:xliff:document:1.2'}
# Files we are interested in. It would be nice to not hardcode this but I'm not totally sure how yet.
FILES = [
"Client/3DTouchActions.strings",
"Client/AuthenticationManager.strings",
"Client/BookmarkPanel.strings",
"Client/BookmarkPanelDeleteConfirm.strings",
"Client/ClearHistoryConfirm.strings",
"Client/ClearPrivateData.strings",
"Client/ClearPrivateDataConfirm.strings",
"Client/ErrorPages.strings",
"Client/FindInPage.strings",
"Client/HistoryPanel.strings",
"Client/Info.plist",
"Client/Intro.strings",
"Client/LightweightThemes.strings",
"Client/Localizable.strings",
"Client/LoginManager.strings",
"Client/Menu.strings",
"Client/PrivateBrowsing.strings",
"Client/Search.strings",
"Client/SendAnonymousUsageData.strings",
"Client/SendTo.strings",
"Client/Shared.strings",
"Client/Storage.strings",
"Extensions/SendTo/Info.plist",
"Extensions/ShareTo/ShareTo.strings",
"Extensions/Today/Today.strings",
"Extensions/ViewLater/Info.plist",
"Shared/Localizable.strings",
]
# Because Xcode is unpredictable. See bug 1162510 - Sync.strings are not imported
FILENAME_OVERRIDES = {
"Shared/Supporting Files/Info.plist": "Shared/Localizable.strings",
"Shared/Supporting Files/Shared.strings": "Client/Shared.strings",
"Storage.strings": "Client/Storage.strings",
}
# Because Xcode can't handle strings that need to live in two
# different bundles, we also duplicate some files.(For example
# SendTo.strings is needed both in the main app and in the SendTo
# extension.) See bug 1234322
FILES_TO_DUPLICATE = {
"Client/SendTo.strings": ["Extensions/SendTo/SendTo.strings"],
}
def export_xliff_file(file_node, export_path, target_language):
directory = os.path.dirname(export_path)
if not os.path.exists(directory):
os.makedirs(directory)
with open(export_path, "w") as fp:
for trans_unit_node in file_node.xpath("x:body/x:trans-unit", namespaces=NS):
trans_unit_id = trans_unit_node.get("id")
targets = trans_unit_node.xpath("x:target", namespaces=NS)
if trans_unit_id is not None and len(targets) == 1 and targets[0].text is not None:
notes = trans_unit_node.xpath("x:note", namespaces=NS)
if len(notes) == 1:
line = u"/* %s */\n" % notes[0].text
fp.write(line.encode("utf8"))
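                # Escape embedded double quotes so the emitted key/value pair
                # stays a valid .strings entry.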
source_text = trans_unit_id.replace('"', '\\"')
target_text = targets[0].text.replace('"', '\\"')
line = u"\"%s\" = \"%s\";\n\n" % (source_text, target_text)
fp.write(line.encode("utf8"))
# Export fails if the strings file is empty. Xcode probably checks
# on file length vs read error.
contents = open(export_path).read()
if len(contents) == 0:
os.remove(export_path)
def original_path(root, target, original):
    dirname, filename = os.path.split(original)
    if filename == "Info.plist":
        filename = "InfoPlist.strings"
    lproj = "%s.lproj" % target
    path = dirname + "/" + lproj + "/" + filename
    return path
if __name__ == "__main__":
import_root = sys.argv[1]
if not os.path.isdir(import_root):
print "import path does not exist or is not a directory"
sys.exit(1)
export_root = sys.argv[2]
if not os.path.isdir(export_root):
print "export path does not exist or is not a directory"
sys.exit(1)
for xliff_path in glob.glob(import_root + "/*/firefox-ios.xliff"):
print "Exporting", xliff_path
with open(xliff_path) as fp:
tree = etree.parse(fp)
root = tree.getroot()
# Make sure there are <file> nodes in this xliff file.
file_nodes = root.xpath("//x:file", namespaces=NS)
if len(file_nodes) == 0:
print " ERROR: No translated files. Skipping."
continue
# Take the target language from the first <file>. Not sure if that
# is a bug in the XLIFF, but in some files only the first node has
# the target-language set.
target_language = file_nodes[0].get('target-language')
if not target_language:
print " ERROR: Missing target-language. Skipping."
continue
# Export each <file> node as a separate strings file under the
# export root.
for file_node in file_nodes:
original = file_node.get('original')
original = FILENAME_OVERRIDES.get(original, original)
if original in FILES:
# Because we have strings files that need to live in multiple bundles
# we build a list of export_paths. Start with the default.
export_paths = [original_path(export_root, target_language, original)]
for extra_copy in FILES_TO_DUPLICATE.get(original, []):
export_path = original_path(export_root, target_language, extra_copy)
export_paths.append(export_path)
for export_path in export_paths:
print " Writing %s to %s" % (original, export_path)
export_xliff_file(file_node, export_path, target_language)
|
stainbank/simulocloud | refs/heads/master | simulocloud/tiles.py | 1 | """
tiles
"""
import numpy as np
import itertools
import simulocloud.pointcloud
import simulocloud.exceptions
class Tile(simulocloud.pointcloud.PointCloud):
"""An immmutable pointcloud."""
def __init__(self, xyz, header=None):
"""See documentation for `simulocloud.pointcloud.Pointcloud`."""
super(Tile, self).__init__(xyz, header)
self._arr.flags.writeable = False
@property
def arr(self):
"""Get, but not set, the underlying (x, y, z) array of point coordinates."""
return self._arr
@arr.setter
def arr(self, value):
raise simulocloud.exceptions.TileException("Tile pointcloud cannot be modified")
class TilesGrid(object):
"""Container for grid of tiles described spatially by edges grid.
Attributes
----------
tiles: `numpy.ndarray` (ndim=3, dtype=object)
spatially contiguous pointclouds (usually type `Tile`) gridded to a 3D
array ordered by sequence of intervals in x (0), y (1) and z (2)
edges: `numpy.ndarray` (ndim=4, dtype=float)
three 3D x, y and z coordinate arrays (i,j indexing) concatenated in
4th axis, defining intervals seperating elements in `tiles` such that:
- `edges[ix, iy, iz]` returns a point coordinate at the corner between
adjacent pointclouds `tiles[ix-1, iy-1, iz-1], tiles[ix, iy, iz]`
- the bounds produced by concatenation of `edges[ix, iy, iz]` and
`edges[ix+1, iy+1, iz+1]` (i.e. `grid[ix, iy, iz].bounds`)
are guaranteed to spatially contain (but not necessarily equal) those
of the pointcloud at `tiles[ix, iy, iz]`
bounds: `Bounds`
defined by the outermost coordinates of `edges`
Subsetting
----------
A `TilesGrid` can be sliced or indexed to produce a subset (i.e. another
`TilesGrid` instance), with the following restrictions:
- step size must be 1 (or None)
- negative steps (reverse slicing) is unsupported
Subsetting produces views into, not copies of, the `tiles` and `edge` grid
arrays of the parent. This makes subsetting a light operation, but care
must be taken not to modify these attributes.
"""
def __init__(self, tiles, edges, validate=True):
"""Directly initialise `TilesGrid` from grids.
Arguments
---------
tiles: `numpy.ndarray` (ndim=3, dtype=object)
3D array of ordered pointclouds gridded onto `edges`
usually produced by `grid_pointclouds`
edges: `numpy.ndarray` (ndim=4, dtype=float)
4D array of shape (nx+1, ny+1, nz+1, 3) where nx, ny, nz = tiles.shape
usually produced by `make_edges`
Instantiation by constructor classmethods is preferred.
"""
self.tiles = tiles
self.edges = edges
if validate:
if not self.validate():
msg = "Tiles do not fit into edges grid"
raise simulocloud.exceptions.TilesGridException(msg)
def __getitem__(self, key):
"""Return a subset of TilesGrid instance using numpy-like indexing.
Notes
-----
- Steps are forbidden; only contiguous TilesGrids can be created
- Negative steps are forbidden
"""
# Coerce key to list
try:
key = list(key)
except TypeError:
key = [key]
# Freeze slice indices to shape of tiles array
key_ = []
for sl, nd in itertools.izip_longest(key, self.tiles.shape,
fillvalue=slice(None)):
try: # assume slice
start, stop, step = sl.indices(nd)
except AttributeError: # coerce indices to slice
if sl is None:
start, stop, step = slice(None).indices(nd)
else: # single element indexing
stop = None if sl == -1 else sl+1
start, stop, step = slice(sl, stop).indices(nd)
if not step == 1:
raise ValueError("TilesGrid must be contiguous, slice step must be 1")
key_.append(slice(start, stop))
# Extend slice stops by 1 for edges array
ekey = [slice(sl.start, sl.stop+1) if sl.stop - sl.start
else slice(sl.start, sl.stop) # dont create edges where no tiles
for sl in key_]
return type(self)(self.tiles[key_], self.edges[ekey], validate=False)
def __iter__(self):
"""Iterate over the tiles array."""
return np.nditer(self.tiles, flags=["refs_ok"])
def __len__(self):
"""Return the number of elements in tiles grid."""
return self.tiles.size
def __nonzero__(self):
"""Return True if there are any tiles."""
return bool(len(self))
@classmethod
def from_splitlocs(cls, pcs, splitlocs, inclusive=True):
"""Construct `TilesGrid` instance by retiling pointclouds.
Arguments
---------
pcs: seq of `simulocloud.pointcloud.Pointcloud`
splitlocs: dict {axis: locs, ...}, where:
axis: str
'x', 'y' and/or 'z'
locs: list
locations along specified axis at which to split
(see docs for `simulocloud.pointcloud.PointCloud.split`)
axes can be omitted, resulting in no splitting in that
axis
inclusive: bool (optional, default=True)
            if True, upper bounds of grid outer edges are increased by 1e-6,
so that all points in `pcs` are preserved upon gridding
if False, any points exactly on the upper bounds of `pcs` are lost
(i.e. maintain upper bounds exclusive cropping)
Returns
-------
`TilesGrid` instance
internal edges defined by `splitlocs`
lower grid bounds are equal to merged bounds of `pcs`, upper grid
bounds are 1e-6 higher than those of `pcs` if `inclusive` is True,
otherwise they are equal
"""
# Sort splitlocs and determine their bounds
mins, maxs = [],[]
for axis in 'xyz':
locs = sorted(splitlocs.get(axis, []))
try:
min_, max_ = locs[0], locs[-1]
except IndexError:
                min_, max_ = np.inf, -np.inf  # no locs for this axis: always inside any other bounds
splitlocs[axis] = locs
mins.append(min_), maxs.append(max_)
# Ensure grid will be valid
splitloc_bounds = simulocloud.pointcloud.Bounds(*(mins + maxs))
pcs_bounds = simulocloud.pointcloud.merge_bounds([pc.bounds for pc in pcs])
if not simulocloud.pointcloud._inside_bounds(splitloc_bounds, pcs_bounds):
raise ValueError("Split locations must be within total bounds of pointclouds")
edges = make_edges(pcs_bounds, splitlocs)
tiles = grid_pointclouds(pcs, edges, pctype=Tile)
return cls(tiles, edges, validate=False)
@property
def bounds(self):
"""Return the bounds containing the entire grid of tiles."""
bounds = np.concatenate([self.edges[0,0,0], self.edges[-1,-1,-1]])
return simulocloud.pointcloud.Bounds(*bounds)
@property
def shape(self):
"""Return the shape of the grid of tiles."""
return self.tiles.shape
def validate(self):
"""Return True if grid edges accurately describes tiles."""
for ix, iy, iz in itertools.product(*map(xrange, self.tiles.shape)):
# Ensure pointcloud bounds fall within edges
tile = self.tiles[ix, iy, iz]
for compare, edges, bounds in zip(
(np.less_equal, np.greater_equal), # both edges inclusive due to outermost edges
(self.edges[ix, iy, iz], self.edges[ix+1, iy+1, iz+1]),
(tile.bounds[:3], tile.bounds[3:])): # mins, maxs
for edge, bound in zip(edges, bounds):
if not compare(edge, bound):
return False
return True
def grid_pointclouds(pcs, edges, pctype=Tile):
"""Return a 3D array of (merged) pointclouds gridded to edges.
Arguments
---------
pcs: seq of `simulocloud.pointcloud.PointCloud`
edges: `numpy.ndarray` (ndim=4, dtype=float)
ij-indexed meshgrids for x, y and z stacked in 4th axis, whose values
defining boundaries of cells into `pcs` will be gridded
pctype: subclass of `simulocloud.pointcloud.PointCloud` (optional)
type of pointclouds to return
        default = `Tile`
Returns
-------
tiles: `numpy.ndarray` (ndim=3, dtype=object)
3D array containing pointclouds (of type `pctype`) resulting from the
(collective) splitting of `pcs` in each axis according to `locs`
in `splitlocs`
sorted `locs` align with sequential pointclouds along each array axis:
0:x, 1:y, 2:z
"""
# Pre-allocate empty tiles array
shape = (len(pcs),) + tuple((n-1 for n in edges.shape[:3]))
tiles = np.empty(shape, dtype=object)
# Build 4D array with pcs split in x, y and z
    for i, pc in enumerate(pcs):
        xpcs = pc.split('x', edges[:,0,0,0], pctype=pctype)[1:-1]
        for ix, xpc in enumerate(xpcs):
            ypcs = xpc.split('y', edges[0,:,0,1])[1:-1]
            for iy, ypc in enumerate(ypcs):
                zpcs = ypc.split('z', edges[0,0,:,2])[1:-1]
                # Assign the column of z-split pointclouds to its (i, ix, iy) slot
                tiles[i, ix, iy] = zpcs
# Flatten to 3D
return np.sum(tiles, axis=0)
def fractional_splitlocs(bounds, nx=None, ny=None, nz=None):
"""Generate locations to split bounds into n even sections per axis.
Arguments
---------
bounds: `simulocloud.pointcloud.Bounds` (or similiar)
bounds within which to create tiles
nx, ny, nz : int (default=None)
number of pointclouds desired along each axis
no splitting if n < 2 (or None)
Returns
-------
splitlocs: dict ({axis: locs, ...)}
lists of locations for each axis (i.e. 'x', 'y', 'z')
len(locs) = n-1; omitted if n=None
"""
bounds = simulocloud.pointcloud.Bounds(*bounds) #should be a strict bounds (min<max, etc)
nsplits = {axis: n for axis, n in zip('xyz', (nx, ny, nz)) if n is not None}
# Build splitlocs
splitlocs = {}
for axis, n in nsplits.iteritems():
min_, max_ = simulocloud.pointcloud.axis_bounds(bounds, axis)
splitlocs[axis] = np.linspace(min_, max_, num=n,
endpoint=False)[1:] # "inside" edges only
return splitlocs
def make_edges(bounds, splitlocs, inclusive=False):
"""Return coordinate array describing the edges between gridded pointclouds.
Arguments
---------
bounds: `simulocloud.pointcloud.Bounds` or similiar
(minx, miny, minz, maxx, maxy, maxz) bounds of entire grid
splitlocs: dict {axis: locs, ...}, where:
axis: str
'x', 'y' and/or 'z'
locs: list
locations along specified axis at which to split
(see docs for `simulocloud.pointcloud.PointCloud.split`)
axes can be omitted, resulting in no splitting in that axis
inclusive: bool (optional, default=False)
        if True, upper bounds of grid outer edges are increased by 1e-6,
so that all points in a pointcloud are guaranteed to be preserved upon
gridding when `bounds` is equal to the bounds of said pointcloud
if False, any points exactly on the upper bounds of `pcs` are lost
(i.e. maintain upper bounds exclusive cropping)
Returns
-------
edges: `numpy.ndarray` (ndim=4, dtype=float)
4D array containing x, y and z coordinate arrays (see documentation for
`numpy.meshgrid`), indexed by 'ij' and concatenated in 4th dimension
indices, such that `edges[ix, iy, iz, :]` returns a single point
coordinate in the form `array([x, y, z])`
Notes and Examples
------------------
An edges` grid provides a spatial description of the pointclouds in a
`tiles` grid:
- the coordinates at `edges[ix, iy, iz]` lies between the two adjacent
pointclouds `tiles[ix-1, iy-1, iz-1], tiles[ix, iy, iz]`
- `edges[ix, iy, iz]` and `edges[ix+1, iy+1, iz+1]` combine to form a set
of bounds which contain --- but are not (necessarily) equal to --- those
of the pointcloud at `tiles[ix, iy, iz]`
>>> splitlocs = fractional_splitlocs(pc.bounds, nx=10, ny=8, nz=5)
>>> edges = make_edges(pc.bounds, splitlocs)
>>> tiles = grid_pointclouds([pc], edges)
>>> print tiles.shape, edges.shape # +1 in each axis
(10, 8, 5) (11, 9, 6, 3)
>>> ix, iy, iz = 5, 3, 2
    >>> # Show edge between tile pointclouds
>>> print (tiles[ix-1, iy-1, iz-1].bounds[3:], # upper bounds
... edges[ix, iy, iz],
... tiles[ix, iy, iz].bounds[:3]) # lower bounds
((14.99, 24.98, 1.98), array([ 15., 25., 2.]), (15.01, 25.02, 2.09))
>>> # Show bounds around tile
>>> print tiles[ix, iy, iz].bounds
Bounds: minx=15, miny=25, minz=2.09
maxx=16, maxy=26.2, maxz=2.99
>>> print Bounds(*np.concatenate([edges[ix, iy, iz],
... edges[ix+1, iy+1, iz+1]]))
Bounds: minx=15, miny=25, minz=2
maxx=16, maxy=26.2, maxz=3
"""
if inclusive:
bounds = np.array(bounds)
bounds[3:] += 1e-6 # expand upper bounds to ensure all points contained
bounds = simulocloud.pointcloud.Bounds(*bounds)
#! Should fail if splitlocs is not within bounds
# Determine bounds for each tile in each axis
edges = []
for axis in 'xyz':
min_, max_ = simulocloud.pointcloud.axis_bounds(bounds, axis)
locs = np.array(splitlocs.setdefault(axis, np.array([])))
edges.append(np.concatenate([[min_], locs, [max_]]))
# Grid edge coordinates
return np.stack(np.meshgrid(*edges, indexing='ij'), axis=-1)
def make_regular_edges(bounds, spacings, bases=None, exact=False):
"""Return `edges` array with regular interval spacing.
Arguments
---------
bounds: `Bounds` or equivalent tuple
(minx, miny, minz, maxx, maxy, maxz)
spacings: `dict`
{axis: spacing}, where
axis: `str`
any combination of 'x', 'y', 'z'
spacing: numeric
size of regular interval in that axis
            (may be adjusted; see `exact` below)
bases: `dict` (optional)
{axis: base} to align bounds (see documentation for `align_bounds`)
note that bounds will become unaligned if `exact` is True, unless
`bases` and `spacings` are equal
exact: bool (default=False):
for a given axis, unless (maxbound-minbound)%spacing == 0, either of
spacing or bounds must be adjusted to yield integer n intervals
if True, upper bound will be adjusted downwards to ensure spacing is
exactly as specified (edges bounds will no longer == `bounds`!)
if False, spacing will be adjusted and bounds will remain as specified
"""
if bases is not None:
bounds = align_bounds(bounds, bases)
splitlocs = {}
for axis, spacing in spacings.iteritems():
minbound, maxbound = simulocloud.pointcloud.axis_bounds(bounds, axis)
num = int((maxbound-minbound)/(spacing*1.))
if exact:
# Adjust upper bound to ensure exact spacing
maxbound = minbound + int((maxbound-minbound)/spacing) * spacing
bounds = bounds._replace(**{'max'+axis: maxbound})
splitlocs[axis] = np.linspace(minbound, maxbound, num=num, endpoint=False)[1:]
return make_edges(bounds, splitlocs)
def align_bounds(bounds, bases):
"""Contract `bounds` such that each axis aligns on it's respective `base`.
Arguments
---------
bounds: `Bounds` or equivalent tuple
(minx, miny, minz, maxx, maxy, maxz)
bases: `dict`
{axis: base} where
axis: `str`
any combination of 'x', 'y', 'z'
base: numeric
value onto which to align axis bounds
Returns
-------
bounds: `Bounds`
        bounds in which each axis specified in `bases` is a multiple of the
respective base
always equal or smaller in area than input `bounds`
Example
-------
>>> bounds
Bounds(minx=3.7, miny=-11.3, minz=7.5, maxx=20.6, maxy=5.3, maxz=23.3)
>>> bases
{'x': 1.0, 'y': 0.5}
>>> align_bounds(bounds, bases)
Bounds(minx=4.0, miny=-11.0, minz=7.5, maxx=20.0, maxy=5.0, maxz=23.3)
"""
bases = {axis: float(base) for axis, base in bases.iteritems()}
bounds = simulocloud.pointcloud.Bounds(*bounds)
replacements = {}
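    # Round each min bound up and each max bound down to the nearest multiple
    # of its base, so the aligned bounds never extend beyond the input bounds.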
for axis, base in bases.iteritems():
minbound, maxbound = simulocloud.pointcloud.axis_bounds(bounds, axis)
remain = (minbound % base)
replacements['min'+axis] = minbound + base - remain if remain else minbound
replacements['max'+axis] = maxbound - maxbound % base
return bounds._replace(**replacements)
|
fmarchenko/i3pystatus | refs/heads/master | docs/module_docs.py | 9 |
import pkgutil
import importlib
import sphinx.application
from docutils.parsers.rst import Directive
from docutils.nodes import paragraph
from docutils.statemachine import StringList
import i3pystatus.core.settings
import i3pystatus.core.modules
from i3pystatus.core.imputil import ClassFinder
from i3pystatus.core.color import ColorRangeModule
IGNORE_MODULES = ("__main__", "core", "tools")
def is_module(obj):
return (isinstance(obj, type)
and issubclass(obj, i3pystatus.core.settings.SettingsBase)
and not obj.__module__.startswith("i3pystatus.core."))
def fail_on_missing_dependency_hints(obj, lines):
# We can automatically check in some cases if we forgot something
if issubclass(obj, ColorRangeModule):
if all("colour" not in line for line in lines):
raise ValueError(">>> Module <{}> uses ColorRangeModule and should document it <<<\n"
"> Requires the PyPI package ``colour``".format(obj.__name__))
def check_settings_consistency(obj, settings):
errs = []
for setting in settings:
if not setting.required and setting.default is setting.sentinel:
errs.append("<" + setting.name + ">")
if errs:
raise ValueError(">>> Module <{}> has non-required setting(s) {} with no default! <<<\n"
.format(obj.__name__, ", ".join(errs)))
def process_docstring(app, what, name, obj, options, lines):
class Setting:
doc = ""
required = False
default = sentinel = object()
empty = object()
def __init__(self, cls, setting):
if isinstance(setting, tuple):
self.name = setting[0]
self.doc = setting[1]
else:
self.name = setting
if self.name in cls.required:
self.required = True
elif hasattr(cls, self.name):
default = getattr(cls, self.name)
if isinstance(default, str) and not len(default)\
or default is None:
default = self.empty
self.default = default
def __str__(self):
attrs = []
if self.required:
attrs.append("required")
if self.default not in [self.sentinel, self.empty]:
attrs.append("default: ``{default}``".format(default=self.default))
if self.default is self.empty:
attrs.append("default: *empty*")
formatted = "* **{name}** {attrsf} {doc}".format(
name=self.name,
doc="– " + self.doc if self.doc else "",
attrsf=" ({attrs})".format(attrs=", ".join(attrs)) if attrs else "")
return formatted
if is_module(obj) and obj.settings:
fail_on_missing_dependency_hints(obj, lines)
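        # Inject a raw-HTML header naming the module and class being documented.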
if issubclass(obj, i3pystatus.core.modules.Module):
mod = obj.__module__
if mod.startswith("i3pystatus."):
mod = mod[len("i3pystatus."):]
lines[0:0] = [
".. raw:: html",
"",
" <div class='modheader'>" +
"Module name: <code class='modname descclassname'>" + mod + "</code> " +
"(class <code class='descclassname'>" + name + "</code>)" +
"</div>",
"",
]
else:
lines[0:0] = [
".. raw:: html",
"",
" <div class='modheader'>class <code class='descclassname'>" + name + "</code></div>",
"",
]
lines.append(".. rubric:: Settings")
lines.append("")
settings = [Setting(obj, setting) for setting in obj.settings]
lines += map(str, settings)
check_settings_consistency(obj, settings)
lines.append("")
def process_signature(app, what, name, obj, options, signature, return_annotation):
if is_module(obj):
return ("", return_annotation)
def get_modules(path, package):
modules = []
for finder, modname, is_package in pkgutil.iter_modules(path):
if modname not in IGNORE_MODULES:
modules.append(get_module(finder, modname, package))
return modules
def get_module(finder, modname, package):
fullname = "{package}.{modname}".format(package=package, modname=modname)
return (modname, finder.find_loader(fullname)[0].load_module(fullname))
def get_all(module_path, modname, basecls):
mods = []
finder = ClassFinder(basecls)
for name, module in get_modules(module_path, modname):
classes = finder.get_matching_classes(module)
found = []
for cls in classes:
if cls.__name__ not in found:
found.append(cls.__name__)
mods.append((module.__name__, cls.__name__))
return sorted(mods, key=lambda module: module[0])
def generate_automodules(path, name, basecls):
modules = get_all(path, name, basecls)
contents = []
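    # Emit a bullet list linking each module, then an automodule directive
    # (with its class) for every module found.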
for mod in modules:
contents.append("* :py:mod:`~{}`".format(mod[0]))
contents.append("")
for mod in modules:
contents.append(".. _{}:\n".format(mod[0].split(".")[-1]))
contents.append(".. automodule:: {}".format(mod[0]))
contents.append(" :members: {}\n".format(mod[1]))
return contents
class AutogenDirective(Directive):
required_arguments = 2
has_content = True
def run(self):
# Raise an error if the directive does not have contents.
self.assert_has_content()
modname = self.arguments[0]
modpath = importlib.import_module(modname).__path__
basecls = getattr(i3pystatus.core.modules, self.arguments[1])
contents = []
for e in self.content:
contents.append(e)
contents.append("")
contents.extend(generate_automodules(modpath, modname, basecls))
node = paragraph()
self.state.nested_parse(StringList(contents), 0, node)
return [node]
def setup(app: sphinx.application.Sphinx):
app.add_directive("autogen", AutogenDirective)
app.connect("autodoc-process-docstring", process_docstring)
app.connect("autodoc-process-signature", process_signature)
|
ieguinoa/tools-iuc | refs/heads/master | deprecated/tools/rglasso/rgToolFactory.py | 25 | # rgToolFactory.py
# see https://bitbucket.org/fubar/galaxytoolfactory/wiki/Home
#
# copyright ross lazarus (ross stop lazarus at gmail stop com) May 2012
#
# all rights reserved
# Licensed under the LGPL
# suggestions for improvement and bug fixes welcome at https://bitbucket.org/fubar/galaxytoolfactory/wiki/Home
#
# july 2014
# added buffered read of sterror after run
#
# august 2013
# found a problem with GS if $TMP or $TEMP missing - now inject /tmp and warn
#
# july 2013
# added ability to combine images and individual log files into html output
# just make sure there's a log file foo.log and it will be output
# together with all images named like "foo_*.pdf
# otherwise old format for html
#
# January 2013
# problem pointed out by Carlos Borroto
# added escaping for <>$ - thought I did that ages ago...
#
# August 11 2012
# changed to use shell=False and cl as a sequence
# This is a Galaxy tool factory for simple scripts in python, R or whatever ails ye.
# It also serves as the wrapper for the new tool.
#
# you paste and run your script
# Only works for simple scripts that read one input from the history.
# Optionally can write one new history dataset,
# and optionally collect any number of outputs into links on an autogenerated HTML page.
# DO NOT install on a public or important site - please.
# installed generated tools are fine if the script is safe.
# They just run normally and their user cannot do anything unusually insecure
# but please, practice safe toolshed.
# Read the fucking code before you install any tool
# especially this one
# After you get the script working on some test data, you can
# optionally generate a toolshed compatible gzip file
# containing your script safely wrapped as an ordinary Galaxy script in your local toolshed for
# safe and largely automated installation in a production Galaxy.
# If you opt for an HTML output, you get all the script outputs arranged
# as a single Html history item - all output files are linked, thumbnails for all the pdfs.
# Ugly but really inexpensive.
#
# Patches appreciated please.
#
#
# long route to June 2012 product
# Behold the awesome power of Galaxy and the toolshed with the tool factory to bind them
# derived from an integrated script model
# called rgBaseScriptWrapper.py
# Note to the unwary:
# This tool allows arbitrary scripting on your Galaxy as the Galaxy user
# There is nothing stopping a malicious user doing whatever they choose
# Extremely dangerous!!
# Totally insecure. So, trusted users only
#
# preferred model is a developer using their throw away workstation instance - ie a private site.
# no real risk. The universe_wsgi.ini admin_users string is checked - only admin users are permitted to run this tool.
#
import math
import optparse
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
import time
progname = os.path.split(sys.argv[0])[1]
myversion = 'V000.2 June 2012'
verbose = False
debug = False
toolFactoryURL = 'https://bitbucket.org/fubar/galaxytoolfactory'
buffsize = 1048576
def timenow():
"""return current time as a string
"""
return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))
html_escape_table = {
"&": "&",
">": ">",
"<": "<",
"$": "\$"}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c, c) for c in text)
def cmd_exists(cmd):
return subprocess.call("type " + cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
class ScriptRunner:
"""class is a wrapper for an arbitrary script
"""
def __init__(self, opts=None):
"""
cleanup inputs, setup some outputs
"""
self.useGM = cmd_exists('gm')
self.useIM = cmd_exists('convert')
self.useGS = cmd_exists('gs')
self.temp_warned = False # we want only one warning if $TMP not set
if opts.output_dir: # simplify for the tool tarball
os.chdir(opts.output_dir)
self.thumbformat = 'png'
self.opts = opts
self.toolname = re.sub('[^a-zA-Z0-9_]+', '', opts.tool_name) # a sanitizer now does this but..
self.toolid = self.toolname
self.myname = sys.argv[0] # get our name because we write ourselves out as a tool later
self.pyfile = self.myname # crude but efficient - the cruft won't hurt much
self.xmlfile = '%s.xml' % self.toolname
s = open(self.opts.script_path, 'r').readlines()
s = [x.rstrip() for x in s] # remove pesky dos line endings if needed
self.script = '\n'.join(s)
fhandle, self.sfile = tempfile.mkstemp(prefix=self.toolname, suffix=".%s" % (opts.interpreter))
tscript = open(self.sfile, 'w') # use self.sfile as script source for Popen
tscript.write(self.script)
tscript.close()
self.indentedScript = '\n'.join([' %s' % x for x in s]) # for restructured text in help
self.escapedScript = '\n'.join([html_escape(x) for x in s])
self.elog = os.path.join(self.opts.output_dir, "%s_error.log" % self.toolname)
self.tlog = os.path.join(self.opts.output_dir, "%s_runner.log" % self.toolname)
if opts.output_dir: # may not want these complexities
art = '%s.%s' % (self.toolname, opts.interpreter)
artpath = os.path.join(self.opts.output_dir, art) # need full path
artifact = open(artpath, 'w') # use self.sfile as script source for Popen
artifact.write(self.script)
artifact.close()
self.html = []
self.cl = (opts.interpreter, self.sfile)
self.outFormats = 'tabular' # TODO make this an option at tool generation time
self.inputFormats = 'tabular' # TODO make this an option at tool generation time
self.test1Input = '%s_test1_input.xls' % self.toolname
self.test1Output = '%s_test1_output.xls' % self.toolname
self.test1HTML = '%s_test1_output.html' % self.toolname
def makeXML(self):
"""
Create a Galaxy xml tool wrapper for the new script as a string to write out
fixme - use templating or something less fugly than this example of what we produce
<tool id="reverse" name="reverse" version="0.01">
<description>a tabular file</description>
<command interpreter="python">
reverse.py --script_path "$runMe" --interpreter "python"
--tool_name "reverse" --input_tab "$input1" --output_tab "$tab_file"
</command>
<inputs>
<param name="input1" type="data" format="tabular" label="Select a suitable input file from your history"/><param name="job_name" type="text" label="Supply a name for the outputs to remind you what they contain" value="reverse"/>
</inputs>
<outputs>
<data format="tabular" name="tab_file" label="${job_name}"/>
</outputs>
<help>
**What it Does**
Reverse the columns in a tabular file
</help>
<configfiles>
<configfile name="runMe">
# reverse order of columns in a tabular file
import sys
inp = sys.argv[1]
outp = sys.argv[2]
i = open(inp,'r')
o = open(outp,'w')
for row in i:
rs = row.rstrip().split('\t')
rs.reverse()
o.write('\t'.join(rs))
o.write('\n')
i.close()
o.close()
</configfile>
</configfiles>
</tool>
"""
newXML = """<tool id="%(toolid)s" name="%(toolname)s" version="%(tool_version)s">
%(tooldesc)s
%(command)s
<inputs>
%(inputs)s
</inputs>
<outputs>
%(outputs)s
</outputs>
<configfiles>
<configfile name="runMe">
%(script)s
</configfile>
</configfiles>
%(tooltests)s
<help>
%(help)s
</help>
</tool>""" # needs a dict with toolname, toolid, interpreter, scriptname, command, inputs as a multi line string ready to write, outputs ditto, help ditto
newCommand = """<command interpreter="python">
%(toolname)s.py --script_path "$runMe" --interpreter "%(interpreter)s"
--tool_name "%(toolname)s" %(command_inputs)s %(command_outputs)s
</command>""" # may NOT be an input or htmlout
tooltestsTabOnly = """<tests><test>
<param name="input1" value="%(test1Input)s" ftype="tabular"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="tab_file" file="%(test1Output)s" ftype="tabular"/>
</test></tests>"""
tooltestsHTMLOnly = """<tests><test>
<param name="input1" value="%(test1Input)s" ftype="tabular"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="html_file" file="%(test1HTML)s" ftype="html" lines_diff="5"/>
</test></tests>"""
tooltestsBoth = """<tests><test>
<param name="input1" value="%(test1Input)s" ftype="tabular"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="tab_file" file="%(test1Output)s" ftype="tabular" />
<output name="html_file" file="%(test1HTML)s" ftype="html" lines_diff="10"/>
</test></tests>"""
xdict = {}
xdict['tool_version'] = self.opts.tool_version
xdict['test1Input'] = self.test1Input
xdict['test1HTML'] = self.test1HTML
xdict['test1Output'] = self.test1Output
if self.opts.make_HTML and self.opts.output_tab != 'None':
xdict['tooltests'] = tooltestsBoth % xdict
elif self.opts.make_HTML:
xdict['tooltests'] = tooltestsHTMLOnly % xdict
else:
xdict['tooltests'] = tooltestsTabOnly % xdict
xdict['script'] = self.escapedScript
# configfile is least painful way to embed script to avoid external dependencies
# but requires escaping of <, > and $ to avoid Mako parsing
if self.opts.help_text:
xdict['help'] = open(self.opts.help_text, 'r').read()
else:
xdict['help'] = 'Please ask the tool author for help as none was supplied at tool generation'
coda = ['**Script**', 'Pressing execute will run the following code over your input file and generate some outputs in your history::']
coda.append(self.indentedScript)
coda.append('**Attribution** This Galaxy tool was created by %s at %s\nusing the Galaxy Tool Factory.' % (self.opts.user_email, timenow()))
coda.append('See %s for details of that project' % (toolFactoryURL))
coda.append('Please cite: Creating re-usable tools from scripts: The Galaxy Tool Factory. Ross Lazarus; Antony Kaspi; Mark Ziemann; The Galaxy Team. ')
coda.append('Bioinformatics 2012; doi: 10.1093/bioinformatics/bts573')
xdict['help'] = '%s\n%s' % (xdict['help'], '\n'.join(coda))
if self.opts.tool_desc:
xdict['tooldesc'] = '<description>%s</description>' % self.opts.tool_desc
else:
xdict['tooldesc'] = ''
xdict['command_outputs'] = ''
xdict['outputs'] = ''
if self.opts.input_tab != 'None':
xdict['command_inputs'] = '--input_tab "$input1" ' # the space may matter a lot if we append something
xdict['inputs'] = '<param name="input1" type="data" format="%s" label="Select a suitable input file from your history"/> \n' % self.inputFormats
else:
xdict['command_inputs'] = '' # assume no input - eg a random data generator
xdict['inputs'] = ''
xdict['inputs'] += '<param name="job_name" type="text" label="Supply a name for the outputs to remind you what they contain" value="%s"/> \n' % self.toolname
xdict['toolname'] = self.toolname
xdict['toolid'] = self.toolid
xdict['interpreter'] = self.opts.interpreter
xdict['scriptname'] = self.sfile
if self.opts.make_HTML:
xdict['command_outputs'] += ' --output_dir "$html_file.files_path" --output_html "$html_file" --make_HTML "yes" '
xdict['outputs'] += ' <data format="html" name="html_file" label="${job_name}.html"/>\n'
if self.opts.output_tab != 'None':
xdict['command_outputs'] += ' --output_tab "$tab_file"'
xdict['outputs'] += ' <data format="%s" name="tab_file" label="${job_name}"/>\n' % self.outFormats
xdict['command'] = newCommand % xdict
xmls = newXML % xdict
xf = open(self.xmlfile, 'w')
xf.write(xmls)
xf.write('\n')
xf.close()
# ready for the tarball
def makeTooltar(self):
"""
a tool is a gz tarball with eg
/toolname/tool.xml /toolname/tool.py /toolname/test-data/test1_in.foo ...
"""
retval = self.run()
if retval:
print >> sys.stderr, '## Run failed. Cannot build yet. Please fix and retry'
sys.exit(1)
self.makeXML()
tdir = self.toolname
os.mkdir(tdir)
if self.opts.input_tab != 'None': # no reproducible test otherwise? TODO: maybe..
testdir = os.path.join(tdir, 'test-data')
os.mkdir(testdir) # make tests directory
shutil.copyfile(self.opts.input_tab, os.path.join(testdir, self.test1Input))
if self.opts.output_tab != 'None':
shutil.copyfile(self.opts.output_tab, os.path.join(testdir, self.test1Output))
if self.opts.make_HTML:
shutil.copyfile(self.opts.output_html, os.path.join(testdir, self.test1HTML))
if self.opts.output_dir:
shutil.copyfile(self.tlog, os.path.join(testdir, 'test1_out.log'))
op = '%s.py' % self.toolname # new name
outpiname = os.path.join(tdir, op) # path for the tool tarball
pyin = os.path.basename(self.pyfile) # our name - we rewrite ourselves (TM)
notes = ['# %s - a self annotated version of %s generated by running %s\n' % (op, pyin, pyin), ]
notes.append('# to make a new Galaxy tool called %s\n' % self.toolname)
notes.append('# User %s at %s\n' % (self.opts.user_email, timenow()))
pi = open(self.pyfile, 'r').readlines() # our code becomes new tool wrapper (!) - first Galaxy worm
notes += pi
outpi = open(outpiname, 'w')
outpi.write(''.join(notes))
outpi.write('\n')
outpi.close()
stname = os.path.join(tdir, self.sfile)
if not os.path.exists(stname):
shutil.copyfile(self.sfile, stname)
xtname = os.path.join(tdir, self.xmlfile)
if not os.path.exists(xtname):
shutil.copyfile(self.xmlfile, xtname)
tarpath = "%s.gz" % self.toolname
tar = tarfile.open(tarpath, "w:gz")
tar.add(tdir, arcname=self.toolname)
tar.close()
shutil.copyfile(tarpath, self.opts.new_tool)
shutil.rmtree(tdir)
# TODO: replace with optional direct upload to local toolshed?
return retval
def compressPDF(self, inpdf=None, thumbformat='png'):
"""need absolute path to pdf
note that GS gets confoozled if no $TMP or $TEMP
so we set it
"""
        assert os.path.isfile(inpdf), "## Input %s supplied to %s compressPDF not found" % (inpdf, self.myname)
our_env = os.environ.copy()
if not (our_env.get('TMP', None) or our_env.get('TEMP', None)):
our_env['TMP'] = '/tmp'
if not self.temp_warned:
print >> sys.stdout, '## WARNING - no $TMP or $TEMP!!! Please fix - using /tmp temporarily'
self.temp_warned = True
hlog = os.path.join(self.opts.output_dir, "compress_%s.txt" % os.path.basename(inpdf))
sto = open(hlog, 'w')
outpdf = '%s_compressed' % inpdf
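        # First pass: have Ghostscript rewrite the PDF with printer-quality
        # settings to shrink it in place.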
cl = ["gs", "-sDEVICE=pdfwrite", "-dNOPAUSE", "-dUseCIEColor", "-dBATCH", "-dPDFSETTINGS=/printer", "-sOutputFile=%s" % outpdf, inpdf]
x = subprocess.Popen(cl, stdout=sto, stderr=sto, cwd=self.opts.output_dir, env=our_env)
retval1 = x.wait()
sto.close()
if retval1 == 0:
os.unlink(inpdf)
shutil.move(outpdf, inpdf)
os.unlink(hlog)
else:
x = open(hlog, 'r').readlines()
print >> sys.stdout, x
hlog = os.path.join(self.opts.output_dir, "thumbnail_%s.txt" % os.path.basename(inpdf))
sto = open(hlog, 'w')
outpng = '%s.%s' % (os.path.splitext(inpdf)[0], thumbformat)
if self.useGM:
cl2 = ['gm', 'convert', inpdf, outpng]
else: # assume imagemagick
cl2 = ['convert', inpdf, outpng]
x = subprocess.Popen(cl2, stdout=sto, stderr=sto, cwd=self.opts.output_dir, env=our_env)
retval2 = x.wait()
sto.close()
if retval2 != 0:
x = open(hlog, 'r').readlines()
print >> sys.stdout, x
else:
os.unlink(hlog)
retval = retval1 or retval2
return retval
def getfSize(self, fpath, outpath):
"""
format a nice file size string
"""
size = ''
fp = os.path.join(outpath, fpath)
if os.path.isfile(fp):
size = '0 B'
n = float(os.path.getsize(fp))
if n > 2**20:
size = '%1.1f MB' % (n / 2**20)
elif n > 2**10:
size = '%1.1f KB' % (n / 2**10)
elif n > 0:
size = '%d B' % (int(n))
return size
def makeHtml(self):
""" Create an HTML file content to list all the artifacts found in the output_dir
"""
galhtmlprefix = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Galaxy %s tool output - see http://g2.trac.bx.psu.edu/" />
<title></title>
<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
</head>
<body>
<div class="toolFormBody">
"""
galhtmlpostfix = """</div></body></html>\n"""
flist = os.listdir(self.opts.output_dir)
flist = [x for x in flist if x != 'Rplots.pdf']
flist.sort()
html = []
html.append(galhtmlprefix % progname)
html.append('<div class="infomessage">Galaxy Tool "%s" run at %s</div><br/>' % (self.toolname, timenow()))
fhtml = []
if len(flist) > 0:
logfiles = [x for x in flist if x.lower().endswith('.log')] # log file names determine sections
logfiles.sort()
logfiles = [x for x in logfiles if os.path.abspath(x) != os.path.abspath(self.tlog)]
logfiles.append(os.path.abspath(self.tlog)) # make it the last one
pdflist = []
npdf = len([x for x in flist if os.path.splitext(x)[-1].lower() == '.pdf'])
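            # Walk every output file: PDFs are compressed and thumbnailed for
            # the image gallery, and all files are listed in the download table.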
for rownum, fname in enumerate(flist):
dname, e = os.path.splitext(fname)
sfsize = self.getfSize(fname, self.opts.output_dir)
if e.lower() == '.pdf': # compress and make a thumbnail
thumb = '%s.%s' % (dname, self.thumbformat)
pdff = os.path.join(self.opts.output_dir, fname)
retval = self.compressPDF(inpdf=pdff, thumbformat=self.thumbformat)
if retval == 0:
pdflist.append((fname, thumb))
else:
pdflist.append((fname, fname))
if (rownum + 1) % 2 == 0:
fhtml.append('<tr class="odd_row"><td><a href="%s">%s</a></td><td>%s</td></tr>' % (fname, fname, sfsize))
else:
fhtml.append('<tr><td><a href="%s">%s</a></td><td>%s</td></tr>' % (fname, fname, sfsize))
for logfname in logfiles: # expect at least tlog - if more
if os.path.abspath(logfname) == os.path.abspath(self.tlog): # handled later
sectionname = 'All tool run'
if (len(logfiles) > 1):
sectionname = 'Other'
ourpdfs = pdflist
else:
realname = os.path.basename(logfname)
sectionname = os.path.splitext(realname)[0].split('_')[0] # break in case _ added to log
ourpdfs = [x for x in pdflist if os.path.basename(x[0]).split('_')[0] == sectionname]
pdflist = [x for x in pdflist if os.path.basename(x[0]).split('_')[0] != sectionname] # remove
nacross = 1
npdf = len(ourpdfs)
if npdf > 0:
nacross = math.sqrt(npdf) # int(round(math.log(npdf,2)))
if int(nacross)**2 != npdf:
nacross += 1
nacross = int(nacross)
width = min(400, int(1200 / nacross))
html.append('<div class="toolFormTitle">%s images and outputs</div>' % sectionname)
html.append('(Click on a thumbnail image to download the corresponding original PDF image)<br/>')
ntogo = nacross # counter for table row padding with empty cells
html.append('<div><table class="simple" cellpadding="2" cellspacing="2">\n<tr>')
for i, paths in enumerate(ourpdfs):
fname, thumb = paths
s = """<td><a href="%s"><img src="%s" title="Click to download a PDF of %s" hspace="5" width="%d"
alt="Image called %s"/></a></td>\n""" % (fname, thumb, fname, width, fname)
if (i + 1) % nacross == 0:
s += '</tr>\n'
ntogo = 0
if i < (npdf - 1): # more to come
s += '<tr>'
ntogo = nacross
else:
ntogo -= 1
html.append(s)
if html[-1].strip().endswith('</tr>'):
html.append('</table></div>\n')
else:
if ntogo > 0: # pad
html.append('<td> </td>' * ntogo)
html.append('</tr></table></div>\n')
logt = open(logfname, 'r').readlines()
logtext = [x for x in logt if x.strip() > '']
html.append('<div class="toolFormTitle">%s log output</div>' % sectionname)
if len(logtext) > 1:
html.append('\n<pre>\n')
html += logtext
html.append('\n</pre>\n')
else:
html.append('%s is empty<br/>' % logfname)
if len(fhtml) > 0:
fhtml.insert(0, '<div><table class="colored" cellpadding="3" cellspacing="3"><tr><th>Output File Name (click to view)</th><th>Size</th></tr>\n')
fhtml.append('</table></div><br/>')
html.append('<div class="toolFormTitle">All output files available for downloading</div>\n')
html += fhtml # add all non-pdf files to the end of the display
else:
html.append('<div class="warningmessagelarge">### Error - %s returned no files - please confirm that parameters are sane</div>' % self.opts.interpreter)
html.append(galhtmlpostfix)
htmlf = file(self.opts.output_html, 'w')
htmlf.write('\n'.join(html))
htmlf.write('\n')
htmlf.close()
self.html = html
def run(self):
"""
scripts must be small enough not to fill the pipe!
"""
my_env = os.environ.copy()
if self.opts.output_dir:
ste = open(self.elog, 'w')
sto = open(self.tlog, 'w')
sto.write('## Toolfactory running %s as %s script\n' % (self.toolname, self.opts.interpreter))
sto.flush()
p = subprocess.Popen(self.cl, shell=False, stdout=sto, stderr=ste, cwd=self.opts.output_dir, env=my_env)
retval = p.wait()
sto.close()
ste.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open( self.elog, 'rb' )
stderr = ''
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
else:
p = subprocess.Popen(self.cl, shell=False, env=my_env)
retval = p.wait()
if self.opts.make_HTML:
self.makeHtml()
return retval
def remove_me_runBash(self):
"""
cannot use - for bash so use self.sfile
"""
if self.opts.output_dir:
s = '## Toolfactory generated command line = %s\n' % ' '.join(self.cl)
ste = open(self.elog, 'w')
sto = open(self.tlog, 'w')
sto.write(s)
sto.flush()
p = subprocess.Popen(self.cl, shell=False, stdout=sto, stderr=ste, cwd=self.opts.output_dir)
else:
p = subprocess.Popen(self.cl, shell=False)
retval = p.wait()
if self.opts.output_dir:
sto.close()
ste.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open(self.elog, 'rb' )
stderr = ''
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
return retval
def main():
"""
This is a Galaxy wrapper. It expects to be called by a special purpose tool.xml as (eg):
<command interpreter="python">rgToolFactory.py --script_path "$scriptPath" --tool_name "foo" --interpreter "Rscript"
</command>
The tool writes a script to a scriptPath using a configfile.
Some things in the script are templates.
The idea here is that depending on how this code is called, it uses the specified interpreter
to run a (hopefully correct) script/template. Optionally generates a clone of itself
which will run that same script/template as a toolshed repository tarball for uploading to a toolshed.
There's now an updated version which allows arbitrary parameters.
And so it goes.
"""
op = optparse.OptionParser()
a = op.add_option
a('--script_path', default=None)
a('--tool_name', default=None)
a('--interpreter', default=None)
a('--output_dir', default=None)
a('--output_html', default=None)
a('--input_tab', default="None")
a('--output_tab', default="None")
a('--user_email', default='Unknown')
a('--bad_user', default=None)
a('--make_Tool', default=None)
a('--make_HTML', default=None)
a('--help_text', default=None)
a('--tool_desc', default=None)
a('--new_tool', default=None)
a('--tool_version', default=None)
opts, args = op.parse_args()
assert not opts.bad_user, 'UNAUTHORISED: %s is NOT authorized to use this tool until Galaxy admin adds %s to admin_users in universe_wsgi.ini' % (opts.bad_user, opts.bad_user)
assert opts.tool_name, '## Tool Factory expects a tool name - eg --tool_name=DESeq'
assert opts.interpreter, '## Tool Factory wrapper expects an interpreter - eg --interpreter=Rscript'
assert os.path.isfile(opts.script_path), '## Tool Factory wrapper expects a script path - eg --script_path=foo.R'
if opts.output_dir:
try:
os.makedirs(opts.output_dir)
        except OSError:
            pass
r = ScriptRunner(opts)
if opts.make_Tool:
retcode = r.makeTooltar()
else:
retcode = r.run()
os.unlink(r.sfile)
if retcode:
sys.exit(retcode) # indicate failure to job runner
if __name__ == "__main__":
main()
|
suncycheng/intellij-community | refs/heads/master | python/testData/refactoring/changeSignature/duplicateNamesOfStarredParameters.py | 79 | def func(*foo, **bar):
pass |
kk47/C-Cpp | refs/heads/master | deppends/python/requests/packages/charade/codingstatemachine.py | 206 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart
from .compat import wrap_ord
class CodingStateMachine:
def __init__(self, sm):
self._mModel = sm
self._mCurrentBytePos = 0
self._mCurrentCharLen = 0
self.reset()
def reset(self):
self._mCurrentState = eStart
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
# PY3K: aBuf is a byte stream, so c is an int, not a byte
byteCls = self._mModel['classTable'][wrap_ord(c)]
if self._mCurrentState == eStart:
self._mCurrentBytePos = 0
self._mCurrentCharLen = self._mModel['charLenTable'][byteCls]
# from byte's class and stateTable, we get its next state
curr_state = (self._mCurrentState * self._mModel['classFactor']
+ byteCls)
self._mCurrentState = self._mModel['stateTable'][curr_state]
self._mCurrentBytePos += 1
return self._mCurrentState
def get_current_charlen(self):
return self._mCurrentCharLen
def get_coding_state_machine(self):
return self._mModel['name']
|
shybovycha/buck | refs/heads/master | test/com/facebook/buck/cli/bootstrapper/class_loader_test.py | 21 | import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from project_workspace import ProjectWorkspace
class ClassLoaderTest(unittest.TestCase):
def test_should_not_pollute_classpath_when_processor_path_is_set(self):
"""
Tests that annotation processors get their own class path, isolated from Buck's.
There was a bug caused by adding annotation processors and setting the processorpath
for javac. In that case, Buck's version of guava would leak into the classpath of the
annotation processor causing it to fail to run and all heck breaking loose."""
test_data = os.path.join('test', 'com', 'facebook', 'buck', 'cli', 'bootstrapper',
'testdata', 'old_guava')
with ProjectWorkspace(test_data) as workspace:
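            # Building //:example should only succeed if the annotation
            # processor ran with its own isolated classpath (see docstring).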
returncode = workspace.run_buck('build',
'//:example',
)
self.assertEquals(0, returncode)
if __name__ == '__main__':
unittest.main()
|
abhishekgahlot/youtube-dl | refs/heads/master | youtube_dl/extractor/youjizz.py | 148 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class YouJizzIE(InfoExtractor):
_VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/[^/#?]+-(?P<id>[0-9]+)\.html(?:$|[?#])'
_TEST = {
'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html',
'md5': '07e15fa469ba384c7693fd246905547c',
'info_dict': {
'id': '2189178',
'ext': 'flv',
"title": "Zeichentrick 1",
"age_limit": 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
age_limit = self._rta_search(webpage)
video_title = self._html_search_regex(
r'<title>\s*(.*)\s*</title>', webpage, 'title')
embed_page_url = self._search_regex(
r'(https?://www.youjizz.com/videos/embed/[0-9]+)',
webpage, 'embed page')
webpage = self._download_webpage(
embed_page_url, video_id, note='downloading embed page')
# Get the video URL
m_playlist = re.search(r'so.addVariable\("playlist", ?"(?P<playlist>.+?)"\);', webpage)
if m_playlist is not None:
playlist_url = m_playlist.group('playlist')
playlist_page = self._download_webpage(playlist_url, video_id,
'Downloading playlist page')
m_levels = list(re.finditer(r'<level bitrate="(\d+?)" file="(.*?)"', playlist_page))
if len(m_levels) == 0:
raise ExtractorError('Unable to extract video url')
videos = [(int(m.group(1)), m.group(2)) for m in m_levels]
(_, video_url) = sorted(videos)[0]
video_url = video_url.replace('%252F', '%2F')
else:
video_url = self._search_regex(r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);',
webpage, 'video URL')
return {
'id': video_id,
'url': video_url,
'title': video_title,
'ext': 'flv',
'format': 'flv',
'player_url': embed_page_url,
'age_limit': age_limit,
}
|