#!/usr/bin/env python3
#from __future__ import division
import PIL
import sys
import os
import argparse
import numpy as np
def print_fqr_format():
print('''fqr file format:
*...xxx***x*x**x
xx****xxxx*..***
'x' or 'X' => black
'.' => white
'*' => unknown
It should be an NxN matrix with only 'x', '.' and '*' characters
Spaces around lines will be erased and empty lines will be ignored
Size must be NxN where N is (4*qr_version+17) meaning 21, 25, 29..., 177
1<=qr_version<=40
''')
class MalformedFQRException(Exception):
def __init__(self, msg):
super(MalformedFQRException, self).__init__(msg)
# calculate mask val at pos i, j with mask k
def get_mask(k):
if k == 0:
return lambda i, j: (i + j) % 2 == 0
if k == 1:
return lambda i, j: i % 2 == 0
if k == 2:
return lambda i, j: j % 3 == 0
if k == 3:
return lambda i, j: (i + j) % 3 == 0
if k == 4:
return lambda i, j: (i // 2 + j // 3) % 2 == 0
if k == 5:
return lambda i, j: (i * j) % 2 + (i * j) % 3 == 0
if k == 6:
return lambda i, j: ((i * j) % 2 + (i * j) % 3) % 2 == 0
if k == 7:
return lambda i, j: ((i * j) % 3 + (i + j) % 2) % 2 == 0
def bin_ar_to_int(bin_ar):
bs = ''.join(bin_ar).replace('x', '1').replace('.', '0')
return int(bs, 2)
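# Quick sanity checks (doctest-style):
#   get_mask(0)(0, 0) -> True, get_mask(0)(0, 1) -> False  (checkerboard)
#   bin_ar_to_int('x.x') -> 5  ('x' -> 1, '.' -> 0, so 0b101)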
class FQR(object):
FINDER_POS = ['LT', 'RT', 'LB', 'RB']
FINDER_POS_PATTERN = np.array([ list(x) for x in [
'xxxxxxx',
'x.....x',
'x.xxx.x',
'x.xxx.x',
'x.xxx.x',
'x.....x',
'xxxxxxx'
]
])
ALIGN_PATTERN = np.array([ list(x) for x in [
'xxxxx',
'x...x',
'x.x.x',
'x...x',
'xxxxx'
]
])
# version, location list
ALIGN_PATTERN_LOC = [
(2, [6, 18]),
(3, [6, 22]),
(4, [6, 26]),
(5, [6, 30]),
(6, [6, 34]),
(7, [6, 22, 38]),
(8, [6, 24, 42]),
(9, [6, 26, 46]),
(10, [6, 28, 50]),
(11, [6, 30, 54]),
(12, [6, 32, 58]),
(13, [6, 34, 62]),
(14, [6, 26, 46, 66]),
(15, [6, 26, 48, 70]),
(16, [6, 26, 50, 74]),
(17, [6, 30, 54, 78]),
(18, [6, 30, 56, 82]),
(19, [6, 30, 58, 86]),
(20, [6, 34, 62, 90]),
(21, [6, 28, 50, 72, 94]),
(22, [6, 26, 50, 74, 98]),
(23, [6, 30, 54, 78, 102]),
(24, [6, 28, 54, 80, 106]),
(25, [6, 32, 58, 84, 110]),
(26, [6, 30, 58, 86, 114]),
(27, [6, 34, 62, 90, 118]),
(28, [6, 26, 50, 74, 98, 122]),
(29, [6, 30, 54, 78, 102, 126]),
(30, [6, 26, 52, 78, 104, 130]),
(31, [6, 30, 56, 82, 108, 134]),
(32, [6, 34, 60, 86, 112, 138]),
(33, [6, 30, 58, 86, 114, 142]),
(34, [6, 34, 62, 90, 118, 146]),
(35, [6, 30, 54, 78, 102, 126, 150]),
(36, [6, 24, 50, 76, 102, 128, 154]),
(37, [6, 28, 54, 80, 106, 132, 158]),
(38, [6, 32, 58, 84, 110, 136, 162]),
(39, [6, 26, 54, 82, 110, 138, 166]),
(40, [6, 30, 58, 86, 114, 142, 170])
]
BLACK = ord('x')
WHITE = ord('.')
UNKNW = ord('*')
# Error Correction Level, mask, format string
FORMATS = [
('L', 0, 'xxx.xxxxx...x..'),
('L', 1, 'xxx..x.xxxx..xx'),
('L', 2, 'xxxxx.xx.x.x.x.'),
('L', 3, 'xxxx...x..xxx.x'),
('L', 4, 'xx..xx...x.xxxx'),
('L', 5, 'xx...xx...xx...'),
('L', 6, 'xx.xx...x.....x'),
('L', 7, 'xx.x..x.xxx.xx.'),
('M', 0, 'x.x.x.....x..x.'),
('M', 1, 'x.x...x..x..x.x'),
('M', 2, 'x.xxxx..xxxxx..'),
('M', 3, 'x.xx.xx.x..x.xx'),
('M', 4, 'x...x.xxxxxx..x'),
('M', 5, 'x......xx..xxx.'),
('M', 6, 'x..xxxxx..x.xxx'),
('M', 7, 'x..x.x.x.x.....'),
('Q', 0, '.xx.x.x.x.xxxxx'),
('Q', 1, '.xx.....xx.x...'),
('Q', 2, '.xxxxxx..xx...x'),
('Q', 3, '.xxx.x......xx.'),
('Q', 4, '.x..x..x.xx.x..'),
('Q', 5, '.x....xx.....xx'),
('Q', 6, '.x.xxx.xx.xx.x.'),
('Q', 7, '.x.x.xxxxx.xx.x'),
('H', 0, '..x.xx.x...x..x'),
('H', 1, '..x..xxx.xxxxx.'),
('H', 2, '..xxx..xxx..xxx'),
('H', 3, '..xx..xxx.x....'),
('H', 4, '....xxx.xx...x.'),
('H', 5, '.....x..x.x.x.x'),
('H', 6, '...xx.x....xx..'),
('H', 7, '...x.....xxx.xx')
]
# bit encoding modes
MODES = {
'0001':'numeric',
'0010':'alphanumeric',
'0100':'byte',
'1000':'kanji',
'0000':'terminator'
}
ALPHANUM = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:'
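# Alphanumeric packing (decoded in try_read below): two characters per
# 11-bit group, value = 45 * index(c1) + index(c2); e.g. 'AB' encodes as
# 45*10 + 11 = 461. A trailing odd character occupies 6 bits.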
@staticmethod
def get_char_count_ind_len(mode, version):
mode = 4 - mode.find('1')  # map the '0001'..'1000' mode strings back to 1..4
if version < 10:
if mode == 1: return 10
if mode == 2: return 9
if mode == 3: return 8
if mode == 4: return 8
if version < 27:
if mode == 1: return 12
if mode == 2: return 11
if mode == 3: return 16
if mode == 4: return 10
if mode == 1: return 14
if mode == 2: return 13
if mode == 3: return 16
if mode == 4: return 12
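# e.g. get_char_count_ind_len('0010', 5) -> 9
# (alphanumeric mode, version 5: 9-bit character count indicator)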
def __init__(self, path=None):
self.dirty = True
self.N = -1
self.qr = []
# corner position
self.pos_finderp = [] # 0: LT, 1: RT, 2: LB, 3: RB as in FINDER_POS
# align position
self.pos_align = [] # 0,1,... depends on version
if path is not None:
self.load_qr(path)
def get_qr(self):
return self.qr
@staticmethod
def print_qr(qr):
print('\n'+'\n'.join([ ''.join(x) for x in qr])+'\n')
# '*' in mstr will be ignored; cstr can't have '*'
@staticmethod
def _qstr_match(cstr, mstr):
cstr = ''.join(cstr)
mstr = ''.join(mstr)
for a, b in zip(cstr, mstr):
if b != '*' and a != b:
return False
return True
@staticmethod
def size2version(N):
error = 'Size is invalid: must be NxN with N = (4*version + 17); got N=' + str(N)
N -= 17
if N % 4 != 0:
raise MalformedFQRException(error)
N //= 4
if N < 1 or N > 40:
raise MalformedFQRException('Unknown version: ' + str(N))
return N
@staticmethod
def version2size(N):
return 4*N+17
# if path is set save image to path
@staticmethod
def save_qr_img(qr, path=None):
dqr = qr.copy() # copy (slicing alone would give a view)
dqr[dqr == 'x'] = '0' # turn str array to color array
dqr[dqr == '.'] = '1'
dqr[dqr == '*'] = '2'
dqr = dqr.astype(np.uint32)
dqr[dqr == 0] = 0
dqr[dqr == 1] = 255
dqr[dqr == 2] = 128
from PIL import Image
N = len(dqr)
nqr = np.zeros((N*8, N*8)) # x8 zoom image
for i in range(N*8):
for j in range(N*8):
nqr[i, j] = dqr[i//8, j//8]
if nqr[i, j] == 128:
nqr[i, j] = ((i+j)%2)*255
img = Image.fromarray(np.uint8(nqr))
if path is None:
img.show()
else:
img.save(path)
def load_qr(self, path):
self.dirty = True
with open(path, 'r') as f:
# read non empty lines, erase end of lines
self.qr = np.array([ list( x.strip('|\n').lower() ) for x in f.readlines() if len(x)>1])
self.N = len(self.qr)
self.version = FQR.size2version(self.N)
print("Version:", self.version, "\nSize: {0}x{0}".format(self.N), "\n")
error = ''
for line in self.qr:
print(''.join(line))
if len(line) != self.N:
error = 'Dimensions do not match: line_len, N: '+str(len(line))+', '+str(self.N)
elif any(ch not in 'x.*' for ch in line):
error = 'Not allowed character(s): ' + ', '.join([ch for ch in line if ch not in 'x.*'])
if error != '':
raise MalformedFQRException(error)
self.dirty = False
self.bc_qr = self.qr.copy() # take a real copy for reversing
print('FQR file loaded successfully:', path, '\n')
# TODO: make this accept a percentage of matches, i.e. there can be '*' in there
# TODO: check the timing patterns as well for more accurate results
def find_positioning(self):
s_qr = self.qr[:7, :7]
if np.array_equal(FQR.FINDER_POS_PATTERN, s_qr):
print('Position found: LT')
self.pos_finderp.append(0)
s_qr = self.qr[:7, -7:]
if np.array_equal(FQR.FINDER_POS_PATTERN, s_qr):
print('Position found: RT')
self.pos_finderp.append(1)
s_qr = self.qr[-7:, :7]
if np.array_equal(FQR.FINDER_POS_PATTERN, s_qr):
print('Position found: LB')
self.pos_finderp.append(2)
s_qr = self.qr[-7:, -7:]
if np.array_equal(FQR.FINDER_POS_PATTERN, s_qr):
print('Position found: RB')
self.pos_finderp.append(3)
# get not found corners
miss_finder = [x for x in range(4) if x not in self.pos_finderp]
return miss_finder
# assumes alignment is found
# need to check other format positions; currently only RT is checked
def find_format(self):
fstr = ''.join(self.qr[8, -8:])
res = []
for f in FQR.FORMATS:
print(f)
print(fstr)
print(f[2][-len(fstr):])
print()
if self._qstr_match(f[2][-len(fstr):], fstr):
res.append(f)
return res
def fix_rotation(self, align, qr=None):
if qr is None:
qr = self.qr
# CCW turns needed to bring the empty corner (0: LT, 1: RT, 2: LB, 3: RB) to RB
num_turns = [2, 3, 1, 0]
qr[:, :] = np.rot90(qr, num_turns[align]).copy()
# assumes rotation is already fixed and fixes finder patterns
def fix_position_patterns(self, qr=None):
if qr is None:
qr = self.qr
#fix LT
qr[:7, :7] = FQR.FINDER_POS_PATTERN[:, :]
for i in range(8):
qr[7][i] = qr[i][7] = '.'
# fix RT
qr[:7, -7:] = FQR.FINDER_POS_PATTERN[:, :]
for i in range(8):
qr[7][-i-1] = qr[i][ -8] = '.'
# fix LB
qr[-7:, :7] = FQR.FINDER_POS_PATTERN[:, :]
for i in range(8):
qr[-i-1][7] = qr[-8][i] = '.'
# RB is always empty
def fix_finder_patterns(self, qr=None):
if qr is None:
qr = self.qr
pass
def fix_timing_patterns(self, qr=None):
if qr is None:
qr = self.qr
for i in range(7, len(qr)-7):
p = ('x' if i%2 == 0 else '.')
qr[i][6] = qr[6][i] = p
def fix_format(self, f, qr=None):
if qr is None:
qr = self.qr
fs = np.array(list(f))
print('Fixing format with:', fs)
qr[8, :6] = fs[:6]
qr[8, 7:9] = fs[6:8]
qr[7, 8] = fs[8]
qr[8, -8:] = fs[-8:]
qr[:6, 8] = fs[-6:][::-1]
qr[-7:, 8] = fs[:7][::-1]
def fix_alignment_patterns(self, qr=None):
if qr is None:
qr = self.qr
if len(qr) <= 21: # these don't have align patterns
return
locs = None
for l in FQR.ALIGN_PATTERN_LOC:
if self.version == l[0]:
locs = l[1]
break
loc1 = locs[0] # first loc
locN = locs[len(locs)-1] # last loc
for i in locs:
for j in locs:
if i == loc1 and (j == loc1 or j == locN):
continue
elif i == locN and j == loc1:
continue
qr[i-2:i+3, j-2:j+3] = FQR.ALIGN_PATTERN[:, :]
def fix_dark_module(self, qr=None):
if qr is None:
qr = self.qr
qr[4*self.version+9][8] = 'x'
@staticmethod
def get_next_bit(qr):
N = len(qr)
j = N-1
while j > 0:
if j == 6: # skip vertical timing patt.
j -= 1
for i in range(N-1, -1, -1):
yield i, j
yield i, j-1
j -= 2
for i in range(0, N, 1):
yield i, j
yield i, j-1
j -= 2
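# get_next_bit walks the standard QR zigzag: two-module-wide columns,
# right to left, alternating upward and downward, skipping the vertical
# timing column. For a toy 4x4 grid the first yields are
# (3,3) (3,2) (2,3) (2,2) (1,3) (1,2) (0,3) (0,2), then (0,1) (0,0) ...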
def try_read(self):
# generate protected area of qr code by mimicking fixes
pr_qr = np.zeros(self.qr.shape, dtype=str)
self.fix_dark_module(pr_qr)
self.fix_position_patterns(pr_qr)
self.fix_alignment_patterns(pr_qr)
self.fix_finder_patterns(pr_qr)
self.fix_timing_patterns(pr_qr)
self.fix_format('...............', pr_qr)
# convert string to truth values
is_data = (pr_qr == '')
mask = get_mask(self.format[1])
d = ''
for i, j in FQR.get_next_bit(self.qr):
if not is_data[i][j]:
continue
c = self.qr[i][j]
m = mask(i, j)
if not m:
d += c
elif c == 'x':
d += '.'
else:
d += 'x'
### TODO find a better solution here since data segments are constant
ds = d[:26*8].replace('x', '1').replace('.', '0')
# re-arrange d1-d13 and d14-d26
d = ''
for i in range(0, len(ds), 16):
d += ds[i:i+8]
for i in range(8, len(ds), 16):
d += ds[i:i+8]
ds = d
print('Read valid data: ', ds)
LDS = len(ds)
k = 0
res = ''
while k < LDS:
mode = ds[k:k+4]
k += 4
print(k, 'Read: ', ds[:k])
ds = ds[k:]
k = 0
if mode not in FQR.MODES:
raise TypeError('Bits are broken unknown mode: '+mode)
if mode == '0000':
print('Found:', res)
return res
print('Mode:', FQR.MODES[mode])
ind_len = FQR.get_char_count_ind_len(mode, self.version)
char_cnt = bin_ar_to_int(ds[k:k+ind_len])
k += ind_len
print('Ind len:', ind_len)
print('Char count:', char_cnt)
if mode == '0001': # numeric
for t in range(char_cnt):
raise NotImplementedError('will look how to do later')
k += 3
elif mode == '0010': # alphanumeric
for t in range(char_cnt//2):
x = bin_ar_to_int(ds[k:k+11])
x1 = x//45
x2 = x%45
c1 = FQR.ALPHANUM[x1]
res += c1
c2 = FQR.ALPHANUM[x2]
res += c2
print('ch1:', c1, x1)
print('ch2:', c2, x2)
k += 11
if char_cnt % 2 == 1:
# a trailing odd character is packed into 6 bits, not 11
x = bin_ar_to_int(ds[k:k+6])
print('ch3:', FQR.ALPHANUM[x], x)
res += FQR.ALPHANUM[x]
k += 6
elif mode == '0100': # byte
for t in range(char_cnt):
x = bin_ar_to_int(ds[k:k+8])
c = chr(x)
res += c
k += 8
print('ch0:', c, x, ds[k-8:k])
elif mode == '1000': # kanji
raise NotImplementedError('will look how to do later (sorry you bumped into one :)')
def fix_qr(self):
poses = self.find_positioning()
for p in poses:
print('Trying alignment:', p)
bc_qr = self.qr.copy()
self.fix_rotation(p)
self.fix_dark_module()
self.fix_position_patterns()
self.fix_alignment_patterns()
self.fix_finder_patterns()
self.fix_timing_patterns()
fmts = self.find_format()
if len(fmts) == 0:
print('no matching format for: ', p)
continue
for f in fmts:
print('Trying format:', f)
fbc_qr = self.qr.copy()
self.format = f
self.fix_format(self.format[2])
res = self.try_read()
if res is not None:
return res
self.qr = fbc_qr
self.qr = bc_qr
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', help='FQR file to fix')
parser.add_argument('-g','--gen-qr', action='store', type=int, help='generate empty fqr matrix')
parser.add_argument('--show-format', action='store_true', help='shows fqr matrix format')
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit(0)
if args.gen_qr:
N = args.gen_qr
if N < 1: N = 1
if N > 40: N = 40
N = N*4+17
qr = [['*' for col in range(N)] for row in range(N)]
qr_str = '\n'.join([''.join(s) for s in qr])+'\n'
if args.file:
with open(args.file, 'w') as f:
f.write(qr_str)
else:
print(qr_str)
sys.exit(0)
if args.show_format:
print_fqr_format()
sys.exit(0)
fqr = FQR(args.file)
res = fqr.fix_qr()
print('Result:', res)
#fqr.print_qr(fqr.get_qr())
FQR.save_qr_img(fqr.get_qr(), args.file+'-fixed.png')
'''
TODO LIST
* for each possible fqr matrix we will try to fix it by
** trying possible missing bits
** give possible results (with filters such as visible ascii)
'''
|
This achievement is for earning 500 hits with henchmen that are tied to the "Keikain Onmyoji". There aren't many cards in this category, so this one will be slightly tougher to earn. These cards are under the lighter "yellow" category, not to be confused with the gold category, which contains all of the gold cards. You can earn these hits across all modes excluding the practice and tutorial modes.
It's advised you do this after completing story mode as well as challenge mode so you have as many cards as possible. The only two I used for the hits were the Ryuji Keikain card and the Yura Keikain card. If you need information on who these characters are, use the wiki below.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" File containing simple spacial fact definition. """
from pssfb_additional import enum
# RCC5
# DR - disconnected
# PO - partially overlapping
# PP - proper part
# PPI - proper part inversed
# EQ - equal
rel_type = enum(DR = "DR", PO = "PO", PP = "PP", PPI = "PPI", EQ = "EQ")
ALL_RELATIONS = set([rel_type.DR, rel_type.PO, rel_type.PP, rel_type.PPI, rel_type.EQ])
class spacial_fact(object):
f_subject = None
f_object = None
f_relation = None
def __init__(self, sub, rel, obj):
"""Constructor."""
self.f_subject = sub
self.f_object = obj
if isinstance(rel, set):
self.f_relation = set() | rel
else:
self.f_relation = set()
self.f_relation.add(rel)
def __repr__(self):
return str(self.f_subject) + " " + str(self.f_relation) + " " + str(self.f_object)
def get_id(self):
return str(self.f_subject) + " " + str(self.f_relation).replace("', '", ",").replace("set(['", "--").replace("'])", "->") + " " + str(self.f_object)
def compose(self, second_fact):
if str(self.f_object) == str(second_fact.f_subject):
new_rel = set()
for one_fr_rel in self.f_relation:
if new_rel == ALL_RELATIONS:
break
for one_to_rel in second_fact.f_relation:
new_rel = new_rel | _compose_relations_(one_fr_rel, one_to_rel)
return spacial_fact(self.f_subject, new_rel, second_fact.f_object)
else:
# Tried to compose facts without common part!
return None
def _compose_(prev_rel_set, next_rel_set):
new_rel = set()
for one_fr_rel in prev_rel_set:
if new_rel == ALL_RELATIONS:
break
for one_to_rel in next_rel_set:
new_rel = new_rel | _compose_relations_(one_fr_rel, one_to_rel)
return new_rel
def _compose_relations_(prev_rel, next_rel):
""" Typical for RCC5. """
if next_rel == rel_type.EQ:
return set([prev_rel])
elif prev_rel == rel_type.EQ:
return set([next_rel])
elif next_rel == rel_type.PPI:
if prev_rel == rel_type.PP:
return ALL_RELATIONS
elif prev_rel == rel_type.PO:
return set([rel_type.DR, rel_type.PO, rel_type.PPI])
elif prev_rel == rel_type.DR:
return set([prev_rel])
else:
return set([next_rel])
elif next_rel == rel_type.PP:
if prev_rel == rel_type.DR:
return set([rel_type.DR, rel_type.PO, rel_type.PP])
elif prev_rel == rel_type.PO:
return set([rel_type.PO, rel_type.PP])
elif prev_rel == rel_type.PPI:
return set([rel_type.PO, rel_type.PP, rel_type.PPI, rel_type.EQ])
else:
return set([next_rel])
elif next_rel == rel_type.PO:
if prev_rel == rel_type.PO:
return ALL_RELATIONS
elif prev_rel == rel_type.PPI:
return set([rel_type.PO, rel_type.PPI])
else:
return set([rel_type.DR, rel_type.PO, rel_type.PP])
else:
if prev_rel == rel_type.DR:
return ALL_RELATIONS
elif prev_rel == rel_type.PP:
return set([next_rel])
else:
return set([rel_type.DR, rel_type.PO, rel_type.PPI])
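# Minimal usage sketch: composing "A PP B" with "B PP C" keeps {PP},
# while composing PP with PPI yields all five RCC5 relations:
#   f1 = spacial_fact('A', rel_type.PP, 'B')
#   f2 = spacial_fact('B', rel_type.PP, 'C')
#   f1.compose(f2).f_relation  ->  set(['PP'])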
|
...this is what the Irish author Jonathan Swift already knew. What was important in the 18th century is still important today: you definitely need beans of high quality to make a nice cup of coffee. That’s why we only use Arabica beans from the firm Heimbs (the oldest coffee roasting house in Germany) for our coffee specialities.
Besides our hot drinks we offer a wide choice of non-alcoholic drinks, draught beer, wheat beer and Franconian wines. |
"""
This module implements the Emocracy game rules as far as score keeping is
concerned. The rest of the game rules are in actions.py.
This module needs to get a lot bigger.
"""
from gamelogic.levels import change_score
VOTE_SCORE = 1
USER_VOTE_SCORE = 20
TAG_SCORE = 1
PROPOSE_SCORE = 2
PROPOSE_VOTE_SCORE = 1
ISSUE_VOTE_SCORE = 1
def vote(user, issue, direction, voted_already):
"""Score keeping for voting."""
if not voted_already:
# User only gets points if it is the first vote on the issue.
change_score(user, VOTE_SCORE)
if direction in [-1, 1]:
# Proposer only gets points if the issue gets a for or against vote.
change_score(issue.user, PROPOSE_VOTE_SCORE)
issue.score += ISSUE_VOTE_SCORE
# Write all changes back to the database.
issue.save()
def vote_user(user, voted_user, direction, voted_already):
"""Score keeping for voting on another user.
"""
if not voted_already:
# User only gets points if user is the first vote.
change_score(voted_user, USER_VOTE_SCORE)
change_score(user, USER_VOTE_SCORE)
change_score(voted_user, 0) # check parliament score of voted_user
def propose(user):
"""Score keeping for proposing of issues"""
change_score(user, PROPOSE_SCORE)
def tag(user, tag):
pass
def multiply(user, issue):
pass
|
Serving all of Lake County and McHenry County, IL.
Whether it's Commercial Electrical Wiring or Home Electric Repair, let BTB be your Electrical Contractor. We will send electricians who take pride in their work.
Vice President & Chief Estimator of BTB. Bill has 31-plus years of electrical expertise, including estimating and job management on projects ranging from $1,000 to $1.25M. Bill has held the supervising electrical license from the City of Chicago for over 25 yrs. He is well versed in NEC codes as well as village amendments. As a certified trade instructor for over 10 yrs., Bill has helped hundreds of electricians make it through their apprenticeship program. With the array of experience Bill has gathered over the years working in the field and as an estimator/project manager, he has many ideas and techniques to better manage and execute projects of all sizes.
President & Administrative Service Manager - Tina has over 20 years in a construction office atmosphere. Tina performs accounting and office administrative functions to ensure proficiency in office operations. Tina is experienced with Residential Builder billing practices as well as Commercial AIA Billing, Certified Payroll, Contracts, Insurance, Village licensing, Permits, Bonding Matters & Notary. Tina develops relationships with our clients and their support people to make sure BTB is meeting all licensing, permit, insurance and billing requirements, so projects can progress without any holdup for our customers or their clients.
BTB is a family owned and operated electrical contractor. We are licensed with the City of Chicago and are registered contractors in various Villages and Cities throughout the Chicago area. We work with several national residential home builders on new residential construction projects. We specialize in Commercial Build outs, and lighting design. We have service technicians available for residential and commercial service needs. We offer 24 Hr. Emergency Services at 847-875-8063 for Lake & McHenry County IL.
Copyright © Tina Boeckmann. All rights reserved. |
#! /usr/bin/env python
import math
import sys
import numpy
from scipy.stats import fisher_exact as fisher
import re
target = re.compile( '([\+\-])([0-9]+)([ACGTNRMacgtnrm]+)' )
remove_chr = re.compile( '\^.' )
class FisherInfo:
def __init__(self):
self.chr = 0
self.start = 0
self.end = 0
self.ref = ""
self.tumor_bases = {
"A": 0,
"C": 0,
"G": 0,
"T": 0,
"a": 0,
"c": 0,
"g": 0,
"t": 0
}
self.ctrl_bases = {
"A": 0,
"C": 0,
"G": 0,
"T": 0,
"a": 0,
"c": 0,
"g": 0,
"t": 0
}
self.rna_bases = {
"A": 0,
"C": 0,
"G": 0,
"T": 0,
"a": 0,
"c": 0,
"g": 0,
"t": 0
}
self.tumor_quals = {
"A": [],
"C": [],
"G": [],
"T": [],
"a": [],
"c": [],
"g": [],
"t": []
}
self.ctrl_quals = {
"A": [],
"C": [],
"G": [],
"T": [],
"a": [],
"c": [],
"g": [],
"t": []
}
self.rna_quals = {
"A": [],
"C": [],
"G": [],
"T": [],
"a": [],
"c": [],
"g": [],
"t": []
}
def bases_format_process(self, read_bases, qual_list):
deleted = 0
iter = target.finditer( read_bases )
for m in iter:
site = m.start()
type = m.group( 1 )
num = m.group( 2 )
bases = m.group( 3 )[ 0:int( num ) ]
read_bases = read_bases[ 0:site - deleted ] + read_bases[ site + int( num ) + len( num ) + 1 - deleted: ]
deleted += 1 + len( num ) + int( num )
# Remove '^.' and '$'
read_bases = remove_chr.sub( '', read_bases )
read_bases = read_bases.translate( None, '$' )
# Error check
if len( read_bases ) != len( qual_list ):
print >> sys.stderr, ("mpileup data is not good: {0}, {1}".format( read_bases, qual_list ))
return None
# Count mismatch
return read_bases
def set_mpileup_data(self, mp_list):
# Prepare mpileup data
self.chr = mp_list[0]
self.start = mp_list[1]
self.end = mp_list[1]
tumor_bases = self.bases_format_process(mp_list[4], mp_list[5])
for base in tumor_bases:
self.add_tumor_base(base)
for base, qual in zip(tumor_bases, mp_list[5]):
self.add_tumor_quals(base, qual)
if len(mp_list) > 7:
ctrl_bases = self.bases_format_process(mp_list[7], mp_list[8])
for base in ctrl_bases:
self.add_ctrl_base(base)
for base, qual in zip(ctrl_bases, mp_list[8]):
self.add_ctrl_quals(base, qual)
if len(mp_list) > 10:
rna_bases = self.bases_format_process(mp_list[10], mp_list[11])
for base in rna_bases:
self.add_rna_base(base)
for base, qual in zip(rna_bases, mp_list[11]):
self.add_rna_quals(base, qual)
def set_ref(self,ref):
self.ref = ref
def add_base(self,bases,base):
if base in 'ATGCatgc':
bases[base] += 1
def add_tumor_base(self, base):
self.add_base(self.tumor_bases, base)
def add_ctrl_base(self, base):
self.add_base(self.ctrl_bases, base)
def add_rna_base(self, base):
self.add_base(self.rna_bases, base)
def add_quals(self, quals, base, qual):
if base in 'ATGCatgc':
ord_qual = (int(ord(qual))-33)
q = quals[base]
q.append(min(ord_qual,41))
def add_tumor_quals(self, base, qual):
self.add_quals(self.tumor_quals, base, qual)
def add_ctrl_quals(self, base, qual):
self.add_quals(self.ctrl_quals, base, qual)
def add_rna_quals(self, base, qual):
self.add_quals(self.rna_quals, base, qual)
def get_depth(self, bases):
count = 0
for n in "ACGTacgt":
count += bases[n]
return count
def get_tumor_depth(self):
return self.get_depth(self.tumor_bases)
def get_ctrl_depth(self):
return self.get_depth(self.ctrl_bases)
def get_rna_depth(self):
return self.get_depth(self.rna_bases)
def get_depth_plus_strand(self, bases):
count = 0
for n in "ACGT":
count += bases[n]
return count
def get_tumor_depth_plus_strand(self):
return self.get_depth_plus_strand(self.tumor_bases)
def get_ctrl_depth_plus_strand(self):
return self.get_depth_plus_strand(self.ctrl_bases)
def get_rna_depth_plus_strand(self):
return self.get_depth_plus_strand(self.rna_bases)
def get_depth_minus_strand(self, bases):
count = 0
for n in "acgt":
count += bases[n]
return count
def get_tumor_depth_minus_strand(self):
return self.get_depth_minus_strand(self.tumor_bases)
def get_ctrl_depth_minus_strand(self):
return self.get_depth_minus_strand(self.ctrl_bases)
def get_rna_depth_minus_strand(self):
return self.get_depth_minus_strand(self.rna_bases)
def get_tumor_base_total(self, base):
return (self.tumor_bases[base.upper()] + self.tumor_bases[base.lower()])
def get_ctrl_base_total(self, base):
return (self.ctrl_bases[base.upper()] + self.ctrl_bases[base.lower()])
def get_rna_base_total(self, base):
return (self.rna_bases[base.upper()] + self.rna_bases[base.lower()])
def get_tumor_base_plus_strand(self, base):
return (self.tumor_bases[base.upper()])
def get_ctrl_base_plus_strand(self, base):
return (self.ctrl_bases[base.upper()])
def get_rna_base_plus_strand(self, base):
return (self.rna_bases[base.upper()])
def get_tumor_base_minus_strand(self, base):
return (self.tumor_bases[base.lower()])
def get_ctrl_base_minus_strand(self, base):
return (self.ctrl_bases[base.lower()])
def get_rna_base_minus_strand(self, base):
return (self.rna_bases[base.lower()])
def get_misrate(self,mis_base_count,depth):
if mis_base_count == 0:
return float(0)
else:
return (mis_base_count / float(depth))
def get_tumor_misrate(self,base):
return self.get_misrate(self.get_tumor_base_total(base), self.get_tumor_depth())
def get_ctrl_misrate(self,base):
return self.get_misrate(self.get_ctrl_base_total(base), self.get_ctrl_depth())
def get_rna_misrate(self,base):
return self.get_misrate(self.get_rna_base_total(base), self.get_rna_depth())
def get_strand_ratio(self,mis_base_count_plus,mis_base_count_minus):
if (mis_base_count_plus + mis_base_count_minus) == 0:
return float(-1)
elif mis_base_count_plus == 0:
return float(0)
else:
return (mis_base_count_plus / float(mis_base_count_plus + mis_base_count_minus))
def get_tumor_strand_ratio(self,base):
return self.get_strand_ratio(self.get_tumor_base_plus_strand(base), self.get_tumor_base_minus_strand(base))
def get_ctrl_strand_ratio(self, base):
return self.get_strand_ratio(self.get_ctrl_base_plus_strand(base), self.get_ctrl_base_minus_strand(base))
def get_rna_strand_ratio(self, base):
return self.get_strand_ratio(self.get_rna_base_plus_strand(base), self.get_rna_base_minus_strand(base))
def get_fisher_pvalue(self,base):
odds_ratio, fisher_pvalue = fisher(
((int(self.get_tumor_base_total(self.ref)), int(self.get_ctrl_base_total(self.ref))),
(int(self.get_tumor_base_total(base)), int(self.get_ctrl_base_total(base)))),
alternative='two-sided'
)
val = float(0.0)
if fisher_pvalue < 10**(-60):
val = float(60.0)
elif fisher_pvalue > 1.0 - 10**(-10) :
val = float(0.0)
else:
val = -math.log( fisher_pvalue, 10 )
return val
def lod_qual(self, base):
score = float(0)
for qual in self.tumor_quals[base]:
q = float(qual)
p = 10**-(q/10)
score += -math.log(p/(1-p),10)
return score
def get_lod_score(self,base):
return (self.lod_qual(base.upper()) + self.lod_qual(base.lower()))
def get_lod_score_plus_strand(self,base):
return self.lod_qual(base.upper())
def get_lod_score_minus_strand(self,base):
return self.lod_qual(base.lower())
def get_score_median(self,base):
med = 0
if len(self.tumor_quals[base]) != 0 or len(self.tumor_quals[base.lower()]) != 0:
alt_array = self.tumor_quals[base] + self.tumor_quals[base.lower()]
med = numpy.median(alt_array)
return med
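# Minimal usage sketch (hypothetical mpileup fields: chrom, pos, ref,
# depth, read bases, base qualities):
#   fi = FisherInfo()
#   fi.set_ref('A')
#   fi.set_mpileup_data(['chr1', '100', 'A', '7', 'AAAaaCc', 'IIIIIII'])
#   fi.get_tumor_depth()       -> 7
#   fi.get_tumor_misrate('C')  -> 2/7.0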
|
We're ready to show you why nobody performs better export factoring than First Financial Factoring. Our factoring business is based on the belief that a deep understanding of our clients' industries results in faster cash advances, lower rates and active roles in helping our clients reach their long term goals. While many other small business factoring companies are choosing to specialize in specific industries and business sectors, we've equipped ourselves to respond to everyone by hiring a comprehensive staff of industry insiders.
So whether you work in construction, health care, trucking or in any other market, we can offer alternatives to financing your business that are developed with a complete understanding of your company's needs and priorities. No other funding provider in our industry can match this level of flexibility and expertise.
If you're either having a hard time paying your operating costs due to lengthy billing cycles or you just don't have the cash on hand that you need to expand, you might be considering loans or venture capitalists as a means of funding your company. But there are alternatives to financing your business that don't involve tying up your assets or turning over partial control of your company to someone else. Factoring your accounts receivable with us allows you to collect on your invoices immediately through cash advances at affordable rates.
A factoring business is preferable to a bank loan because the rates are far more competitive and because you're not borrowing against capital you haven't earned yet. And compared to venture capitalists, factoring allows you to maintain a steady income while retaining complete control over the direction of your business. |
"""
Utilities for Dox.
"""
from ace import config
import os.path
from os import getcwd, mkdir, remove, walk
import hashlib
import json
def check_init():
"""
Checks if Dox has been properly initialized.
"""
env = config.get_env()
if not env.has_option('Project','project'):
raise ValueError('Project not set. Set project with `ace project set` command.')
if not env.has_section('Dox'):
raise ValueError('Dox not initialized. Initialize Dox with `ace dox init --content-type=<content-type> --body-field=<body-field> --key-field=<key-field>` command.')
def dox_dir():
"""
Gets or creates the .dox directory.
"""
dox_dirpath = os.path.join(getcwd(),'.dox')
if not os.path.exists(dox_dirpath):
mkdir(dox_dirpath)
return dox_dirpath
def is_modified(markdown_file_path):
"""
Tests if the markdown file has been modified.
"""
with open(markdown_file_path,'r') as markdown_file:
hashfile_path = '%s.hash' % os.path.join(dox_dir(),'hashes',os.path.split(markdown_file.name)[1])
if os.path.exists(hashfile_path):
d = hashlib.sha256()
d.update(markdown_file.read())
digest = d.hexdigest()
with open(hashfile_path) as hashfile:
stored_hash = hashfile.read()
if stored_hash != digest:
return True # non-matching hashes - file is modified
else:
return False # hashes match - file has not been modified
else:
return True # no stored hash - file is modified by definition
def write_hash(markdown_file_path):
"""
Scans the file and records a hash digest of the contents.
"""
with open(markdown_file_path) as markdown_file:
d = hashlib.sha256()
d.update(markdown_file.read())
digest = d.hexdigest()
hash_file_path = '%s.hash' % os.path.join(dox_dir(),'hashes',os.path.split(markdown_file.name)[1])
with open(hash_file_path,'wb') as hash_file:
hash_file.write(digest)
def clean_hashes():
"""
Cleans the local file hash directory out.
"""
hash_path = os.path.join(dox_dir(),'hashes')
if os.path.exists(hash_path):
for root, dirs, files in walk(hash_path):
for name in files:
if name.endswith('.hash'):
remove(os.path.join(root,name))
else:
mkdir(hash_path)
def get_keyfields():
"""
Gets the keyfields data.
"""
dirpath = dox_dir()
keyfield_path = os.path.join(dirpath,'keyfields.json')
if os.path.exists(keyfield_path):
with open(keyfield_path,'r') as keyfield_file:
keyfield_data = json.loads(keyfield_file.read())
return keyfield_data
else:
return {}
def write_keyfields(data):
"""
Writes the keyfield data file.
"""
dirpath = dox_dir()
keyfield_path = os.path.join(dirpath,'keyfields.json')
with open(keyfield_path,'wb') as keyfield_file:
keyfield_file.write(json.dumps(data))
def get_keymap():
"""
Gets the keymap data.
"""
dirpath = dox_dir()
keymap_path = os.path.join(dirpath,'keymap.json')
if os.path.exists(keymap_path):
with open(keymap_path,'r') as keymap_file:
keymap_data = json.loads(keymap_file.read())
return keymap_data
else:
return {}
def write_keymap(data):
"""
Saves the keymap data.
"""
dirpath = dox_dir()
keymap_path = os.path.join(dirpath,'keymap.json')
with open(keymap_path,'wb') as keymap_file:
keymap_file.write(json.dumps(data))
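# Sketch of the intended round trip (file name is hypothetical):
#   clean_hashes()
#   if is_modified('page.md'):
#       ... push the updated document ...
#       write_hash('page.md')  # later is_modified('page.md') -> False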
|
Super Craft Bros Server, 100 arenas, over 300 players online. Featuring 9 classes, all customized with custom skills. Featuring SethBling's original arenas. Coin shop for player perks.
Hello guys, in this video I am going to show you a cracked Super Craft Bros server. It is my first video and it is probably going to be pretty bad. In the video I a...
If you want to play hunger games and spleef, I know servers for that. But I am looking for the same thing you are looking for. Find the best Minecraft servers with our multiplayer server list. Browse detailed information on each server and vote for your favourite. Our game server hipexel is and will be allowed on our server at any time. Make sure your minecraft client software is updated to 1.8.8 preferably to play. |
#coding=utf-8
#
# Code to test the system.
#
import unittest
import urlparse
import os
from webtest import TestApp
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_file_stub
from google.appengine.api import mail_stub
from google.appengine.api import user_service_stub
from google.appengine.api.memcache import memcache_stub
from google.appengine.api.taskqueue import taskqueue_stub
from debianmeeting import application
APP_ID = u'debianmeeting'
AUTH_DOMAIN = 'gmail.com'
LOGGED_IN_ADMIN = 'test2@example.com'
LOGGED_IN_USER = 'test3@example.com'
TITLE = 'test1'
PREWORK = 'test4'
USER_PREWORK = 'test4'
USER_REALNAME = 'Mr Test9'
CAPACITY = 123456789
class SystemTest(unittest.TestCase):
def setUp(self):
"""set up stub
"""
# API proxy
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
# have a dummy datastore
stub = datastore_file_stub.DatastoreFileStub(
APP_ID,
'/dev/null',
'/dev/null')
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', stub)
os.environ['APPLICATION_ID'] = APP_ID
# user authentication
apiproxy_stub_map.apiproxy.RegisterStub(
'user', user_service_stub.UserServiceStub())
os.environ['AUTH_DOMAIN'] = AUTH_DOMAIN
os.environ['USER_EMAIL'] = LOGGED_IN_ADMIN
# I don't know why this is needed but there's a warning from taskqueue.
os.environ['HTTP_HOST'] = 'localhost:8080'
# mail
apiproxy_stub_map.apiproxy.RegisterStub(
'mail', mail_stub.MailServiceStub())
# memcache
apiproxy_stub_map.apiproxy.RegisterStub(
'memcache', memcache_stub.MemcacheServiceStub())
# taskqueue
apiproxy_stub_map.apiproxy.RegisterStub(
'taskqueue', taskqueue_stub.TaskQueueServiceStub())
self.taskqueue_stub = apiproxy_stub_map.apiproxy.GetStub( 'taskqueue' )
self.taskqueue_stub._root_path = os.path.dirname(__file__)
# ==============================================================
# Utility functions
# ==============================================================
def login(self, username):
"""change login account"""
os.environ['USER_EMAIL'] = username
def createPageCommitHelper(self, app, capacity=CAPACITY):
"""
Creates an event.
@return eventid
"""
response = app.post('/eventadmin/register', {
'eventid': 'na',
'title': TITLE,
'prework': PREWORK,
'capacity': capacity,
})
self.assertEqual('302 Moved Temporarily', response.status)
self.assertTrue('/thanks?eventid=' in response.location)
eventid = response.location.split('=')[1]
return eventid
def verifyThanksPage(self, app, eventid):
"""verify that the Thanks Page content is okay."""
response = app.get('/thanks?eventid=%s' % eventid)
self.assertEqual('200 OK', response.status)
self.assertTrue(eventid in response)
def userEventEntryFormSimple(self, app, eventid, new_entry):
response = app.get('/event', {
'eventid': eventid,
'ui': 'simple',
})
self.assertEqual('200 OK', response.status)
self.assertTrue('<!-- simple_ui -->' in response)
self.assertEqual(not new_entry, '<!-- not new entry -->' in response)
return response
def userEventEntryForm(self, app, eventid, new_entry):
"""Show the page user is prompted with before registration to an event.
"""
response = app.get('/event', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
self.assertTrue('<!-- non_simple_ui -->' in response)
self.assertEqual(not new_entry, '<!-- not new entry -->' in response)
return response
def checkUserEventEntryFormReturnValue(
self, app, eventid, remaining_seats, response):
"""Check remaining seats value for event entry form."""
self.assertTrue(str(remaining_seats) in response)
def userEventEntry(self, app, eventid, capacity=CAPACITY,
user_realname=USER_REALNAME):
"""Register user to event.
Check that state changes before and after the event.
"""
# check entry page has right number of remaining seats in the
# two possible UIs.
self.checkUserEventEntryFormReturnValue(
app, eventid, capacity,
self.userEventEntryFormSimple(app, eventid, True))
self.checkUserEventEntryFormReturnValue(
app, eventid, capacity,
self.userEventEntryForm(app, eventid, True))
response = app.post('/eventregister', {
'eventid': eventid,
'user_prework': USER_PREWORK,
'user_attend': 'attend',
'user_enkai_attend': 'enkai_attend',
'user_realname': user_realname,
})
self.assertEqual('302 Moved Temporarily', response.status)
self.assertTrue('/thanks?eventid=%s' % eventid
in response.location)
self.verifyThanksPage(app, eventid)
# check entry page has right number of remaining seats
self.checkUserEventEntryFormReturnValue(
app, eventid, capacity - 1,
self.userEventEntryFormSimple(app, eventid, False))
self.checkUserEventEntryFormReturnValue(
app, eventid, capacity - 1,
self.userEventEntryForm(app, eventid, False))
def createEnquete(self, app, eventid, question_text = '''question 1
question 2
question 3'''):
"""Create an enquete. Should be ran as the admin."""
response = app.get('/enquete/edit', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
response = app.post('/enquete/editdone', {
'eventid': eventid,
'overall_message': 'hello',
'question_text': question_text,
})
self.assertEqual('200 OK', response.status)
# make sure the next time to edit will show the content the
# next time.
response = app.get('/enquete/edit', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
self.assertTrue(question_text in response)
# ==============================================================
# Tests
# ==============================================================
def testTopPage(self):
"""test displaying of the top page."""
app = TestApp(application)
response = app.get('/')
self.assertEqual('200 OK', response.status)
self.assertTrue('Debian勉強会予約管理システム' in response)
def testCreatePage(self):
app = TestApp(application)
response = app.get('/newevent')
self.assertEqual('200 OK', response.status)
self.assertTrue('幹事用イベント管理ページ' in response)
def testCreatePageCommit(self):
app = TestApp(application)
eventid = self.createPageCommitHelper(app)
# basic sanity checking of the event ID value.
self.assertEqual(len(eventid), 40)
def testListKnownAdminEvents(self):
"""Check admin dashboard if the newly created event can be seen.
"""
app = TestApp(application)
response = app.get('/')
self.assertEqual('200 OK', response.status)
self.assertFalse(TITLE in response)
# generate event data
self.createPageCommitHelper(app)
# check the event is viewable.
response = app.get('/')
self.assertEqual('200 OK', response.status)
self.assertTrue(TITLE in response)
def testThanksPageFailCase(self):
"""test that Thanks page will fail when wrong eventid is requested."""
app = TestApp(application)
# try to get some incorrect eventid
eventid = 'zzz'
response = app.get('/thanks?eventid=%s' % eventid, status=404)
self.assertTrue(eventid in response)
def testUserRegisterEvent(self):
"""Test user registration workflow.
"""
# generate event data first
app = TestApp(application)
eventid = self.createPageCommitHelper(app)
# check user does not see the event yet
self.login(LOGGED_IN_USER)
response = app.get('/')
self.assertEqual('200 OK', response.status)
self.assertFalse(TITLE in response)
# check user sees the event after registering
self.userEventEntry(app, eventid)
response = app.get('/')
self.assertEqual('200 OK', response.status)
self.assertTrue(TITLE in response)
def testUserRegisterEventFull(self):
"""Test user registration failure workflow.
"""
# generate event data first
app = TestApp(application)
# generate a event with capacity of 1
eventid = self.createPageCommitHelper(app, capacity=1)
# check user does not see the event yet
self.login(LOGGED_IN_USER)
response = app.get('/')
self.assertEqual('200 OK', response.status)
self.assertFalse(TITLE in response)
# check user sees the event after registering
self.userEventEntry(app, eventid, capacity=1)
response = app.get('/')
self.assertEqual('200 OK', response.status)
self.assertTrue(TITLE in response)
# check adding a different user to the event
self.login(LOGGED_IN_ADMIN)
response = app.post('/eventregister', {
'eventid': eventid,
'user_prework': USER_PREWORK,
'user_attend': 'attend',
'user_enkai_attend': 'enkai_attend',
'user_realname': USER_REALNAME,
}, status=404)
self.assertTrue('you cannot reserve a place' in response)
def testAdminReviewEvent(self):
"""Verify the event admin summary review flow.
"""
app = TestApp(application)
# register the event
eventid = self.createPageCommitHelper(app)
# user joins the event
self.login(LOGGED_IN_USER)
self.userEventEntry(app, eventid)
self.login(LOGGED_IN_ADMIN)
response = app.get('/eventadmin/summary', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
self.assertTrue(LOGGED_IN_USER in response)
self.assertTrue(USER_PREWORK in response)
def testLatexEnqueteEscape(self):
app = TestApp(application)
eventid = self.createPageCommitHelper(app)
# user joins the event
self.login(LOGGED_IN_USER)
self.userEventEntry(app, eventid,
user_realname='man_with_underscore')
# be the admin and create the enquete.
self.login(LOGGED_IN_ADMIN)
response = app.get('/eventadmin/preworklatex', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
self.assertTrue('man\_{}with\_{}underscore' in response.body)
def testEnqueteCreate(self):
"""Test Enquete creation flow.
"""
# generate event data first
app = TestApp(application)
eventid = self.createPageCommitHelper(app)
# user joins the event
self.login(LOGGED_IN_USER)
self.userEventEntry(app, eventid)
# does not see enquete request because there is no enquete yet.
response = app.get('/', {
'eventid': eventid,
})
self.assertFalse('アンケートに回答する' in response)
# be the admin and create the enquete.
self.login(LOGGED_IN_ADMIN)
self.createEnquete(app, eventid)
# admin sends out the enquete mail.
response = app.get('/enquete/sendmail', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
# user responds to enquete
# user sees top page with enquete requirement.
self.login(LOGGED_IN_USER)
response = app.get('/', {
'eventid': eventid,
})
self.assertTrue('アンケートに回答する' in response)
# user responds to enquete
response = app.get('/enquete/respond', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
self.assertTrue('question 1' in response)
self.assertTrue('question 2' in response)
self.assertTrue('question 3' in response)
response = app.post('/enquete/responddone', {
'eventid': eventid,
'question0': 0,
'question1': 5,
'question2': 4,
'overall_comment': 'hello world',
})
self.assertEqual('200 OK', response.status)
# user no longer sees top page with enquete requirement
response = app.get('/', {
'eventid': eventid,
})
self.assertFalse('アンケートに回答する' in response)
# admin views the list
self.login(LOGGED_IN_ADMIN)
response = app.get('/enquete/showresult', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
self.assertEquals("question 1,question 2,question 3,自由記入\r\nNA,5,4,hello world\r\n", response.body)
# admin views all the results
self.login(LOGGED_IN_ADMIN)
response = app.get('/enquete/showallresults')
self.assertEqual('200 OK', response.status)
if __name__ == '__main__':
unittest.main()
|
Please help me sort out my love life. I am fairly happily married to my high school sweetheart but am missing something emotional here and it’s breaking my heart to not feel a stronger connection to him. A short time ago I met a guy who I also thought was terrific but I think I scared him away. I am not looking to marry the guy (he is already happily married he told me and I don’t want to leave hubby).
Will I be able to connect with my hubby better and will I reconnect with the stranger again sometime… even as friends? Or are they in conflict with each other? Or is there some entirely new arrangement headed my way that hasn’t shown itself yet? Also, I am finally getting into my preferred career (visual art and perhaps animation) and think that will open different doors to me, refresh my outlook.
You are coming through quite vulnerable, which is very dangerous to your marriage. Particularly because your husband isn’t aware of it.
Stay away from the “stranger.” Actually, all strangers, unless you really want to open the door to more confusion and a pretty intense affair. If you go down that road, you will find yourself emotionally torn between two men, and some pretty intense struggling with “right” and “wrong”. It just looks like a whole bunch of heartache that you can avoid, if you choose to.
As for your marriage, it feels like it is coasting along on auto-pilot. It can continue on indefinitely this way, or you can create some chaos and turn it around. Things aren’t going to “just change” — you have to create that change. Unfortunately, the communication between you and your hubby is lacking when it comes to the serious and emotional issues. Table conversation is great, but it is not enough to keep the passion alive.
If you really want to improve the connection with your husband, you are going to have to take a risk by being brutally honest with the man, and equally willing to listen to him. I’m not saying that it is going to be easy, and it seems that things will get a little darker and bit stormy. But only for a while. That’s what I mean by creating some chaos.
It will be a highly charged, emotional situation. Things will get loud, but I’m not seeing any physical confrontation. If that is something that concerns you, even in the least, have a third party around to play mediator. Ultimately, if either of you is willing to go, counseling would be the best venue.
Be forewarned, he is not going to react well to a conversation that starts out with the line, “Honey, I’m not happy,” but it needs to be done if you want to alter the course that has currently been set in motion.
What you are unaware of is that on many levels he feels the same way. Passion and connections between two people are like their own living entities. These things need attention, nurturing and caring in order to remain healthy and strong. They require effort, and sometimes getting your hands dirty, in order to maintain them.
Your marriage does not have to fail, nor do you have to sacrifice the emotional aspects of the relationship in order to keep it together. But the time to work on it, change it, is now. Six months from now will most likely be too late.
While changes and advancement in your career will do wonders for you personally, it is not the key to your personal fulfillment. Yes, it helps, and it does make you feel more accomplished and empowered. But the bottom line is, you can have your career and a fantastic love life with your husband, if you are willing to take the risk and try.
Good luck to you with whatever you decide!
Is your relationship on auto-pilot? Call one of our Love Psychics at 1.800.573.4830. |
import re
from datetime import datetime
from ika.models import Account, Channel
from ika.utils import tokenize_modestring
class IRCModeMixin:
modesdef = dict()
def __init__(self):
self.modes = dict()
@property
def modestring(self):
string = '+'
params = list()
for k, v in self.modes.items():
if not isinstance(v, set):
string += k
if v:
params.append(v)
if len(params) > 0:
string += ' ' + ' '.join(params)
return string
@property
def listmodestring(self):
string = '+'
params = list()
for k, v in self.modes.items():
if isinstance(v, set):
for e in v:
string += k
params.append(e)
if len(params) > 0:
string += ' ' + ' '.join(params)
return string
def update_modes(self, *modes):
adds, removes = tokenize_modestring(self.modesdef, *modes)
for k, v in adds.items():
if isinstance(v, set):
s = self.modes.get(k, set())
self.modes[k] = s | v
else:
self.modes[k] = v
for k, v in removes.items():
if isinstance(v, set):
self.modes[k] -= v
if len(self.modes[k]) == 0:
del self.modes[k]
else:
del self.modes[k]
return adds, removes
class IRCUser(IRCModeMixin):
def __init__(self, uid, timestamp, nick, host, dhost, ident, ipaddress, signon, gecos):
super().__init__()
self.uid = uid
self.timestamp = int(timestamp)
self.nick = nick
self.host = host
self.dhost = dhost
self.ident = ident
self.ipaddress = ipaddress
self.signon = int(signon)
self.gecos = gecos
self.opertype = None
self.metadata = dict()
# For backref
self.channels = set()
def __str__(self):
return self.nick
def __repr__(self):
return f'<IRCUser {self.mask}>'
def match_mask(self, mask):
pattern = re.escape(mask)
pattern = pattern.replace('\*', '.+?')
pattern = '^{}$'.format(pattern)
return re.match(pattern, self.mask, re.IGNORECASE) is not None
@property
def mask(self):
return '{}!{}@{}'.format(self.nick, self.ident, self.dhost)
@property
def account(self) -> Account:
name = self.metadata.get('accountname')
return name and Account.get(name)
@property
def connected_at(self):
return datetime.fromtimestamp(self.signon)
@property
def is_operator(self):
return self.opertype == 'NetAdmin'
@property
def is_service(self):
return self.opertype == 'Services'
def update_modes(self, *modes):
adds, removes = super().update_modes(*modes)
if 'o' in removes.keys():
self.opertype = None
class IRCChannel(IRCModeMixin):
umodesdef = dict()
def __init__(self, name, timestamp):
super().__init__()
self.name = name
self.timestamp = int(timestamp)
self.users = dict()
self.usermodes = dict()
self.metadata = dict()
def __str__(self):
return self.name
def __repr__(self):
return f'<IRCChannel {self.name}>'
@property
def umodestring(self):
return ' '.join([f'{"".join(mode)},{uid}' for uid, mode in self.usermodes.items()])
@property
def channel(self):
try:
return Channel.get(self.name)
except UnicodeEncodeError:
# surrogates are not allowed.
return None
def update_modes(self, *modes):
super().update_modes(*modes)
adds, removes = tokenize_modestring(self.umodesdef, *modes)
for mode, v in adds.items():
for uid in v:
self.usermodes.setdefault(uid, set())
self.usermodes[uid].add(mode)
for mode, v in removes.items():
for uid in v:
self.usermodes[uid].remove(mode)
def generate_synchronizing_modestring(self, uid=None, account=None, mask=None):
if account and mask:
raise ValueError('At most one of [account, mask] may be set')
if not self.channel:
return ''
to_be_added = list()
to_be_removed = list()
if uid:
usermodes = {uid: self.usermodes[uid]}
else:
usermodes = self.usermodes
for uid, umode in usermodes.items():
user = self.users[uid]
if user.is_service:
continue
if mask and (not user.match_mask(mask)):
continue
if account and (user.account != account):
continue
flags = self.channel.get_flags_by_user(user)
modes = flags.modes
adds = modes - umode
removes = umode - modes
for add in adds:
to_be_added.append((add, uid))
for remove in removes:
to_be_removed.append((remove, uid))
modestring = str()
params = list()
if len(to_be_added) > 0:
modestring += '+'
for mode, uid in to_be_added:
modestring += mode
params.append(uid)
if len(to_be_removed) > 0:
modestring += '-'
for mode, uid in to_be_removed:
modestring += mode
params.append(uid)
if len(params) > 0:
modestring += ' '
modestring += ' '.join(params)
return modestring
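# Rendering sketch: with self.modes = {'n': None, 'k': 'secret',
# 'b': {'*!*@spam'}}, .modestring yields '+nk secret' (set-valued list
# modes are skipped there) and .listmodestring yields '+b *!*@spam'.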
|
The Urban collection by Parker is renowned for its distinguished character, reliability and optimal performance qualities. The sleek structure of this feminine rollerball pen is produced from premium brass metal with a bold magenta lacquer finish, which is elegantly contrasted by chrome plated trims. This rollerball is designed to generate only the smoothest of writing experiences and makes a superb writing companion in both personal and professional writing endeavors. The pen can also be personalised with an engraved name or message, and would make a fantastic gift to celebrate a special occasion.
A distinguished rollerball pen with exceptional performance qualities, rendered with a vibrant magenta lacquer finish and shiny chrome plated trims. |
from setuptools import setup, Extension
from codecs import open
from os import path
# from Michael Hoffman's http://www.ebi.ac.uk/~hoffman/software/sunflower/
class NumpyExtension(Extension):
def __init__(self, *args, **kwargs):
from numpy import get_include
from numpy.distutils.misc_util import get_info
kwargs.update(get_info('npymath'))
kwargs['include_dirs'] += [get_include()]
Extension.__init__(self, *args, **kwargs)
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyhacrf',
version='0.1.2',
packages=['pyhacrf'],
install_requires=['numpy>=1.9', 'PyLBFGS>=0.1.3'],
ext_modules=[NumpyExtension('pyhacrf.algorithms',
['pyhacrf/algorithms.c'])],
url='https://github.com/dirko/pyhacrf',
download_url='https://github.com/dirko/pyhacrf/tarball/0.1.2',
license='BSD',
author='Dirko Coetsee',
author_email='dpcoetsee@gmail.com',
description='Hidden alignment conditional random field, a discriminative string edit distance',
long_description=long_description,
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
],
)
|
Highly recommend! I'd been skeptical about hiring movers for the first time, but it was so worth it! They gave me a great flat rate fee that included no hidden fees or charges, and they took care of everything. They were gentle, wrapped everything nicely, disassembled and reassembled furniture, and were QUICK! They also happily went out of their way to move some items that they weren't anticipating moving, and they did it with no problem. If you want stress-free, try them out. |
#! /usr/bin/env python2
"""
Copyright:
derive_pathway+steps.py Obtain gene list from pathway databases
Copyright (C) 2016 William Brazelton, Alex Hyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import atexit
import argparse
import os
import pycyc
import re
import stat
from subprocess import CalledProcessError, Popen
import sys
import time
__author__ = 'Alex Hyer'
__email__ = 'theonehyer@gmail.com'
__license__ = 'GPLv3'
__maintainer__ = 'Alex Hyer'
__status__ = 'Alpha'
__version__ = '0.0.1a16'
def print_nested_list(lst, level=0):
yield(' ' * (level - 1) + '+---' * (level > 0) + str(lst[0]))
for l in lst[1:]:
if type(l) is list:
for i in print_nested_list(l, level + 1):
yield i
else:
yield(' ' * level + '+---' + str(l))
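# Example: for line in print_nested_list(['root', 'a', ['b', 'c'], 'd']):
#              print(line)
# prints:
#   root
#   +---a
#   +---b
#    +---c
#   +---d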
# This method is literally just the Python 3.5.1 which function from the
# shutil library in order to permit this functionality in Python 2.
# Minor changes to style were made to account for indentation.
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly
# rather than referring to PATH directories. This includes checking
# relative to the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path
# extensions. This will allow us to short circuit when given
# "python.exe". If it does match, only test that one, otherwise
# we have to try others.
if any(cmd.lower().endswith(ext.lower()) for ext in
pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
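# Example (illustrative; the resolved path depends on the local install):
#     which('pathway-tools')   # -> '/usr/local/bin/pathway-tools'
#     which('no-such-binary')  # -> None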
def main(args):
"""Run program
Args:
args (NameSpace): ArgParse arguments controlling program flow
"""
def shutdown(pid):
print('>>> Shutdown sequence initiated.')
print('>>> Terminating Pathway Tools LISP Daemon')
pid.terminate()
pid.wait()
print('>>> Daemon destroyed.')
print('>>> Until next time. :)')
print('>>> Hi, I am DPS (Derive Pathway Steps).')
print('>>> I will be analyzing pathways for you today.')
print('>>> I am using the {0} database as per your command.'
.format(args.database))
if args.database == 'metacyc':
# Obtain executable
pathway_tools = which('pathway-tools', path=args.executable)
if pathway_tools is None:
raise EnvironmentError('I cannot find pathway-tools: please '
'specify -e.')
else:
print('>>> I found pathway-tools: {0}.'.format(pathway_tools))
# Start pathway-tools daemon
while True:
print('>>> Summoning Pathway Tools LISP Daemon.')
pid = Popen([pathway_tools, '-lisp', '-api'],
stderr=open(os.devnull, 'w'),
stdout=open(os.devnull, 'w'))
print('>>> Let\'s give it five seconds to spawn.')
time.sleep(5)
if os.path.exists('/tmp/ptools-socket') and \
stat.S_ISSOCK(os.stat('/tmp/ptools-socket').st_mode):
                print('>>> The daemon is up!')
break
else:
print('>>> The daemon took too long to boot. :(')
print('>>> This makes me sad, so I will kill it.')
pid.kill()
print('>>> Let\'s wait five seconds for it to die!')
time.sleep(5)
pid.poll()
if pid.returncode is None:
                        # CalledProcessError requires a returncode and command
                        raise CalledProcessError(-1, 'pathway-tools',
                                                 'Pathway Tools won\'t die!')
else:
print('>>> The daemon is dead!')
print('>>> I miss it. :( I\'m going to try again. :)')
atexit.register(shutdown, pid)
# Connect to daemon
try:
metacyc = pycyc.open('meta')
except IOError:
print('>>> I cannot connect to Pathway Tools Daemon.')
print('>>> Here is the original error message:')
raise
else:
print('>>> I have connected to the Pathway Tools Daemon.')
print('>>> Phenomenal cosmic powers! Itty bitty memory footprint!')
# Index genes file
print('>>> Indexing {0}.'.format(args.reactions_file.name))
reactions_to_genes = {}
start_time = time.time()
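        # Assumed whitespace-delimited line format (inferred from the parsing
        # below): <reaction-id> <EC-number> <gene> [<gene> ...]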
for line in args.reactions_file:
parts = line.strip().split()
reactions_to_genes[parts[0]] = (parts[1], parts[2:])
end_time = time.time()
print('>>> I indexed {0} reactions in {1} seconds.'
.format(str(len(reactions_to_genes)),
str(end_time - start_time)))
print('>>> I\'m so fast.')
# Index all pathways by name
print('>>> Time to index all the pathways from Metacyc.')
pathways = {}
start_time = time.time()
for frame in metacyc.all_pathways():
pathways[frame.common_name] = frame
end_time = time.time()
print('>>> I indexed {0} pathways in {1} seconds.'
.format(str(len(pathways)), str(end_time - start_time)))
print('>>> Aren\'t you proud of me?')
# Index gene abundance
print('>>> Recording gene abundances from {0}.'
.format(args.abundance_file.name))
abundances = {}
start_time = time.time()
for line in args.abundance_file:
            gene, abundance = line.strip().split('\t')
            # store abundance as a float so the 0.0 comparisons below work
            abundances[gene] = float(abundance)
end_time = time.time()
print('>>> I indexed {0} gene abundances in {1} seconds.'
.format(str(len(abundances)), str(end_time - start_time)))
# Obtain pathway of interest
print('>>> Time to do some science!')
print('>>> Note: you can input all or part of a pathway name.')
print('>>> Type "q" for input at any time to exit the program.')
while True: # Rest of program runs in a loop until user ends it
possibilities = {}
user_input = raw_input('>>> Enter a pathway: ')
if user_input.lower() == 'q':
break
for name, frame in pathways.items():
if user_input in name:
possibilities[name] = frame
if len(possibilities) == 0:
print('>>> I couldn\'t find any pathways matching your '
'request.')
print('>>> Try an alternative name for the pathway.')
continue
print('>>> I found {0} pathways matching your request.'
.format(str(len(possibilities))))
shutdown = False
restart = False
pathway = None
while True:
print('>>> Here are possible pathways:')
max_entry = len(possibilities) - 1
for possibility in enumerate(possibilities.items()):
print('{0}: {1}'.format(str(possibility[0]),
possibility[1][1].common_name))
path_num = raw_input('>>> Select a pathway ("r" to restart): ')
if path_num.lower() == 'q':
shutdown = True
break
elif path_num.lower() == 'r':
restart = True
break
else:
try:
path_num = int(path_num)
except ValueError:
print('>>> Your answer is not an integer.')
print('>>> I only understand integers.')
print('>>> Please correct.')
continue
if path_num > max_entry or path_num < 0:
print('>>> {0} is not a valid pathway.'
.format(str(path_num)))
print('>>> Valid pathways are: {0}.'.format(' '.join(
[str(i) for i in range(max_entry + 1)])))
print('>>> Try again.')
continue
pathway = possibilities[possibilities.keys()[path_num]]
print('>>> You selected: {0}.'.format(pathway.common_name))
print('>>> Neat! I\'ll analyze it now.')
break
if restart is True:
continue
if shutdown is True:
break
# Add genes and abundances to pathway reactions
print('>>> Collecting reactions in pathway.')
try:
if type(pathway.reaction_list) is list:
rxns = [str(rxn) for rxn in pathway.reaction_list]
else:
rxns = [str(pathway.reaction_list)]
except KeyError:
print('>>> I cannot access the reactions for this pathway. :(')
print('>>> I\'m sorry I\'ve failed you. :(')
print('>>> Please have me analyze something else.')
continue
print('>>> Analyzing pathway for key reactions.')
if hasattr(pathway, 'key_reactions') is True and\
pathway.key_reactions is not None:
key_rxns = [str(key) for key in pathway.key_reactions]
for rxn in enumerate(rxns):
if rxn[1] in key_rxns:
rxns[rxn[0]] = rxn[1] + '*'
print('>>> Acquiring gene families for each reaction from {0}.'
.format(args.reactions_file.name))
reactions = {}
for rxn in rxns:
rxn_name = re.sub('\*$', '', rxn)
if rxn_name in reactions_to_genes.keys():
ec, uniref_list = reactions_to_genes[rxn_name]
rxn_name = rxn + ' (' + ec + ')'
reactions[rxn_name] = {}
for uniref in uniref_list:
reactions[rxn_name][uniref] = 0.0
print('>>> Adding abundances from {0}.'
.format(args.abundance_file.name))
for rxn in reactions.keys():
for gene in reactions[rxn]:
if gene in abundances.keys():
reactions[rxn][gene] = abundances[gene]
print('>>> Removing unused gene families.')
for rxn in reactions.keys():
for uniref in reactions[rxn].keys():
if reactions[rxn][uniref] == 0.0:
del reactions[rxn][uniref]
for rxn in reactions.keys():
if reactions[rxn] == {}:
reactions[rxn] = 'None\tN/A'
continue
# Format reactions for printing
rxn_list = [pathway.common_name]
for rxn in reactions.keys():
if reactions[rxn] == 'None\tN/A':
temp = [rxn, ['None\tN/A']]
rxn_list.append(temp)
elif type(reactions[rxn]) is dict:
temp = [rxn]
for uniref in reactions[rxn].keys():
temp.append('{0}\t{1}'.format(uniref,
str(reactions[rxn][uniref])))
rxn_list.append(temp)
# Print output
print('>>> I\'ve finished analyzing everything!')
print('>>> Here it is (asterisks represent key reactions):')
rxn_print = [rxn for rxn in print_nested_list(rxn_list)]
for rxn in rxn_print:
print(rxn)
# Save output
print('>>> What file would you like me to save this to?')
print('>>> Type "n" if you don\'t want to save this output.')
while True:
out_file = raw_input('>>> File: ')
if out_file.lower() != 'n' and out_file.lower() != 'q':
try:
with open(out_file, 'w') as out_handle:
for rxn in rxn_print:
out_handle.write(rxn + os.linesep)
print('>>> Output written to {0}.'.format(out_file))
break
except IOError as error:
print('>>> I could not write to {0}.'.format(out_file))
print('>>> Original error:')
print(error)
print('>>> Let\'s try again (enter "n" to skip).')
elif out_file.lower() == 'q':
shutdown = True
break
else:
break
if shutdown is True:
break
print('>>> All done!')
print('>>> Let\'s do more science (enter "q" to exit program)!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.
RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(title='Database',
dest='database')
metacyc = subparsers.add_parser('metacyc',
help='Analyze MetaCyc Database')
metacyc.add_argument('abundance_file',
metavar='Abundance File',
type=argparse.FileType('r'),
help='TSV containing gene ID and abundance columns')
metacyc.add_argument('reactions_file',
metavar='Reactions File',
type=argparse.FileType('r'),
help='metacyc1 file mapping Unirefs to reactions')
metacyc.add_argument('-e', '--executable',
default=None,
type=str,
                         help='pathway-tools executable if not in PATH')
args = parser.parse_args()
main(args)
sys.exit(0)
|
Singing is my greatest joy in life and I’ve been making music since early childhood. I hold a BA with a music major and an emphasis on voice, along with classes in music education and choir conducting. My teaching experience includes work with adults and young people. The ukulele is a wonderful instrument to use as an accompaniment to singing. I believe there can never be too many songs to hold in our hearts! My workshops start very simply, building on success, in a happy, relaxed environment. We will sing and play many songs in each session! |
# -*- encoding: utf-8 -*-
#
# This script unpacks the payload from Winnti Group samples using their custom
# packer. For details, see:
# https://www.welivesecurity.com/wp-content/uploads/2019/10/ESET_Winnti.pdf
#
# For feedback or questions contact us at: github@eset.com
# https://github.com/eset/malware-research/
#
# Author:
# Marc-Etienne M.Léveillé <leveille@eset.com>
#
# This code is provided to the community under the two-clause BSD license as
# follows:
#
# Copyright (C) 2019 ESET
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import struct
from Crypto.Cipher import ARC4
import hashlib
import json
# Those are the last functions of the shellcode returning the address of the
# payload header (minus 3)
GET_PAYLOAD_FUNCTIONS = [
# 32-bit
"558BECE800000000585DC3".decode('hex'),
# 64-bit
"E80000000058C3".decode('hex')
]
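# Note: str.decode('hex') exists only on Python 2; the Python 3 equivalent
# would be bytes.fromhex("558BECE800000000585DC3").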
def get_payload_indexes(s):
r = []
for p in GET_PAYLOAD_FUNCTIONS:
i = s.find(p)
while i >= 0:
r.append(i + len(p))
i = s.find(p, i + 1)
r.sort()
return r
for path in sys.argv[1:]:
with open(path, 'rb') as f:
file_content = f.read()
for offset_to_bin in get_payload_indexes(file_content):
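        # 40-byte header: a 16-byte XOR-obfuscated RC4 key followed by six
        # unsigned 32-bit fields (added code, key, filename, wide filename
        # and PE sizes, plus the launch type)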
rc4_key, \
added_code_size, \
rc4_key_size, \
filename_size, \
filename_wide_size, \
pe_size, \
launch_type = \
struct.unpack("16s" + "I" * 6, file_content[offset_to_bin:][:40])
if launch_type not in (1, 2):
sys.stderr.write(
"Possibly invalid header (launch_type = {:d}) at {:s}:{:d}\n".format(
launch_type, path, offset_to_bin
)
)
rc4_key = ''.join([ chr(ord(c) ^ 0x37) for c in rc4_key[:rc4_key_size] ])
i = offset_to_bin + 40
filename = ARC4.new(rc4_key).decrypt(file_content[i:][:filename_size])[:-1]
i += filename_size
filename_wide = ARC4.new(rc4_key).decrypt(file_content[i:][:filename_wide_size])
filename_wide = filename_wide.decode('utf-16')[:-1]
i += filename_wide_size
pe = ARC4.new(rc4_key).decrypt(file_content[i:][:pe_size])
if pe[:2] == 'MZ':
payload_sha1 = hashlib.sha1(pe).hexdigest()
desc = {
"parent_sha1": hashlib.sha1(file_content).hexdigest(),
"rc4_key": rc4_key,
"filename": filename,
"filename_w": filename_wide,
"launch_type": launch_type,
"payload_sha1": payload_sha1,
}
if os.path.exists(payload_sha1):
sys.stderr.write("File {:s} already exists, skipping\n".format(
payload_sha1
))
else:
                with open(payload_sha1, "wb") as o:
o.write(pe)
json.dump(desc, sys.stdout)
sys.stdout.write("\n")
else:
sys.stderr.write(
"Payload not decrypted sucessfully at {:s}:{:d}\n".format(
path, offset_to_bin
)
)
|
So you want to make your own Cross Stitch patterns. Good! We need more creative people coming up with all sorts of neat designs! It’s fun, rewarding, and there’s nothing quite like the feeling of finishing up a design made entirely by you, or seeing other people stitch your art!
I’m only really familiar with PCStitch, so that’s what I’ll be explaining. But WinStitch/MacStitch and STOIK are also great programs with similar functions, so don’t be afraid to try those out as well!
I’m not going to be super thorough on each of these features; I’m just going over the general steps to consider when creating a design. Additionally, this is only my process and it is not the only way to do things. Please do experiment and find the process that works for you! But as I get tons of questions on my process, I figured it was time to write it down.
Ideally, you are turning your own unique artwork into a pattern. But it’s also fun to take game sprites or existing images and stitch them. So no judgment here. For video game sprites, I’d recommend going to The Spriters Resource, looking up your favorite game and skipping ahead to the ‘Importing into PCStitch’ section.
If you just happen to find cool pixel art online (or any art), I would recommend asking the artist if they’re okay with you stitching it before going through this process. It’s simply polite, and usually they’re more than excited that you even want to.
Oh. Well that’s… kind of boring. I don’t remember picking blobby purple masses in game. Let’s use the inventory icon instead.
There we go. That’s what my brain associates with the kingsblood plant. Let’s draw that.
If you already have Photoshop, or some other editing program, that’s good too. Look up tutorials online for doing pixel art in your program of choice, and go to town.
I kept to a 28×28 pixel work area so I could fit it into a pendant later. I roughly sketched it up first (left), and then slowly filled it in and added shading and all that. I used the muted colors from the ‘in game’ image, while keeping the general shape of the icon.
This isn’t an art tutorial, so I won’t show you “The rest of the f*ng owl“, but I am showing you my general process to get an idea of what all goes into pattern design. We haven’t even gotten to PCStitch and I’ve already spent an hour or two on this thing.
You can also draw directly into PCStitch, Winstitch, etc. Though it’s not as time saving as you would think, honestly. But this whole process should be about experimenting and doing what works best for YOU. Plus, pixel art programs can’t account for backstitch, so if you’re doing backstitch heavy designs, it may be best to just draw straight into a cross stitch program.
Pick out your file, click the ‘show preview’ button and… oh. Uh. What’s that blurry mess?
So the most common mistake is not adjusting for size. Whether you’re importing a photo or a small sprite, you want to define how big the end pattern is going to be. In this case, stretching a 28×28 pixel art up to 100×100 makes it look… well, awful. So for pixel art you’ll want to use the same size as the original design.
For photos, you just want to make it whatever size you’re willing to stitch it as.
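If you want to double-check the math, the finished size is just the stitch count divided by the fabric count. A minimal sketch (my own helper, not a PCStitch feature):
def finished_size_inches(stitch_w, stitch_h, fabric_count):
    # e.g. a 28x28 design on 14-count aida comes out to 2x2 inches;
    # stitched over 1 on 28-count it's 1x1 inch
    return stitch_w / float(fabric_count), stitch_h / float(fabric_count)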
Well, now you’ve got your image at the right size. And are thoroughly regretting choosing anything with purple in it. For now, simply make sure you’ve also selected your brand of thread from the Floss tab (in this case, DMC Stranded Cotton).
50 colors is more than enough for this little guy, but if you’re importing a full color photo or artwork, you may want to turn that up even if you’re not planning to keep all 500 colors; we’ll work on reducing colors later.
Now that it’s in here, the first thing you want to do for most sprites is remove the background color. Since my image was transparent, PCStitch automatically filled it all with black. This is not great if your sprite already has black in it, so keep that in mind and make the background of your image a unique color so it’s easy to remove. Just right-click and delete.
After that, one of the most time-consuming parts of making designs is picking out floss colors. As you’ve seen, some colors look fine (the lighter greens), and others look awful (the purples). You’ll probably want to bring out your box of floss and look at some of these colors in person too, as cross stitch programs are notorious for not displaying some colors quite right. Just start going through and picking any colors that look wrong.
Click the color you want to change down in the floss palette.
Click the color you like in the ‘Available colors’ menu.
Click the Replace button to replace all stitches of that color.
Since I designed this with few colors in mind, I don’t have a lot to reduce for this example. However, when importing game sprites I often see colors that only have one or two stitches. Usually, anything with fewer than 15 stitches I’m going to investigate to see if it’s necessary. Sometimes it is, like two stitches for tiny eyes. Other times, I already have a very similar color next to it and can combine the two.
This makes it real easy to see where those stitches are so you can replace them with nearby colors as needed. I also like to go through each and every color while in this highlight mode and see if there are any stray stitches off by themselves. Even if there are a lot of stitches of a color, no one wants to re-thread a needle for just a stitch or two.
Once you’re done combining and reducing colors, right-click any color, go to Palette Tools, and select ‘Clear Unused Entries’ to get rid of any colors in your palette that have 0 stitches.
One of my least favorite things about buying patterns online is discovering that some of the symbols are far too similar to each other. So you want to go through and make sure your pattern is legible. You can change how your pattern is displayed right up in the menu.
I like to keep to simple symbols and make sure they’re not too similar to each other. If I have a design with a ton of colors and it’s harder to pick unique symbols, I at least make sure any that are too similar to each other are not a similar color or in the same area of the pattern. That way it should be obvious to the stitcher if they’ve started stitching with the wrong color. Always feel free to call me out on it if symbols in my patterns are hard to distinguish.
We’re almost there! Time to export the design.
I like to include both a full color “Blocks & Symbols” version as well as a Black&White “Symbols” version.
You might also choose to have a ‘Virtual Stitches’ version without the gridlines to show more or less what your design will look like once stitched.
Arguably one of the most important parts of designing is actually stitching your patterns. Whether you do it, or end up getting someone else to test it, I encourage you to actually stitch it if you’re planning to sell your pattern.
You’ll find that along the way a color doesn’t look quite the same in person, or looks different when placed next to another color. Or maybe the symbols were more similar than they looked in the program. Or you missed that one stray pixel off to the right that you’re too lazy to even stitch yourself.
This is when a design truly stands out. When it’s been stitched and approved by a real life human.
When I test stitched this one I ended up changing out two of the greens entirely, and made the border around it a dark grey rather than the 939 I had originally chosen. I also cleaned up a few stray stitches that didn’t quite seem to belong once they were stitched.
This was tent stitched over 1 on 28 count fabric, and on fabric I ended up not liking because the holes weren’t well defined and it was harder to keep stitches even. Usually my tent stitching has much better coverage (IMO), but I still think it turned out pretty good!
Total time from finding image references to finishing stitching was about ~4-5 hours.
Sirithre wrote this tutorial. If you have additional questions about using PCStitch, feel free to ask her directly or ask in the Pixel Stitch Discord. |
# -*- coding: utf-8 -*-
#
# A NNTP Binary File Representation
#
# Copyright (C) 2015-2016 Chris Caron <lead2gold@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
from newsreap.NNTPContent import NNTPContent
from newsreap.Utils import bytes_to_strsize
class NNTPBinaryContent(NNTPContent):
"""
A Binary file representation
"""
def __init__(self, filepath=None, part=None, total_parts=None,
begin=None, end=None, total_size=None,
work_dir=None, sort_no=10000, *args, **kwargs):
""" Intitialize NNTPBinaryContent
"""
super(NNTPBinaryContent, self).__init__(
filepath=filepath,
part=part, total_parts=total_parts,
begin=begin, end=end, total_size=total_size,
work_dir=work_dir,
sort_no=sort_no, *args, **kwargs)
def __repr__(self):
"""
Return a printable version of the file being read
"""
if self.part is not None:
return '<NNTPBinaryContent sort=%d filename="%s" part=%d/%d len=%s />' % (
self.sort_no,
self.filename,
self.part,
self.total_parts,
bytes_to_strsize(len(self)),
)
else:
return '<NNTPBinaryContent sort=%d filename="%s" len=%s />' % (
self.sort_no,
self.filename,
bytes_to_strsize(len(self)),
)
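# Example (illustrative; values are hypothetical):
#
#     content = NNTPBinaryContent(filepath='video.mkv', part=1,
#                                 total_parts=10, work_dir='/tmp')
#     repr(content)  # -> '<NNTPBinaryContent sort=10000 filename="..." ... />'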
|
Brock & Kiraz English bark.
How to cite this entry? Sebastian P. Brock & George A. Kiraz, Gorgias Concise Syriac-English, English-Syriac Dictionary (Piscataway, NJ: Gorgias Press, 2015) ܩܠܦܠܦܐ [from sedra.bethmardutho.org, accessed on Apr. 21, 2019]. |
# import the necessary packages
from sklearn import svm
from sklearn.calibration import CalibratedClassifierCV
# from sklearn.cross_validation import train_test_split
# resolvendo problemas de compatibilidade
from sklearn.model_selection import train_test_split
from imutils import paths
import numpy as np
import argparse
import imutils
import cv2
import os
data_path = "DBIM/alldb"
model_pxl = CalibratedClassifierCV(svm.LinearSVC())
model_hst = CalibratedClassifierCV(svm.LinearSVC())
def image_to_feature_vector(image, size=(32, 32)):
# resize the image to a fixed size, then flatten the image into
# a list of raw pixel intensities
return cv2.resize(image, size).flatten()
def extract_color_histogram(image, bins=(8, 8, 8)):
# extract a 3D color histogram from the HSV color space using
# the supplied number of `bins` per channel
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hist = cv2.calcHist([hsv], [0, 1, 2], None, bins,
[0, 180, 0, 256, 0, 256])
# handle normalizing the histogram if we are using OpenCV 2.4.X
if imutils.is_cv2():
hist = cv2.normalize(hist)
# otherwise, perform "in place" normalization in OpenCV 3 (I
    # personally hate the way this is done)
else:
cv2.normalize(hist, hist)
# return the flattened histogram as the feature vector
return hist.flatten()
def initializate(data_p="DBIM/alldb"):
    # rebind the module-level path and models so fit()/classify() use them
    global data_path, model_pxl, model_hst
    data_path = data_p
    model_pxl = CalibratedClassifierCV(svm.LinearSVC())
    model_hst = CalibratedClassifierCV(svm.LinearSVC())
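# Typical flow (illustrative; the image path is hypothetical):
#     initializate("DBIM/alldb")
#     fit(info=True)
#     print_proba(classify("some_image.jpg"), full=True)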
def fit(info=False):
# grab the list of images that we'll be describing
if(info):
print("[INFO] describing images...")
imagePaths = list(paths.list_images(data_path))
# initialize the raw pixel intensities matrix, the features matrix,
# and labels list
rawImages = []
features = []
labels = []
# loop over the input images
for (i, imagePath) in enumerate(imagePaths):
# load the image and extract the class label (assuming that our
# path as the format: /path/to/dataset/{class}/{image_num}.jpg
image = cv2.imread(imagePath)
label = imagePath.split(os.path.sep)[2]
# extract raw pixel intensity "features", followed by a color
# histogram to characterize the color distribution of the pixels
# in the image
pixels = image_to_feature_vector(image)
hist = extract_color_histogram(image)
# update the raw images, features, and labels matricies,
# respectively
rawImages.append(pixels)
features.append(hist)
labels.append(label)
# show an update every 1,000 images
if i > 0 and i % 1000 == 0 and info:
print("[INFO] processed {}/{}".format(i, len(imagePaths)))
# show some information on the memory consumed by the raw images
# matrix and features matrix
rawImages = np.array(rawImages)
features = np.array(features)
labels = np.array(labels)
if(info):
print("[INFO] pixels matrix: {:.2f}MB".format(
rawImages.nbytes / (1024 * 1000.0)))
print("[INFO] features matrix: {:.2f}MB".format(
features.nbytes / (1024 * 1000.0)))
(trainRI, testRI, trainRL, testRL) = train_test_split(
rawImages, labels, test_size=0, random_state=42)
(trainFeat, testFeat, trainLabels, testLabels) = train_test_split(
features, labels, test_size=0, random_state=42)
model_pxl.fit(trainRI, trainRL)
model_hst.fit(trainFeat, trainLabels)
def get_predict_proba(model, input):
prob = model.predict_proba(input)
label = model.predict(input)[0]
return {'label':label, '0':prob[0][0] ,'1':prob[0][1], '2': prob[0][2] }
def print_proba(ret, full=False):
if(full):
print("SVM")
print("\n PIXEL")
print("Probability:")
print("label 0: " + str(ret['pxl']['0']) )
print("label 1: " + str(ret['pxl']['1']))
print("label 2: " + str(ret['pxl']['2']))
print("image label:" + str(ret['pxl']['label']))
print("")
print("\n HISTOGRAM")
print("Probability:")
print("label 0: " + str(ret['hst']['0']) )
print("label 1: " + str(ret['hst']['1']))
print("label 2: " + str(ret['hst']['2']))
print("image label:" + str(ret['hst']['label']))
print("")
else:
print("SVM\n")
print("Label: " + str(ret['pxl']['label']) +
" prob:" + str(ret['pxl'][str(ret['pxl']['label'])]))
print("Label: " + str(ret['hst']['label']) +
" prob:" + str(ret['hst'][str(ret['hst']['label'])]))
def classify(img_path, imshow=False):
img = cv2.imread(img_path)
if(imshow):
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
pxl = image_to_feature_vector(np.array(img)).reshape(1,-1)
hst = extract_color_histogram(np.array(img)).reshape(1,-1)
pxl_c = get_predict_proba(model_pxl, pxl)
hst_c = get_predict_proba(model_hst, hst)
return {'pxl':pxl_c, 'hst':hst_c } |
Stanton's focus is to set new standards of innovation and design in uniquely decorative carpets.
The corporation's self-branded predominantly wool product line features high quality, designer-driven patterns and styles available through select flooring dealers throughout the country.
Wiltons and the Karaman collection offer all-wool decorative patterns in a lush cut-pile broadloom with runner coordinates. |
import boto.sqs
import logging
import time
import boto
import json
import numpy as np
import tempfile
import hashlib
from collections import defaultdict
from boto.sqs.message import Message
from boto.s3.key import Key
import base64
import datetime
from boto.dynamodb2.exceptions import ConditionalCheckFailedException
import os.path
from datadirac.data import NetworkInfo
from masterdirac.models.aggregator import ( TruthGPUDiracModel,
RunGPUDiracModel, DataForDisplay )
import masterdirac.models.run as run_mdl
import random
import pandas
import re
class TruthException(Exception):
pass
class FileCorruption(Exception):
pass
class DirtyRunException(Exception):
pass
class InvalidMask(Exception):
    # the given mask doesn't parse
pass
MASK_PATTERN_MATCH = r'([\[\(]\d+,\d+[\)\]])'
MASK_PATTERN_PARSE = r'[\[\(](\d+),(\d+)[\)\]]'
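# Illustrative use of the parse pattern (the matching helper itself is not
# part of this module):
#     m = re.match(MASK_PATTERN_PARSE, "[5,10)")
#     start, end = int(m.group(1)), int(m.group(2))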
class ResultSet(object):
"""
Abstract BaseClass
For representing and manipulating a result
"""
def __init__(self, instructions ):
self.logger = logging.getLogger(__name__)
self._result_bucket = None
self._alleles = None
#print instructions
self._instructions = instructions
self.run_id = instructions['run_id']
self._file_id = instructions['file_id']
self.result_files = instructions['result_files']
self.sample_allele = instructions['sample_allele']
self.sample_names = instructions['sample_names']
self.shuffle = instructions['shuffle']
self.strain = instructions['strain']
self.num_networks = instructions['num_networks']
self._data = None
self._classified = None
self._truth = None
self._compare_mat = None
@property
def nsamp(self):
return len(self.sample_names)
@property
def file_id(self):
return self._file_id
@property
def nnets(self):
return self.num_networks
@property
def alleles(self):
if not self._alleles:
self._alleles = self.result_files.keys()
self._alleles.sort()
return self._alleles
@property
def data(self):
if self._data is None:
stacked = []
for allele in self.alleles:
stacked.append(self._get_data(allele))
self._data = np.array(stacked)
return self._data
@property
def classified(self):
if self._classified is None:
self._classified = np.argmax( self.data, axis=0 )
return self._classified
@property
def truth(self):
if self._truth is None:
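            # map each sample name to the index of the allele whose sample set
            # contains it; indices align with the axis-0 order of self.data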
classes = []
for a in self.alleles:
classes.append(set([sn for _, sn in self.sample_allele[a]]))
def clsfy(classes, s):
for i,_set in enumerate(classes):
if s in _set:
return i
t_list = [clsfy(classes, sname) for sname in self.sample_names]
self._truth = np.array(t_list)
return self._truth
def get_run_id(self):
return self.run_id
def get_strain(self):
return self.strain
def get_result_files(self):
return self.result_files
def archive_package(self):
return (self.file_id, self._instructions, self.data)
@property
def compare_mat(self):
if self._compare_mat is None:
truth_mat = np.tile(self.truth, (self.nnets, 1))
#T.D. could slice here or compute once and check with map
self._compare_mat = (truth_mat == self.classified)
return self._compare_mat
class S3ResultSet(ResultSet):
def __init__(self, instructions, from_gpu_bucket_name ):
"""
instructions dictified run model
"""
super(S3ResultSet,self).__init__(instructions)
self._s3_from_gpu= from_gpu_bucket_name
self._from_gpu_bucket = None
@property
def from_gpu_bucket(self):
"""
Returns the S3 bucket object that contains the gpu generated
results
"""
attempts = 0
while not self._from_gpu_bucket:
attempts += 1
try:
conn = boto.connect_s3()
self._from_gpu_bucket = conn.get_bucket(self._s3_from_gpu)
except:
if attempts > 5:
raise
msg = "Could not connect to %s. Trying again. "
msg = msg % self._s3_from_gpu
self.logger.exception( msg )
time.sleep(2 + (random.random() * attempts))
return self._from_gpu_bucket
def _get_data(self, allele ):
"""
Returns the given alleles rms matrix
"""
complete = False
count = 0
while not complete:
try:
with tempfile.SpooledTemporaryFile() as temp:
key = self.from_gpu_bucket.get_key( self.result_files[allele] )
key.get_contents_to_file( temp )
temp.seek(0)
buffered_matrix = np.load( temp )
complete = True
except Exception as e:
print e
#print "error on get[%r], trying again" % self.result_files[allele]
count += 1
if count > 1:
raise FileCorruption('Error on File [%s] [%r]' % (allele,
self.result_files[allele] ) )
pass
return buffered_matrix[:self.nnets, :self.nsamp]
class LocalResultSet(ResultSet):
    def __init__(self, instructions, data_obj, local_path=None ):
        super(LocalResultSet, self).__init__( instructions )
        self._data = data_obj
        self.local_path = local_path
class Masked(object):
"""
Receives a resultset object and a mask (start, end) i.e. (5,10)
Returns the accuracy for all networks over that range as a numpy
array
"""
def __init__(self, result_set, mask):
self._result_set = result_set
self._mask = mask
@property
def mask(self):
return self._mask
@property
def run_id(self):
return self._result_set.run_id
@property
def result_set(self):
return self._result_set
@property
def accuracy(self):
"""
Returns a vector representing the accuracy of a each network
given this age range and ordering
"""
rs = self.result_set
mask_map = self._select_range()
accuracy = rs.compare_mat[:,mask_map].sum(axis=1)/float(len(mask_map))
return accuracy
def _select_range(self):
"""
        Returns the set of samples within the aforementioned age range.
(numpy vector containing their indices)
T.D. could only compute once
"""
rs = self.result_set
start = float(self.mask[0]) - .0001
end = float(self.mask[1]) + .0001
samp = set([])
for _, sl in rs.sample_allele.iteritems():
samp |= set([ sample_name for age, sample_name in sl if start <= age < end ])
return np.array([i for i,s in enumerate(rs.sample_names) if s in samp])
class ResultSetArchive(object):
def __init__( self,run_id, num_result_sets=100):
self.logger = logging.getLogger(__name__)
self._run_id = run_id
self._num = num_result_sets
self._rs_ctr = 0 # a single archive count
self._arch_ctr = 0 # the total count for this resultset archive
self._instructions = {}
self._data = {}
self._sent = {}
self._file_name = hashlib.md5()
self._arch_name = hashlib.md5()
self._truth = False
@property
def run_id(self):
return self._run_id
def add_result_set( self, result_set):
(file_id, inst, data) = result_set.archive_package()
self._instructions[file_id] = inst
self._data[file_id] = data
self._file_name.update( file_id )
self._rs_ctr += 1
self._arch_ctr += 1
if not result_set.shuffle:
self._truth = True
if self._rs_ctr >= self._num:
self.write()
def write(self):
self._write_instructions()
self._write_data()
self._sent[self.file_hash] = self._instructions.keys()
self._arch_name.update( self.file_hash )
self._instructions = {}
self._data = {}
self._rs_ctr = 0
self._file_name = hashlib.md5()
@property
def file_hash(self):
return self._file_name.hexdigest()
@property
def archive_hash(self):
return self._arch_name.hexdigest()
@property
def sent(self):
return self._sent
class S3ResultSetArchive(ResultSetArchive):
def __init__(self,run_id, bucket_name, path=None, num_result_sets=100 ):
super(S3ResultSetArchive,self).__init__(run_id, num_result_sets)
self._bucket_name = bucket_name
self._bucket = None
self._path = path
    def _write_data(self):
        with tempfile.SpooledTemporaryFile() as temp:
            np.savez(temp, **self._data)
            temp.seek(0)
            key = Key(self.bucket)
            if self._path:
                key.key = '%s/%s.npz' % ( self._path, self.file_hash)
            else:
                key.key = '%s.npz' % self.file_hash
            key.set_contents_from_file( temp )
    def _write_instructions(self):
        with tempfile.SpooledTemporaryFile() as temp:
            json.dump( self._instructions, temp)
            temp.seek(0)
            key = Key(self.bucket)
            if self._path:
                key.key = '%s/%s.json' % ( self._path, self.file_hash)
            else:
                key.key = '%s.json' % self.file_hash
            key.set_contents_from_file( temp )
@property
def bucket(self):
attempts = 0
while not self._bucket:
attempts += 1
try:
conn = boto.connect_s3()
self._bucket = conn.get_bucket(self._bucket_name)
except:
if attempts > 5:
raise
msg = "Could not connect to %s. Trying again. "
msg = msg % self._bucket_name
self.logger.exception( msg )
time.sleep(2 + (random.random() * attempts))
return self._bucket
def close_archive(self):
if self._rs_ctr > 0:
self.write()
with tempfile.SpooledTemporaryFile() as temp:
json.dump( self._sent, temp)
temp.seek(0)
key = Key(self.bucket)
if self._path:
key.key = '%s/%s.manifest.json' % ( self._path, self.archive_hash)
else:
key.key = '%s.manifest.json' % self.archive_hash
key.set_contents_from_file( temp )
run_mdl.insert_ANRunArchive( self.run_id, self.archive_hash, self._arch_ctr,
bucket = self._bucket_name,
archive_manifest = '%s.manifest.json' % self.archive_hash,
path = self._path, truth = self._truth)
if __name__ == "__main__":
sqs = boto.connect_sqs()
d2a = sqs.create_queue( 'from-data-to-agg-b6-canonical-q92-bak' )
archive = S3ResultSetArchive('this-is-a-test-run-id', 'an-scratch-bucket',
path="S3ResultSetArchiveTest3", num_result_sets=9 )
ctr = 0
for i in range(2):
messages = d2a.get_messages(10)
for message in messages:
ctr += 1
instructions = json.loads( message.get_body() )
rs = S3ResultSet(instructions, 'an-from-gpu-to-agg-b6-canonical-q92')
"""
print "rs.nsamp"
print rs.nsamp
print "rs.file_id"
print rs.file_id
print "rs.nnets"
print rs.nnets
print "rs.alleles"
print rs.alleles
print "rs.data"
print rs.data
print "rs.classified"
print rs.classified
print "rs.truth"
print rs.truth
print "rs.get_run_id()"
print rs.get_run_id()
print "rs.get_strain()"
print rs.get_strain()
print "rs.get_result_files()"
print rs.get_result_files()
print "rs.archive_package()"
print rs.archive_package()
for m in ["[0,100)", "[10,20)", "[13,17)", "[0,100)"]:
mrs = Masked( rs, m)
print "Mask id"
print mrs.mask_id
print "mrs.mask"
print mrs.mask
print "Masked accuracy"
print mrs.accuracy()
"""
archive.add_result_set( rs )
print ctr
print archive.sent
archive.close_archive()
|
Stay Safe: There are steep cliffs at this park. Please do not climb over the chain link fence. Watch children closely. Fences are provided only near the parking area and lookout. Hike at your own risk.
The park conserves forests of ponderosa pine at the northern limit of its range, and diverse low elevation lakes and marshes. The uplands, marshes, and lakes are rich ecosystems supporting abundant wildlife.
A spectacular display of colour illustrates the park’s rich geology in the Chasm Creek Valley and part of the Bonaparte River Valley. Successive lava flows form layers in varying tones of red, brown, yellow and purple, which have been revealed in the steep lava-layered canyon walls through erosion over the past 10 million years.
At the end of the last ice age, 10,000 years ago, water from the melting glaciers carried so much silt that it carved the 8 km long, 600-metre wide and 300-metre deep Chasm. An esker (ridge of gravel) formed by the glacier stretches 40 km upstream, northwest from the head of the Chasm.
Chasm Provincial Park protects the unique river canyon of the Chasm Creek Valley and part of the Bonaparte River Valley. In 1995, the park was recommended for expansion through the Cariboo Chilcotin Land-Use Plan. It was enlarged from 141 hectares to 3067 hectares to protect more of the area’s colourful geological formations and ponderosa pine forests. The unique features of Chasm Provincial Park offer hiking opportunities and spectacular backdrops for the avid photographer.
Facilities include a pull out viewing area and a larger parking area with a toilet.
The park is located along Chasm Creek. It can be accessed by taking Highway 97 to 16 km north of Clinton, and then driving 4 km to the park on a paved road east of Highway 97. It can also be accessed from further north off Highway 97 about 15 km southwest of 70 Mile House. Please refer to the Cariboo Forest Region Recreation Map (East) published by the Ministry of Forests for more information. Topographical map number 1:50,000 92P/3 shows land contours in detail. The closest communities, towns and cities are 70 Mile House, 100 Mile House and Clinton.
History: This park was established in 1940 to protect the Painted Chasm. In 1995, the park was recommended for expansion through the Cariboo Chilcotin Land-Use Plan. It was enlarged from 141 hectares to 3067 hectares to protect more of the area’s colourful geological formations and ponderosa pine forests. At the end of the last ice age, 10,000 years ago, water from the melting glaciers carried so much silt that it carved the 8 km long, 600 m wide and 300 m deep Chasm. An esker (ridge of gravel) formed by the glacier stretches 40 km upstream, northwest from the head of the Chasm. Layers of volcanic lava can be distinguished in the steep canyon walls.
Conservation: Chasm Provincial Park protects a lava-layered canyon formed by glacial melt water erosion. The park also conserves forests of ponderosa pine at the northern limit of its range, and diverse low elevation lakes and marshes. The uplands, marshes, and lakes are rich ecosystems supporting abundant wildlife.
Wildlife: Bighorn sheep inhabit the steep wall of the canyon. Moose, mule deer, black bear, coyote, small mammals, songbirds and birds of prey inhabit this area.
Cycling is permitted. Bicycles must keep to roadways. Bicycle helmets are mandatory in British Columbia.
This park has informal hiking trails (generally following old roads – there are no signs to mark the routes). An old road, which leads along the southern edge of the chasm, offers occasional spectacular views and a very pleasant experience of dry pine and fir forest. There are steep cliffs at this park. Watch children closely. Fences are provided only near the parking area and lookout. Hike at your own risk.
There are horseback riding opportunities in this park.
There is a viewpoint. The view is of a spectacular display of colour which illustrates the park’s rich geology in the Chasm Creek Valley and part of the Bonaparte River Valley. Successive lava flows form layers in varying tones of red, brown, yellow and purple, which have been revealed in the steep lava-layered canyon walls through erosion over the past 10 million years.
There is a viewpoint and parking area for day-use activities, but no developed picnic area. The view is a spectacular display of colour illustrating the park’s rich geology in the Chasm Creek Valley and part of the Bonaparte River Valley. Successive lava flows form layers in varying tones of red, brown, yellow and purple, which have been revealed in the steep lava-layered canyon walls through erosion over the past 10 million years. |
from ecell4.reaction_reader.decorator2 import species_attributes, reaction_rules
from ecell4.reaction_reader.network import generate_reactions
@species_attributes
def attributegen():
R(r1,r2,r=(r1,r2)) | R0
L(l1,l2,l=(l1,l2)) | L0
@reaction_rules
def rulegen():
# Ligand addition
R(r) + L(_1,_2,l=[_1,_2]) == R(r^1).L(_1^1,_2,l=[_1,_2]) | (kp1, km1)
# R(r) + L(l1,l2) == R(r^1).L(l1^1,l2) | (kp1, km1)
# Chain elongation
R(r) + L(_1,_2^_,l=[_1,_2]) == R(r^1).L(_1^1,_2^_,l=[_1,_2]) | (kp2, km2)
# R(r) + L(l1,l2^_) == R(r^1).L(l1^1,l2^_) | (kp2, km2)
# Ring closure
R(r).L(l) == R(r^1).L(l^1) | (kp3, km3)
if __name__ == "__main__":
newseeds = []
for i, (sp, attr) in enumerate(attributegen()):
print i, sp, attr
newseeds.append(sp)
print ''
rules = rulegen()
for i, rr in enumerate(rules):
print i, rr
print ''
seeds, reactions = generate_reactions(
newseeds, rules, max_stoich={"R": 5, "L": 5})
for i, seed in enumerate(seeds):
print i, seed
# print ''
# for i, reaction in enumerate(reactions):
# print i, reaction
# setOption("SpeciesLabel","HNauty")
# begin model
# begin parameters
# kp1 1
# km1 1
# kp2 1
# km2 1
# kp3 1
# km3 1
# R0 3e5
# L0 3e5
# end parameters
#
# begin seed species
# R(r,r) R0
# L(l,l) L0
# end seed species
#
# begin reaction rules
# # Ligand addition
# R(r) + L(l,l) <-> R(r!1).L(l!1,l) kp1,km1
#
# # Chain elongation
# R(r) + L(l,l!+) <-> R(r!1).L(l!1,l!+) kp2,km2
#
# # Ring closure
# R(r).L(l) <-> R(r!1).L(l!1) kp3,km3
# end reaction rules
# end model
#
# ## actions ##
# generate_network({overwrite=>1,max_stoich=>{R=>5,L=>5}})
|
A district court has discretion to retroactively adjust a temporary child support obligation. The never-married parents in this case separated, and a temporary child support obligation was established. For the final order, the court set the father’s income at an amount higher than the amount used for the temporary order, which increased the amount of child support. The court declined to modify the order retroactive to the date of the temporary order. The father appealed the income calculation, and the mother cross-appealed on the issue of retroactivity. The appellate court found no abuse of discretion in the court’s determination of the father’s income. The appellate court also found no abuse of discretion in the decision not to retroactively increase the temporary amount. Temporary child support orders are not appealable until the appeal from the final order. The mother argued that she had presented evidence to the court at the time of the temporary hearing that supported the higher income amount. The court’s adoption of the higher income figure for the final order confirmed her position during the temporary hearing, and the temporary amount should be modified accordingly. The appellate court found the lower amount was reasonable for the temporary award based on the available evidence at that time. |
import ctypes
_jboolean = ctypes.c_ubyte
_jbyte = ctypes.c_int8    # JNI jbyte is a signed 8-bit integer
_jchar = ctypes.c_uint16  # JNI jchar is an unsigned 16-bit UTF-16 unit
_jshort = ctypes.c_int16
_jint = ctypes.c_int32
_jlong = ctypes.c_int64
_jfloat = ctypes.c_float
_jdouble = ctypes.c_double
_jsize = _jint
class _jobject_struct(ctypes.Structure):
    _fields_ = []
_jobject = ctypes.POINTER(_jobject_struct)
_jclass = _jobject
_jthrowable = _jobject
_jstring = _jobject
_jarray = _jobject
_jobjectArray = _jarray
_jbooleanArray = _jarray
_jbyteArray = _jarray
_jcharArray = _jarray
_jshortArray = _jarray
_jintArray = _jarray
_jlongArray = _jarray
_jfloatArray = _jarray
_jdoubleArray = _jarray
_jweak = _jobject
class _jvalue(ctypes.Union):
_fields_ = [
('z', _jboolean),
('b', _jbyte),
('c', _jchar),
('s', _jshort),
('i', _jint),
('j', _jlong),
('f', _jfloat),
('d', _jdouble),
('l', _jobject),
]
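# Example: packing arguments for a Call<Type>MethodA-style function
# (illustrative; real values would come from a live JNIEnv):
#
#     args = (_jvalue * 2)()
#     args[0].i = 42  # first argument: a jint
#     args[1].z = 1   # second argument: a jboolean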
class _jmethodID_struct(ctypes.Structure):
_fields_ = []
_jmethodID = ctypes.POINTER(_jmethodID_struct)
class _jfieldID_struct(ctypes.Structure):
_fields_ = []
_jfieldID = ctypes.POINTER(_jfieldID_struct)
class _JNINativeMethod(ctypes.Structure):
    _fields_ = [
('name', ctypes.c_char_p, ),
('signature', ctypes.c_char_p),
('fnPtr', ctypes.c_void_p),
]
class _JavaVMOption(ctypes.Structure):
    _fields_ = [
('optionString', ctypes.c_char_p),
('extraInfo', ctypes.c_void_p),
]
class _JavaVMInitArgs(ctypes.Structure):
    _fields_ = [
('version', _jint),
('nOptions', _jint),
('options', ctypes.POINTER(_JavaVMOption)),
('ignoreUnrecognized', _jboolean)
]
class _JavaVM(ctypes.Structure):
    _fields_ = [
('functions', ctypes.c_void_p),
# really a ctypes.POINTER(_JNIInvokeInterface)
]
class _JNIInvokeInterface(ctypes.Structure):
    _fields_ = [
('reserved0', ctypes.c_void_p),
('reserved1', ctypes.c_void_p),
('reserved2', ctypes.c_void_p),
('DestroyJavaVM',
ctypes.POINTER(ctypes.CFUNCTYPE(
_jint,
ctypes.POINTER(_JavaVM) # JavaVM* vm
))
),
('AttachCurrentThread',
ctypes.POINTER(ctypes.CFUNCTYPE(
_jint,
ctypes.POINTER(_JavaVM), # JavaVM* vm
ctypes.POINTER(ctypes.c_void_p), # void** penv
ctypes.c_void_p, # void* args
))
),
('DetachCurrentThread',
ctypes.POINTER(ctypes.CFUNCTYPE(
_jint,
ctypes.POINTER(_JavaVM), # JavaVM* vm
))
),
('GetEnv',
ctypes.POINTER(ctypes.CFUNCTYPE(
_jint,
ctypes.POINTER(_JavaVM), # JavaVM* vm
ctypes.POINTER(ctypes.c_void_p), # void** penv
_jint, # jint version
))
),
('AttachCurrentThreadAsDaemon',
ctypes.POINTER(ctypes.CFUNCTYPE(
_jint,
ctypes.POINTER(_JavaVM), # JavaVM* vm
ctypes.POINTER(ctypes.c_void_p), # void** penv
ctypes.c_void_p, # void* args
))
),
]
class _JNIEnv(ctypes.Structure):
    _fields_ = [
('functions', ctypes.c_void_p),
# really a ctypes.POINTER(_JNINativeInterface)
]
class _JNINativeInterface(ctypes.Structure):
    _fields_ = [
('reserved0', ctypes.c_void_p),
('reserved1', ctypes.c_void_p),
('reserved2', ctypes.c_void_p),
('reserved3', ctypes.c_void_p),
('foo',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.c_int, # a
ctypes.POINTER(ctypes.c_int), # b
))
),
('GetVersion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
))
),
('DefineClass',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
ctypes.POINTER(ctypes.c_char), # name
_jobject, # loader
ctypes.POINTER(_jbyte), # buf
_jsize, # len
))
),
('FindClass',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
ctypes.POINTER(ctypes.c_char), # name
))
),
('FromReflectedMethod',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # method
))
),
('FromReflectedField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # field
))
),
('ToReflectedMethod',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # cls
_jmethodID, # methodID
_jboolean, # isStatic
))
),
('GetSuperclass',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # sub
))
),
('IsAssignableFrom',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # sub
_jclass, # sup
))
),
('ToReflectedField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # cls
_jfieldID, # fieldID
_jboolean, # isStatic
))
),
('Throw',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jthrowable, # obj
))
),
('ThrowNew',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
ctypes.POINTER(ctypes.c_char), # msg
))
),
('ExceptionOccurred',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
))
),
('ExceptionDescribe',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
))
),
('ExceptionClear',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
))
),
('FatalError',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
ctypes.POINTER(ctypes.c_char), # msg
))
),
('PushLocalFrame',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jint, # capacity
))
),
('PopLocalFrame',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # result
))
),
('NewGlobalRef',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # lobj
))
),
('DeleteGlobalRef',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # gref
))
),
('DeleteLocalRef',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
))
),
('IsSameObject',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj1
_jobject, # obj2
))
),
('NewLocalRef',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # ref
))
),
('EnsureLocalCapacity',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jint, # capacity
))
),
('AllocObject',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
))
),
# NewObject skipped because of varargs
# NewObjectV skipped because of varargs
('NewObjectA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
('GetObjectClass',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
))
),
('IsInstanceOf',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
))
),
('GetMethodID',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
ctypes.POINTER(ctypes.c_char), # name
ctypes.POINTER(ctypes.c_char), # sig
))
),
# CallObjectMethod skipped because of varargs
# CallObjectMethodV skipped because of varargs
('CallObjectMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallBooleanMethod skipped because of varargs
# CallBooleanMethodV skipped because of varargs
('CallBooleanMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallByteMethod skipped because of varargs
# CallByteMethodV skipped because of varargs
('CallByteMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallCharMethod skipped because of varargs
# CallCharMethodV skipped because of varargs
('CallCharMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallShortMethod skipped because of varargs
# CallShortMethodV skipped because of varargs
('CallShortMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallIntMethod skipped because of varargs
# CallIntMethodV skipped because of varargs
('CallIntMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallLongMethod skipped because of varargs
# CallLongMethodV skipped because of varargs
('CallLongMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallFloatMethod skipped because of varargs
# CallFloatMethodV skipped because of varargs
('CallFloatMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallDoubleMethod skipped because of varargs
# CallDoubleMethodV skipped because of varargs
('CallDoubleMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallVoidMethod skipped because of varargs
# CallVoidMethodV skipped because of varargs
('CallVoidMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualObjectMethod skipped because of varargs
# CallNonvirtualObjectMethodV skipped because of varargs
('CallNonvirtualObjectMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualBooleanMethod skipped because of varargs
# CallNonvirtualBooleanMethodV skipped because of varargs
('CallNonvirtualBooleanMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualByteMethod skipped because of varargs
# CallNonvirtualByteMethodV skipped because of varargs
('CallNonvirtualByteMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualCharMethod skipped because of varargs
# CallNonvirtualCharMethodV skipped because of varargs
('CallNonvirtualCharMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualShortMethod skipped because of varargs
# CallNonvirtualShortMethodV skipped because of varargs
('CallNonvirtualShortMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualIntMethod skipped because of varargs
# CallNonvirtualIntMethodV skipped because of varargs
('CallNonvirtualIntMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualLongMethod skipped because of varargs
# CallNonvirtualLongMethodV skipped because of varargs
('CallNonvirtualLongMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualFloatMethod skipped because of varargs
# CallNonvirtualFloatMethodV skipped because of varargs
('CallNonvirtualFloatMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualDoubleMethod skipped because of varargs
# CallNonvirtualDoubleMethodV skipped because of varargs
('CallNonvirtualDoubleMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualVoidMethod skipped because of varargs
# CallNonvirtualVoidMethodV skipped because of varargs
('CallNonvirtualVoidMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
('GetFieldID',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
ctypes.POINTER(ctypes.c_char), # name
ctypes.POINTER(ctypes.c_char), # sig
))
),
('GetObjectField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetBooleanField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetByteField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetCharField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetShortField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetIntField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetLongField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetFloatField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetDoubleField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('SetObjectField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jobject, # val
))
),
('SetBooleanField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jboolean, # val
))
),
('SetByteField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jbyte, # val
))
),
('SetCharField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jchar, # val
))
),
('SetShortField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jshort, # val
))
),
('SetIntField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jint, # val
))
),
('SetLongField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jlong, # val
))
),
('SetFloatField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jfloat, # val
))
),
('SetDoubleField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jdouble, # val
))
),
('GetStaticMethodID',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
ctypes.POINTER(ctypes.c_char), # name
ctypes.POINTER(ctypes.c_char), # sig
))
),
# CallStaticObjectMethod skipped because of varargs
# CallStaticObjectMethodV skipped because of varargs
('CallStaticObjectMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticBooleanMethod skipped because of varargs
# CallStaticBooleanMethodV skipped because of varargs
('CallStaticBooleanMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticByteMethod skipped because of varargs
# CallStaticByteMethodV skipped because of varargs
('CallStaticByteMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticCharMethod skipped because of varargs
# CallStaticCharMethodV skipped because of varargs
('CallStaticCharMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticShortMethod skipped because of varargs
# CallStaticShortMethodV skipped because of varargs
('CallStaticShortMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticIntMethod skipped because of varargs
# CallStaticIntMethodV skipped because of varargs
('CallStaticIntMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticLongMethod skipped because of varargs
# CallStaticLongMethodV skipped because of varargs
('CallStaticLongMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticFloatMethod skipped because of varargs
# CallStaticFloatMethodV skipped because of varargs
('CallStaticFloatMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticDoubleMethod skipped because of varargs
# CallStaticDoubleMethodV skipped because of varargs
('CallStaticDoubleMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticVoidMethod skipped because of varargs
# CallStaticVoidMethodV skipped because of varargs
('CallStaticVoidMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # cls
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
('GetStaticFieldID',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
ctypes.POINTER(ctypes.c_char), # name
ctypes.POINTER(ctypes.c_char), # sig
))
),
('GetStaticObjectField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticBooleanField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticByteField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticCharField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticShortField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticIntField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticLongField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticFloatField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticDoubleField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('SetStaticObjectField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jobject, # value
))
),
('SetStaticBooleanField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jboolean, # value
))
),
('SetStaticByteField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jbyte, # value
))
),
('SetStaticCharField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jchar, # value
))
),
('SetStaticShortField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jshort, # value
))
),
('SetStaticIntField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jint, # value
))
),
('SetStaticLongField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jlong, # value
))
),
('SetStaticFloatField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jfloat, # value
))
),
('SetStaticDoubleField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jdouble, # value
))
),
('NewString',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
ctypes.POINTER(_jchar), # unicode
_jsize, # len
))
),
('GetStringLength',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
))
),
('GetStringChars',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
ctypes.POINTER(_jboolean), # isCopy
))
),
('ReleaseStringChars',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
ctypes.POINTER(_jchar), # chars
))
),
('NewStringUTF',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
ctypes.POINTER(ctypes.c_char), # utf
))
),
('GetStringUTFLength',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
))
),
('GetStringUTFChars',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
ctypes.POINTER(_jboolean), # isCopy
))
),
('ReleaseStringUTFChars',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
ctypes.POINTER(ctypes.c_char), # chars
))
),
('GetArrayLength',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jarray, # array
))
),
('NewObjectArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
_jclass, # clazz
_jobject, # init
))
),
('GetObjectArrayElement',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobjectArray, # array
_jsize, # index
))
),
('SetObjectArrayElement',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobjectArray, # array
_jsize, # index
_jobject, # val
))
),
('NewBooleanArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('NewByteArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('NewCharArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('NewShortArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('NewIntArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('NewLongArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('NewFloatArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('NewDoubleArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('GetBooleanArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbooleanArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('GetByteArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbyteArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('GetCharArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jcharArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('GetShortArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
        _jshortArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('GetIntArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jintArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('GetLongArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jlongArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('GetFloatArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jfloatArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('GetDoubleArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jdoubleArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('ReleaseBooleanArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbooleanArray, # array
ctypes.POINTER(_jboolean), # elems
_jint, # mode
))
),
('ReleaseByteArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbyteArray, # array
ctypes.POINTER(_jbyte), # elems
_jint, # mode
))
),
('ReleaseCharArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jcharArray, # array
ctypes.POINTER(_jchar), # elems
_jint, # mode
))
),
('ReleaseShortArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
        _jshortArray, # array
ctypes.POINTER(_jshort), # elems
_jint, # mode
))
),
('ReleaseIntArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jintArray, # array
ctypes.POINTER(_jint), # elems
_jint, # mode
))
),
('ReleaseLongArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jlongArray, # array
ctypes.POINTER(_jlong), # elems
_jint, # mode
))
),
('ReleaseFloatArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jfloatArray, # array
ctypes.POINTER(_jfloat), # elems
_jint, # mode
))
),
('ReleaseDoubleArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jdoubleArray, # array
ctypes.POINTER(_jdouble), # elems
_jint, # mode
))
),
('GetBooleanArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbooleanArray, # array
_jsize, # start
        _jsize, # len
ctypes.POINTER(_jboolean), # buf
))
),
('GetByteArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbyteArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jbyte), # buf
))
),
('GetCharArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jcharArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jchar), # buf
))
),
('GetShortArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
        _jshortArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jshort), # buf
))
),
('GetIntArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jintArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jint), # buf
))
),
('GetLongArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jlongArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jlong), # buf
))
),
('GetFloatArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jfloatArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jfloat), # buf
))
),
('GetDoubleArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jdoubleArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jdouble), # buf
))
),
('SetBooleanArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbooleanArray, # array
_jsize, # start
        _jsize, # len
ctypes.POINTER(_jboolean), # buf
))
),
('SetByteArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbyteArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jbyte), # buf
))
),
('SetCharArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jcharArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jchar), # buf
))
),
('SetShortArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
        _jshortArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jshort), # buf
))
),
('SetIntArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jintArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jint), # buf
))
),
('SetLongArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jlongArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jlong), # buf
))
),
('SetFloatArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jfloatArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jfloat), # buf
))
),
('SetDoubleArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jdoubleArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jdouble), # buf
))
),
('RegisterNatives',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
ctypes.POINTER(_JNINativeMethod), # methods
_jint, # nMethods
))
),
('UnregisterNatives',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
))
),
('MonitorEnter',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
))
),
('MonitorExit',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
))
),
('GetJavaVM',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
ctypes.POINTER(ctypes.POINTER(_JavaVM)), # vm
))
),
('GetStringRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
_jsize, # start
_jsize, # len
ctypes.POINTER(_jchar), # buf
))
),
('GetStringUTFRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
_jsize, # start
_jsize, # len
ctypes.POINTER(ctypes.c_char), # buf
))
),
('GetPrimitiveArrayCritical',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jarray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('ReleasePrimitiveArrayCritical',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jarray, # array
ctypes.c_void_p, # carray
_jint, # mode
))
),
('GetStringCritical',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # string
ctypes.POINTER(_jboolean), # isCopy
))
),
('ReleaseStringCritical',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # string
ctypes.POINTER(_jchar), # cstring
))
),
('NewWeakGlobalRef',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
))
),
('DeleteWeakGlobalRef',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jweak, # ref
))
),
('ExceptionCheck',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
))
),
('NewDirectByteBuffer',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
ctypes.c_void_p, # address
_jlong, # capacity
))
),
('GetDirectBufferAddress',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # buf
))
),
('GetDirectBufferCapacity',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # buf
))
),
('GetObjectRefType',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
))
),
]
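# A minimal sketch (not part of the original file) of how a table of
# (name, pointer-to-CFUNCTYPE) pairs like the one above is typically
# consumed: the pairs already have the exact shape ctypes expects for a
# Structure's _fields_, so they can model the JNIEnv function table
# (vtable) directly. The names `_JNINativeInterface` and `JNI_ENV_FIELDS`
# below are hypothetical stand-ins for whatever this module actually uses.
#
#   class _JNINativeInterface(ctypes.Structure):
#       pass
#   _JNINativeInterface._fields_ = JNI_ENV_FIELDS  # the list closed above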
|
Homeschooling is one of those adventures that can be expensive! You think it’s going to be inexpensive, but then everything starts adding up. That’s why I am happy to share How to Homeschool Multiple Grade Levels on a Budget. If you have multiple grade levels to homeschool, it can be hard to do it on a budget. You aren’t the first person to homeschool multiple grade levels, though; it’s all about learning from those who have gone before you!
I am a BookShark Homeschool Curriculum partner this year. While I was not paid for this post, I did receive free curriculum in exchange for sharing my opinion.
Homeschooling on a budget is a must for most of us. When you have children of all ages, it can get expensive fast. That’s why learning how to homeschool multiple grade levels on a budget is so important, and it’s truly easier than you think!
This is one of the biggest reasons that BookShark is a highly recommended curriculum. Being able to use one level over a few ages and grades really has a ton of benefits. Katie started Level 6 with her son, who was in 5th grade, early this year. While they didn’t finish the full curriculum by the end of their school year, they can go back and use this level for him to continue into what would traditionally be 6th grade.
How is this possible? Because this curriculum is all about teaching naturally on the level your child is currently learning. With the History and Language Arts working together so well, all you have to do is choose additional things like a spelling program, creative writing program, or your own additional homemade worksheets. So, for my 5th grader and my 3rd grader, I could easily use Level 4 or even Level 5. I would work through the same books and timeline with them together, but then add in closer-to-grade-level spelling drills, handwriting practice or even creative writing prompts.
BookShark gives you a chance to cover multiple ages of children all for one price. You can do read aloud books together, and let children take turns with the books, or even pick up extra copies if desired.
If you’re going to continue homeschooling, don’t get rid of your previously used curriculum. You can easily save that curriculum for your next child. Reusing curriculum is a very budget-friendly way to homeschool multiple grade levels. Here are some tips for reusing curriculum.
Make copies of worksheets instead of having the student write in the workbook.
Use a separate sheet of paper for the answers.
Remind your child to take extra good care of the curriculum.
Take precautions to protect the outside of the workbooks and books!
This is a perfect way to use curriculum that you can purchase and use multiple years without spending so much up front. With BookShark, you naturally have an option to reuse for younger children or use together if close in educational level.
One of our favorite things is to visit local science museums. There are tons of great learning experiences that appeal to all ages in one location. We’ve often spent hours learning about waterworks, plant life, or even engineering. The best thing about science museums is that they usually have the same concepts repeated in multiple areas on multiple levels to ensure everyone can understand and learn no matter their age.
We have used our BookShark Science curriculum to learn about many things, and even done the experiments that come with the curriculum, but sometimes we want to see things on a larger scale. Heading to the local museum helps us do more hands-on learning while still having fun with science.
Not every concept has to be taught differently. For example, you could spend time teaching all your children about fractions. They don’t necessarily have to be a certain grade level to learn a new concept. You can do unit studies to help save money, and your sanity too. This is one of the best methods of education, as it also allows you to dive into deeper concepts with older students while reviewing basics.
It is also another great reason that using BookShark works so well. You are teaching a lot of the main concepts together but can also break things down to grade level if needed.
If you have not used the library as a resource for homeschooling multiple grade levels on a budget, then you’re missing out. You can even look online to see what is available before you head to the library. There are apps in which you can use your library card to read books. You can put books on hold and you can go exploring to find books you never thought you’d use. Utilize the library as much as you can, especially if you’re using a homemade curriculum with your kids.
BookShark really fosters a love of reading in your children. The library is a great resource that can help you find even more books that fit the topic of study, or those extra copies needed for you to read together.
Weekly field trips are a huge part of our homeschool routine. We can easily teach while exploring a museum, a local arboretum, the capitol building, or even city hall. In fact, one friend recently took her children to a local grocery store and had the kids learn a little about each department and how it works. It is simple to adapt to the individual needs of your children but still be family-friendly in the process.
BookShark gives you a 4-day school week that makes field trips fit easily into your routine. You aren’t tied down to a Monday-Friday school schedule, and that makes it a ton easier to manage fun field trips and downtime.
So, the next time you’re wondering if you can homeschool multiple grade levels on a budget, know that you can. Do you have any tips to pass on to other homeschool parents? |
from django.conf import settings
from django.utils.http import urlquote
class ChangeContentType(object):
    """Serve selected static files with an explicit Content-Type.

    Some static file types (here, .htc behavior files) must be sent with a
    specific content type that the default static handler does not set.
    """
    STATIC_CONTENT = [settings.MEDIA_URL,
                      settings.STATIC_URL,
                      settings.ADMIN_MEDIA_PREFIX,
                      ]
    FILE_ASSOCIATION = {'htc': 'text/x-component'}

    def is_supported(self, path):
        """Return True if the path points at static content we handle."""
        return any(path.startswith(p) for p in self.STATIC_CONTENT)
    def process_response(self, request, response):
        path = urlquote(request.get_full_path())
        # str.split('.') always returns at least one element, so take the
        # extension only when the path actually contains a dot.
        extension = path.split('.')[-1] if '.' in path else None
        if self.is_supported(path) and extension in self.FILE_ASSOCIATION:
            response['Content-Type'] = self.FILE_ASSOCIATION[extension]
        return response
class StagingMarquee(object):
    """Inject a red 'STAGING' banner just before </body> on every page."""
    def process_response(self, request, response):
content = response.content
index = content.upper().find('</BODY>')
if index == -1:
return response
marquee = "<div style='color:red;position:absolute;top:0;font-weight:bold;font-size:20px;'>STAGING</div>"
response.content = content[:index] + marquee + content[index:]
return response
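# A minimal wiring sketch (not part of the original module), assuming the
# old-style Django MIDDLEWARE_CLASSES setting used by process_response
# middleware; the dotted paths below are hypothetical:
#
#   MIDDLEWARE_CLASSES = (
#       'myproject.middleware.ChangeContentType',
#       'myproject.middleware.StagingMarquee',
#   )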
|
THOMPSON141: The Royal Crescent, Bath.
This superb work by Dave Thompson shows a beautiful design of The Royal Crescent, Bath. |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from report import report_sxw
from osv import osv
class report_parser(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(report_parser, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'cr':cr,
'uid': uid,
'general_info': self.general_info,
'tab_year': self.tab_year
})
    def tab_year(self, obj):
        """Return the distinct years found in the users.active records."""
        cr = self.localcontext.get('cr')
        uid = self.localcontext.get('uid')
        users_active_obj = self.pool.get('users.active')
        all_active_users_ids = users_active_obj.search(cr, uid, [])
        all_active_users = users_active_obj.browse(cr, uid, all_active_users_ids)
        tab_year = []
        for active_user in all_active_users:
            if active_user.year not in tab_year:
                tab_year.append(active_user.year)
        return tab_year
    def general_info(self, obj):
        """Collect company, user, database and print-date data for the report."""
        cr = self.localcontext.get('cr')
        uid = self.localcontext.get('uid')
        context = self.localcontext
        db = cr.dbname
        obj_user = self.pool.get('res.users')
        res_lang_obj = self.pool.get('res.lang')
        user = obj_user.browse(cr, uid, uid, context=context)
        company_name = user.company_id.partner_id.name
        # Default to ISO format so time.strftime() below never receives an
        # undefined format when the context language cannot be resolved.
        date_format = '%Y-%m-%d'
        lang_ids = res_lang_obj.search(cr, uid, [('code', '=', context.get('lang'))])
        if len(lang_ids) == 1:
            lang = res_lang_obj.browse(cr, uid, lang_ids[0])
            date_format = lang.date_format
        data = {
            'company_name': company_name,
            'user': user,
            'database': db,
            'date_print': time.strftime(date_format),
        }
        return data
report_sxw.report_sxw('report.users.active',
'users.active',
'addons/users_statistics/report/users_by_month.mako',
parser=report_parser) |
Poe’s “For Annie” has been popular with anthologists and their readers for generations. It is a great accomplishment, undeniably [page 453:] effective, although a tour de force.(1) The subject is quietude, but a most unquiet rhythm is employed, obviously suggested by Thomas Hood’s “Bridge of Sighs” — a poem Poe recited in his lectures on “The Poetic Principle” in 1848 and 1849.
On March 23, 1849, Poe wrote to Mrs. Richmond: “I enclose also some other lines ‘For Annie’ — and will you let me know in what manner they impress you?” He added that he had sent them to the Flag of Our Union. This mention clearly refers to the present poem; “A Dream Within a Dream” had been called “For Annie” in a manuscript previously sent to Mrs. Richmond.
Poe’s relation with Gleason and Ballou’s Flag of Our Union was not a happy one. On April 20, 1849, he wrote to Willis: “The poem which I enclose ... has been just published in a paper [page 455:] for which sheer necessity compels me to write ... It pays well as times go — but ... whatever I send it I feel I am consigning to the tomb of the Capulets.” He went on to ask Willis to reprint the lines as “From a late [Boston] paper” and to write an introduction in the Home Journal. The Flag regularly came out a full week before the date it bore, and Poe rightly assumed that “For Annie” would be in the issue appearing April 21. Willis sent the script Poe had given him to the printer at once, with the note: “Will Mr. Babcock please put this on the second page this week, & leave me twenty lines room for an introduction.” The result was that even if the Flag actually reached dealers before the Home Journal, Poe’s poem appeared in two papers bearing the same date. The Flag, not unnaturally, protested in the issue for May 12. Poe soon thereafter sent Mrs. Richmond word that the Flag had misprinted his lines — an accusation which (if true at all) can only mean that the editor refused to make changes in proof at the last minute. Poe added that the Flag still had two of his articles. These had been paid for and were printed, but the paper for which he felt such contempt purchased nothing more of Poe’s.
On May 23 Poe sent a Home Journal clipping to E. H. N. Patterson, thus authorizing its publication in that young editor’s paper, the Spectator of Oquawka, Illinois.(7) Poe sent to Griswold in June “perfect copies” of “Annabel Lee” and “For Annie” for use in Poets and Poetry of America; the former is a manuscript, but the latter may well have been a corrected clipping.(8) Griswold used the same text in the Works as in his anthology.
(A) Manuscript sent to Mrs. Richmond, March 23, 1849, facsimiled in London Bibliophile, May 1909; (B) Boston Flag of Our Union for April 28, 1849; (C) manuscript sent to N. P. Willis on April 20, 1849; (D) Home Journal for April 28, 1849; (E) Oquawka Spectator, May 16, 1849; (F) Poets [page 456:] and Poetry of America, 10th edition (dated 1850, issued late in 1849), p. 422; (G) Works (1850), II, 48-51; (H) manuscript sent to Susan Archer Talley on September 26, 1849, now lost; (Z) Richmond Examiner proofsheets from Whitty, Complete Poems (1911), pp. 74-77.
The text adopted is Griswold’s (G), which has a superior reading in line 97. The presentation manuscript (A), once lent to Ingram, was in the Harold Peirce Sale, Philadelphia, May 6, 1903, lot 958, and is now owned by Colonel Richard Gimbel. The manuscript given to Willis (C), now incomplete, was once in the collection of the late William H. Koester. The manuscript sent Miss Talley on September 26, 1849, is mentioned in the article “Last Days of Edgar A. Poe” by Susan Archer Talley Weiss in Scribner’s Magazine for March 1878 (15:714) but was destroyed during the Civil War. The text in Griswold’s Poets and Poetry of America (F) is verbally like that in Works (G), but the printer used apostrophes thus: in line 6, conquer’d; 28, madden’d; 29, burn’d; 70, Drown’d; 79, extinguish’d; 80, cover’d. The Examiner proofsheets (Z) are said to have had a unique reading, in line 45.
10, 12 Wilbur (Poe, p. 150), calls the double use of length “a lame pun ... probably not so intended.” It seems to me rather a successful use of “absolute rhyme,” since it has been so rarely noticed.
22 See “The Beloved Physician” for a note on Poe’s heart trouble.
25 Many readers may regret that Poe chose to be so clinically accurate here. He defended Shelley’s use of “sicken” in a review of a book called The Poetry of Life (by Sarah Stickney) in the Southern Literary Messenger, January 1836.
35 “Napthaline” is better spelled “naphthalene.” Moore, in a note to Lalla Rookh, quotes Scott Waring as saying, “Naptha [sic] is used by the Persians (as we are told by Milton it was in Hell) for lamps.” It is clear, combustible rock oil, procured by the ancients from asphaltum, usually brought from the Dead Sea. Wilbur (Poe, p. 150) points out the allusion to Phlegethon, the fiery river of Hades. See also line 53.
37 John 4:14: “But whosoever drinketh of the water that I shall give him shall never thirst, but the water that I shall give him shall be in him a well of water springing up into everlasting life.” The unusual form of the participle drank is in all texts authorized by Poe.
39-44 Wilbur, p. 150, suggests an allusion to Lethe, the river of forgetfulness.
53 Tantalus was tortured by water he could not drink and fruit he could not reach; see also line 35.
56-58 Roses and myrtles are symbols of love.
83 The Blessed Virgin is called Regina angelorum (queen of the angels). Compare Poe’s “Hymn,” addressed to our Lady.
102 In “Landor’s Cottage” we are told, “The eyes of Annie ... were spiritual gray; her hair a light chestnut.” The eyes of the lady in “To One in Paradise” were changed to gray in the last revised version, perhaps for Annie’s benefit.
1 Quinn (p. 600) says it “is one of Poe’s finest poems ... He reproduced an emotional state by a short throbbing measure ... the very incoherencies mirror perfectly the mood.” N. P. Willis invented the word “individualesque” for it, in the introduction he wrote at Poe’s request and published with “For Annie” in the Home Journal of April 28, 1849. This introduction, headed “Odd Poem,” was reprinted by Killis Campbell (Poems, p. 288); most of it has little pertinence.
2 Stedman, Poets of America (Boston and New York, 1885), p. 246. Campbell (Poems, p. 289) gives these and other favorable opinions, and some from critics who disliked the poem, because of the clinical details in some stanzas.
3 Phillips, II, 1293ff., quoting directly a letter of December 18, 1915, from Mrs. George P. Lawrence, Mr. Richmond’s niece.
4 Complete copies of Poe’s letters were made surreptitiously before the originals were destroyed. About thirty years ago my late friend, James Southall Wilson, read them under pledge not to make copies, a pledge which he kept. He did, however, tell me of the nature of the passage to which I refer. I have been unable to trace these transcripts myself.
5 In Poe’s day laudanum was a specific remedy for toothache and diarrhœa, was sold without a prescription by all druggists, and was often taken for other maladies, real or imagined.
6 Smith, Edgar Allan Poe: How to Know Him (Indianapolis, 1921), p. 232. Mallarmé’s interpretation appeared (with Édouard Manet’s illustrations) in the first edition of his famous book Les Poèmes d’Edgar Poe: Traduction en Prose (Paris, 1889), pp. 190-191.
7 Apparently Patterson had already received a copy of the Home Journal or a clipping from it, since he published an identical text in his paper for May 16. I have used the file in the library of Knox College, Galesburg, Illinois.
8 A letter of “May 17, 1849,” addressed to Mrs. Lewis, promising to make a copy of “For Annie” for her that day and signed “Edgar A. Poe,” has no history prior to 1935 and I cannot regard it as authentic. It is hard to believe we should have no earlier trace of so important a manuscript if the Brooklyn poetess ever had it. |
#pylint: disable=bare-except, invalid-name, too-many-nested-blocks, too-many-locals, too-many-branches
"""
Optional utilities to communicate with ONcat.
ONcat is an online data catalog used internally at ORNL.
@copyright: 2018 Oak Ridge National Laboratory
"""
import sys
import datetime
import logging
from django.conf import settings
try:
import pyoncat
HAVE_ONCAT = True
except:
HAVE_ONCAT = False
from fitting.models import CatalogCache
def decode_time(timestamp):
"""
Decode timestamp and return a datetime object
:param timestamp: timestamp to decode
"""
try:
        # Locate the timezone offset; this assumes the timestamp carries one
        # (for a negative offset, the last '-' is the timezone separator).
        tz_location = timestamp.rfind('+')
        if tz_location < 0:
            tz_location = timestamp.rfind('-')
if tz_location > 0:
date_time_str = timestamp[:tz_location]
# Get rid of fractions of a second
sec_location = date_time_str.rfind('.')
if sec_location > 0:
date_time_str = date_time_str[:sec_location]
return datetime.datetime.strptime(date_time_str, "%Y-%m-%dT%H:%M:%S")
except:
logging.error("Could not parse timestamp '%s': %s", timestamp, sys.exc_value)
return None
def get_run_info(instrument, run_number):
"""
Legacy issue:
Until the facility information is stored in the DB so that we can
retrieve the facility from it, we'll have to use the application
configuration.
:param str instrument: instrument name
:param str run_number: run number
:param str facility: facility name (SNS or HFIR)
"""
facility = 'SNS'
if hasattr(settings, 'FACILITY_INFO'):
facility = settings.FACILITY_INFO.get(instrument, 'SNS')
return _get_run_info(instrument, run_number, facility)
def _get_run_info(instrument, run_number, facility='SNS'):
"""
Get ONCat info for the specified run
Notes: At the moment we do not catalog reduced data
:param str instrument: instrument short name
:param str run_number: run number
:param str facility: facility name (SNS or HFIR)
"""
run_info = {}
    # Caching is disabled for now; re-enable by restoring the filter call below.
    cached_entry = []  # CatalogCache.objects.filter(data_path="%s/%s" % (instrument, run_number))
if len(cached_entry) > 0:
return dict(title=cached_entry[0].title, proposal=cached_entry[0].proposal)
if not HAVE_ONCAT:
return run_info
try:
oncat = pyoncat.ONCat(
settings.CATALOG_URL,
# Here we're using the machine-to-machine "Client Credentials" flow,
# which requires a client ID and secret, but no *user* credentials.
flow = pyoncat.CLIENT_CREDENTIALS_FLOW,
client_id = settings.CATALOG_ID,
client_secret = settings.CATALOG_SECRET,
)
oncat.login()
datafiles = oncat.Datafile.list(
facility = facility,
instrument = instrument.upper(),
projection = ['experiment', 'location', 'metadata.entry.title'],
tags = ['type/raw'],
ranges_q = 'indexed.run_number:%s' % str(run_number)
)
if datafiles:
run_info['title'] = datafiles[0].metadata.get('entry', {}).get('title', None)
run_info['proposal'] = datafiles[0].experiment
run_info['location'] = datafiles[0].location
except:
logging.error("Communication with ONCat server failed: %s", sys.exc_value)
return run_info
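# A minimal usage sketch (not part of the original module); the instrument
# name and run number below are hypothetical:
#
#   info = get_run_info('ARCS', '12345')
#   print info.get('title'), info.get('proposal'), info.get('location')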
|
ASAP Supply Chain is pleased to inform you that aviation part number C049-1 is now available and in stock. This part is a Wire Harness Assy manufactured by Robinson Helicopter. Are you in the market for a quick and competitive quote for part number C049-1? |
#!/usr/bin/env python
#===-- coff-dump.py - COFF object file dump utility-------------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
#
# COFF File Definition
#
def string_table_entry (offset):
return ('ptr', '+ + PointerToSymbolTable * NumberOfSymbols 18 %s' % offset, ('scalar', 'cstr', '%s'))
def secname(value):
if value[0] == '/':
return string_table_entry(value[1:].rstrip('\0'))
else:
return '%s'
def symname(value):
parts = struct.unpack("<2L", value)
if parts[0] == 0:
return string_table_entry(parts[1])
else:
return '%s'
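#
# Each node in the definition below is a tuple whose first element names one
# of the interpreter's handlers defined later in this script ('struct',
# 'scalar', 'enum', 'flags', 'ptr', 'blob', 'array' or 'byte-array').
# Numeric expressions such as '* NumberOfSymbols 18' are written in prefix
# (Polish) notation and are evaluated against previously read fields by
# read_value().
#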
file = ('struct', [
('MachineType', ('enum', '<H', '0x%X', {
0x0: 'IMAGE_FILE_MACHINE_UNKNOWN',
0x1d3: 'IMAGE_FILE_MACHINE_AM33',
0x8664: 'IMAGE_FILE_MACHINE_AMD64',
0x1c0: 'IMAGE_FILE_MACHINE_ARM',
0xebc: 'IMAGE_FILE_MACHINE_EBC',
0x14c: 'IMAGE_FILE_MACHINE_I386',
0x200: 'IMAGE_FILE_MACHINE_IA64',
0x904: 'IMAGE_FILE_MACHINE_M32R',
0x266: 'IMAGE_FILE_MACHINE_MIPS16',
0x366: 'IMAGE_FILE_MACHINE_MIPSFPU',
0x466: 'IMAGE_FILE_MACHINE_MIPSFPU16',
0x1f0: 'IMAGE_FILE_MACHINE_POWERPC',
0x1f1: 'IMAGE_FILE_MACHINE_POWERPCFP',
0x166: 'IMAGE_FILE_MACHINE_R4000',
0x1a2: 'IMAGE_FILE_MACHINE_SH3',
0x1a3: 'IMAGE_FILE_MACHINE_SH3DSP',
0x1a6: 'IMAGE_FILE_MACHINE_SH4',
0x1a8: 'IMAGE_FILE_MACHINE_SH5',
0x1c2: 'IMAGE_FILE_MACHINE_THUMB',
0x169: 'IMAGE_FILE_MACHINE_WCEMIPSV2',
})),
('NumberOfSections', ('scalar', '<H', '%d')),
('TimeDateStamp', ('scalar', '<L', '%d')),
('PointerToSymbolTable', ('scalar', '<L', '0x%0X')),
('NumberOfSymbols', ('scalar', '<L', '%d')),
('SizeOfOptionalHeader', ('scalar', '<H', '%d')),
('Characteristics', ('flags', '<H', '0x%x', [
(0x0001, 'IMAGE_FILE_RELOCS_STRIPPED', ),
(0x0002, 'IMAGE_FILE_EXECUTABLE_IMAGE', ),
(0x0004, 'IMAGE_FILE_LINE_NUMS_STRIPPED', ),
(0x0008, 'IMAGE_FILE_LOCAL_SYMS_STRIPPED', ),
(0x0010, 'IMAGE_FILE_AGGRESSIVE_WS_TRIM', ),
(0x0020, 'IMAGE_FILE_LARGE_ADDRESS_AWARE', ),
(0x0080, 'IMAGE_FILE_BYTES_REVERSED_LO', ),
(0x0100, 'IMAGE_FILE_32BIT_MACHINE', ),
(0x0200, 'IMAGE_FILE_DEBUG_STRIPPED', ),
(0x0400, 'IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP', ),
(0x0800, 'IMAGE_FILE_NET_RUN_FROM_SWAP', ),
(0x1000, 'IMAGE_FILE_SYSTEM', ),
(0x2000, 'IMAGE_FILE_DLL', ),
(0x4000, 'IMAGE_FILE_UP_SYSTEM_ONLY', ),
(0x8000, 'IMAGE_FILE_BYTES_REVERSED_HI', ),
])),
('Sections', ('array', '1', 'NumberOfSections', ('struct', [
('Name', ('scalar', '<8s', secname)),
('VirtualSize', ('scalar', '<L', '%d' )),
('VirtualAddress', ('scalar', '<L', '%d' )),
('SizeOfRawData', ('scalar', '<L', '%d' )),
('PointerToRawData', ('scalar', '<L', '0x%X' )),
('PointerToRelocations', ('scalar', '<L', '0x%X' )),
('PointerToLineNumbers', ('scalar', '<L', '0x%X' )),
('NumberOfRelocations', ('scalar', '<H', '%d' )),
('NumberOfLineNumbers', ('scalar', '<H', '%d' )),
    ('Characteristics', ('flags', '<L', '0x%X', [
(0x00000008, 'IMAGE_SCN_TYPE_NO_PAD'),
(0x00000020, 'IMAGE_SCN_CNT_CODE'),
(0x00000040, 'IMAGE_SCN_CNT_INITIALIZED_DATA'),
(0x00000080, 'IMAGE_SCN_CNT_UNINITIALIZED_DATA'),
(0x00000100, 'IMAGE_SCN_LNK_OTHER'),
(0x00000200, 'IMAGE_SCN_LNK_INFO'),
(0x00000800, 'IMAGE_SCN_LNK_REMOVE'),
(0x00001000, 'IMAGE_SCN_LNK_COMDAT'),
(0x00008000, 'IMAGE_SCN_GPREL'),
(0x00020000, 'IMAGE_SCN_MEM_PURGEABLE'),
(0x00020000, 'IMAGE_SCN_MEM_16BIT'),
(0x00040000, 'IMAGE_SCN_MEM_LOCKED'),
(0x00080000, 'IMAGE_SCN_MEM_PRELOAD'),
(0x00F00000, 'IMAGE_SCN_ALIGN', {
0x00100000: 'IMAGE_SCN_ALIGN_1BYTES',
0x00200000: 'IMAGE_SCN_ALIGN_2BYTES',
0x00300000: 'IMAGE_SCN_ALIGN_4BYTES',
0x00400000: 'IMAGE_SCN_ALIGN_8BYTES',
0x00500000: 'IMAGE_SCN_ALIGN_16BYTES',
0x00600000: 'IMAGE_SCN_ALIGN_32BYTES',
0x00700000: 'IMAGE_SCN_ALIGN_64BYTES',
0x00800000: 'IMAGE_SCN_ALIGN_128BYTES',
0x00900000: 'IMAGE_SCN_ALIGN_256BYTES',
0x00A00000: 'IMAGE_SCN_ALIGN_512BYTES',
0x00B00000: 'IMAGE_SCN_ALIGN_1024BYTES',
0x00C00000: 'IMAGE_SCN_ALIGN_2048BYTES',
0x00D00000: 'IMAGE_SCN_ALIGN_4096BYTES',
0x00E00000: 'IMAGE_SCN_ALIGN_8192BYTES',
}),
(0x01000000, 'IMAGE_SCN_LNK_NRELOC_OVFL'),
(0x02000000, 'IMAGE_SCN_MEM_DISCARDABLE'),
(0x04000000, 'IMAGE_SCN_MEM_NOT_CACHED'),
(0x08000000, 'IMAGE_SCN_MEM_NOT_PAGED'),
(0x10000000, 'IMAGE_SCN_MEM_SHARED'),
(0x20000000, 'IMAGE_SCN_MEM_EXECUTE'),
(0x40000000, 'IMAGE_SCN_MEM_READ'),
(0x80000000, 'IMAGE_SCN_MEM_WRITE'),
])),
('SectionData', ('ptr', 'PointerToRawData', ('blob', 'SizeOfRawData'))),
('Relocations', ('ptr', 'PointerToRelocations', ('array', '0', 'NumberOfRelocations', ('struct', [
('VirtualAddress', ('scalar', '<L', '0x%X')),
('SymbolTableIndex', ('scalar', '<L', '%d' )),
('Type', ('enum', '<H', '%d', ('MachineType', {
0x14c: {
0x0000: 'IMAGE_REL_I386_ABSOLUTE',
0x0001: 'IMAGE_REL_I386_DIR16',
0x0002: 'IMAGE_REL_I386_REL16',
0x0006: 'IMAGE_REL_I386_DIR32',
0x0007: 'IMAGE_REL_I386_DIR32NB',
0x0009: 'IMAGE_REL_I386_SEG12',
0x000A: 'IMAGE_REL_I386_SECTION',
0x000B: 'IMAGE_REL_I386_SECREL',
0x000C: 'IMAGE_REL_I386_TOKEN',
0x000D: 'IMAGE_REL_I386_SECREL7',
0x0014: 'IMAGE_REL_I386_REL32',
},
0x8664: {
0x0000: 'IMAGE_REL_AMD64_ABSOLUTE',
0x0001: 'IMAGE_REL_AMD64_ADDR64',
0x0002: 'IMAGE_REL_AMD64_ADDR32',
0x0003: 'IMAGE_REL_AMD64_ADDR32NB',
0x0004: 'IMAGE_REL_AMD64_REL32',
0x0005: 'IMAGE_REL_AMD64_REL32_1',
0x0006: 'IMAGE_REL_AMD64_REL32_2',
0x0007: 'IMAGE_REL_AMD64_REL32_3',
0x0008: 'IMAGE_REL_AMD64_REL32_4',
0x0009: 'IMAGE_REL_AMD64_REL32_5',
0x000A: 'IMAGE_REL_AMD64_SECTION',
0x000B: 'IMAGE_REL_AMD64_SECREL',
0x000C: 'IMAGE_REL_AMD64_SECREL7',
0x000D: 'IMAGE_REL_AMD64_TOKEN',
0x000E: 'IMAGE_REL_AMD64_SREL32',
0x000F: 'IMAGE_REL_AMD64_PAIR',
0x0010: 'IMAGE_REL_AMD64_SSPAN32',
},
}))),
('SymbolName', ('ptr', '+ PointerToSymbolTable * SymbolTableIndex 18', ('scalar', '<8s', symname)))
])))),
]))),
('Symbols', ('ptr', 'PointerToSymbolTable', ('byte-array', '18', '* NumberOfSymbols 18', ('struct', [
('Name', ('scalar', '<8s', symname)),
('Value', ('scalar', '<L', '%d' )),
('SectionNumber', ('scalar', '<H', '%d' )),
('_Type', ('scalar', '<H', None )),
('SimpleType', ('enum', '& _Type 15', '%d', {
0: 'IMAGE_SYM_TYPE_NULL',
1: 'IMAGE_SYM_TYPE_VOID',
2: 'IMAGE_SYM_TYPE_CHAR',
3: 'IMAGE_SYM_TYPE_SHORT',
4: 'IMAGE_SYM_TYPE_INT',
5: 'IMAGE_SYM_TYPE_LONG',
6: 'IMAGE_SYM_TYPE_FLOAT',
7: 'IMAGE_SYM_TYPE_DOUBLE',
8: 'IMAGE_SYM_TYPE_STRUCT',
9: 'IMAGE_SYM_TYPE_UNION',
10: 'IMAGE_SYM_TYPE_ENUM',
11: 'IMAGE_SYM_TYPE_MOE',
12: 'IMAGE_SYM_TYPE_BYTE',
13: 'IMAGE_SYM_TYPE_WORD',
14: 'IMAGE_SYM_TYPE_UINT',
15: 'IMAGE_SYM_TYPE_DWORD',
})), # (Type & 0xF0) >> 4
('ComplexType', ('enum', '>> & _Type 240 4', '%d', {
0: 'IMAGE_SYM_DTYPE_NULL',
1: 'IMAGE_SYM_DTYPE_POINTER',
2: 'IMAGE_SYM_DTYPE_FUNCTION',
3: 'IMAGE_SYM_DTYPE_ARRAY',
})),
('StorageClass', ('enum', '<B', '%d', {
      0xFF: 'IMAGE_SYM_CLASS_END_OF_FUNCTION',
0: 'IMAGE_SYM_CLASS_NULL',
1: 'IMAGE_SYM_CLASS_AUTOMATIC',
2: 'IMAGE_SYM_CLASS_EXTERNAL',
3: 'IMAGE_SYM_CLASS_STATIC',
4: 'IMAGE_SYM_CLASS_REGISTER',
5: 'IMAGE_SYM_CLASS_EXTERNAL_DEF',
6: 'IMAGE_SYM_CLASS_LABEL',
7: 'IMAGE_SYM_CLASS_UNDEFINED_LABEL',
8: 'IMAGE_SYM_CLASS_MEMBER_OF_STRUCT',
9: 'IMAGE_SYM_CLASS_ARGUMENT',
10: 'IMAGE_SYM_CLASS_STRUCT_TAG',
11: 'IMAGE_SYM_CLASS_MEMBER_OF_UNION',
12: 'IMAGE_SYM_CLASS_UNION_TAG',
13: 'IMAGE_SYM_CLASS_TYPE_DEFINITION',
14: 'IMAGE_SYM_CLASS_UNDEFINED_STATIC',
15: 'IMAGE_SYM_CLASS_ENUM_TAG',
16: 'IMAGE_SYM_CLASS_MEMBER_OF_ENUM',
17: 'IMAGE_SYM_CLASS_REGISTER_PARAM',
18: 'IMAGE_SYM_CLASS_BIT_FIELD',
100: 'IMAGE_SYM_CLASS_BLOCK',
101: 'IMAGE_SYM_CLASS_FUNCTION',
102: 'IMAGE_SYM_CLASS_END_OF_STRUCT',
103: 'IMAGE_SYM_CLASS_FILE',
104: 'IMAGE_SYM_CLASS_SECTION',
105: 'IMAGE_SYM_CLASS_WEAK_EXTERNAL',
107: 'IMAGE_SYM_CLASS_CLR_TOKEN',
})),
('NumberOfAuxSymbols', ('scalar', '<B', '%d' )),
    ('AuxiliaryData', ('blob', '* NumberOfAuxSymbols 18')),
])))),
])
#
# Definition Interpreter
#
import sys, types, struct, re
Input = None
Stack = []
Fields = {}
Indent = 0
NewLine = True
def indent():
global Indent
Indent += 1
def dedent():
global Indent
Indent -= 1
def write(input):
global NewLine
output = ""
for char in input:
if NewLine:
output += Indent * ' '
NewLine = False
output += char
if char == '\n':
NewLine = True
sys.stdout.write(output)
def read(format):
return struct.unpack(format, Input.read(struct.calcsize(format)))
def read_cstr():
output = ""
while True:
char = Input.read(1)
if len(char) == 0:
raise RuntimeError ("EOF while reading cstr")
if char == '\0':
break
output += char
return output
def push_pos(seek_to = None):
Stack [0:0] = [Input.tell()]
if seek_to:
Input.seek(seek_to)
def pop_pos():
assert(len(Stack) > 0)
Input.seek(Stack[0])
del Stack[0]
def print_binary_data(size):
value = ""
while size > 0:
if size >= 16:
data = Input.read(16)
size -= 16
else:
data = Input.read(size)
size = 0
value += data
bytes = ""
text = ""
for index in xrange(16):
if index < len(data):
if index == 8:
bytes += "- "
ch = ord(data[index])
bytes += "%02X " % ch
if ch >= 0x20 and ch <= 0x7F:
text += data[index]
else:
text += "."
else:
if index == 8:
bytes += " "
bytes += " "
write("%s|%s|\n" % (bytes, text))
return value
idlit = re.compile("[a-zA-Z_][a-zA-Z0-9_-]*")
numlit = re.compile("[0-9]+")
def read_value(expr):
input = iter(expr.split())
def eval():
token = input.next()
if expr == 'cstr':
return read_cstr()
if expr == 'true':
return True
if expr == 'false':
return False
if token == '+':
return eval() + eval()
if token == '-':
return eval() - eval()
if token == '*':
return eval() * eval()
if token == '/':
return eval() / eval()
if token == '&':
return eval() & eval()
if token == '|':
return eval() | eval()
if token == '>>':
return eval() >> eval()
if token == '<<':
return eval() << eval()
if len(token) > 1 and token[0] in ('=', '@', '<', '!', '>'):
val = read(expr)
assert(len(val) == 1)
return val[0]
if idlit.match(token):
return Fields[token]
if numlit.match(token):
return int(token)
raise RuntimeError("unexpected token %s" % repr(token))
value = eval()
try:
input.next()
except StopIteration:
return value
raise RuntimeError("unexpected input at end of expression")
def write_value(format,value):
format_type = type(format)
if format_type is types.StringType:
write(format % value)
elif format_type is types.FunctionType:
write_value(format(value), value)
elif format_type is types.TupleType:
Fields['this'] = value
handle_element(format)
elif format_type is types.NoneType:
pass
else:
raise RuntimeError("unexpected type: %s" % repr(format_type))
def handle_scalar(entry):
iformat = entry[1]
oformat = entry[2]
value = read_value(iformat)
write_value(oformat, value)
return value
def handle_enum(entry):
iformat = entry[1]
oformat = entry[2]
definitions = entry[3]
value = read_value(iformat)
if type(definitions) is types.TupleType:
selector = read_value(definitions[0])
definitions = definitions[1][selector]
if value in definitions:
description = definitions[value]
else:
description = "unknown"
write("%s (" % description)
write_value(oformat, value)
write(")")
return value
def handle_flags(entry):
iformat = entry[1]
oformat = entry[2]
definitions = entry[3]
value = read_value(iformat)
write_value(oformat, value)
indent()
for entry in definitions:
mask = entry[0]
name = entry[1]
if len (entry) == 3:
map = entry[2]
selection = value & mask
if selection in map:
write("\n%s" % map[selection])
else:
write("\n%s <%d>" % (name, selection))
elif len(entry) == 2:
if value & mask != 0:
write("\n%s" % name)
dedent()
return value
def handle_struct(entry):
global Fields
members = entry[1]
newFields = {}
write("{\n");
indent()
for member in members:
name = member[0]
type = member[1]
if name[0] != "_":
write("%s = " % name.ljust(24))
value = handle_element(type)
if name[0] != "_":
write("\n")
Fields[name] = value
newFields[name] = value
dedent()
write("}")
return newFields
def handle_array(entry):
start_index = entry[1]
length = entry[2]
element = entry[3]
newItems = []
write("[\n")
indent()
start_index = read_value(start_index)
value = read_value(length)
for index in xrange(value):
write("%d = " % (index + start_index))
value = handle_element(element)
write("\n")
newItems.append(value)
dedent()
write("]")
return newItems
def handle_byte_array(entry):
ent_size = entry[1]
length = entry[2]
element = entry[3]
newItems = []
write("[\n")
indent()
item_size = read_value(ent_size)
value = read_value(length)
end_of_array = Input.tell() + value
prev_loc = Input.tell()
index = 0
while Input.tell() < end_of_array:
write("%d = " % index)
value = handle_element(element)
write("\n")
newItems.append(value)
index += (Input.tell() - prev_loc) / item_size
prev_loc = Input.tell()
dedent()
write("]")
return newItems
def handle_ptr(entry):
offset = entry[1]
element = entry[2]
value = None
offset = read_value(offset)
if offset != 0:
push_pos(offset)
value = handle_element(element)
pop_pos()
else:
write("None")
return value
def handle_blob(entry):
length = entry[1]
write("\n")
indent()
value = print_binary_data(read_value(length))
dedent()
return value
def handle_element(entry):
handlers = {
'struct': handle_struct,
'scalar': handle_scalar,
'enum': handle_enum,
'flags': handle_flags,
'ptr': handle_ptr,
'blob': handle_blob,
'array': handle_array,
'byte-array': handle_byte_array,
}
if not entry[0] in handlers:
raise RuntimeError ("unexpected type '%s'" % str (entry[0]))
return handlers[entry[0]](entry)
if len(sys.argv) <= 1 or sys.argv[1] == '-':
import StringIO
Input = StringIO.StringIO(sys.stdin.read())
else:
Input = open (sys.argv[1], "rb")
try:
handle_element(file)
finally:
Input.close()
Input = None
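# Typical invocation (a sketch, not part of the original script): pass a COFF
# object file as the only argument, or pipe it on stdin; the file name below
# is hypothetical.
#
#   python coff-dump.py foo.obj
#   python coff-dump.py - < foo.obj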
|
Get the advantage of all features today and see for yourself how you get instant control of all your videos.
You may continue to evaluate the software for as long as you like and unlock it at any time by adding a license.
Just wanted to let you know what a godsend this program is. Well worth the money spent. I have found a lot of clips of videos that I no longer thought I had.
We do not bundle any adware with the installer and we always scan for viruses.
We have redone the user interface to make it easier to get started for new users and more efficient for experienced users. We kept all the advanced features from the previous version, so you can still save layouts across one or several computer screens.
We have added a general “Search All” functionality for quick searches.
We now automatically extract metadata from the video file, like how it’s compressed, its video dimensions and so on. Metadata is stored as extended video properties and can be enabled/disabled from the preferences.
You can now reassign shortcuts to your liking.
Added new “Preview” windows for companion images. The preview window shows the currently selected companion image. Move the image with the mouse and zoom with the mouse wheel.
Added new “Companion image browser” windows for companion images. In the companion image browser you can view all companion images for a video and manage them more easily.
64-bit support, allowing for higher resolution videos and thumbnails.
New keyword manager and keyword colors.
Improvements to the integrated video player.
Integrated video player now supports MadVR (to improve video quality).
Improvements to the search engine: it’s faster, much more powerful, and queries can be saved and reused later.
Keywording and layouts have been simplified and are easier to use.
Documentation updated and hopefully easier to follow.
Improvements to the indexing engine. If you had problem indexing some videos with previous version please try it again with this update.
New functionality to export to csv files.
New functionality to save a video as a jpeg contact sheet image.
New functionality to import actors based on a folder structure.
Tweaks to the user interface to make more space for video content; most notably, the title bar of the program is gone.
Preparation for the Windows 10 Anniversary Update. |
#
# Copyright (c) 2016 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import string
import re
from commoncode.text import toascii
"""
Extract raw ASCII strings from (possibly) binary strings.
Both plain ASCII and UTF-16-LE-encoded (aka. wide) strings are extracted.
The later is found typically in some Windows PEs.
This is more or less similar to what GNU Binutils strings does.
It does not recognize or extract non-ASCII characters.
Some alternatives and references:
https://github.com/fireeye/flare-floss (also included)
http://stackoverflow.com/questions/10637055/how-do-i-extract-unicode-character-sequences-from-an-mz-executable-file
http://stackoverflow.com/questions/1324067/how-do-i-get-str-translate-to-work-with-unicode-strings
http://stackoverflow.com/questions/11066400/remove-punctuation-from-unicode-formatted-strings/11066687#11066687
https://github.com/TakahiroHaruyama/openioc_scan/blob/d7e8c5962f77f55f9a5d34dbfd0799f8c57eff7f/openioc_scan.py#L184
"""
# at least three characters are needed to consider some blob as a good string
MIN_LEN = 3
def strings_from_file(location, buff_size=1024 * 1024, ascii=False, clean=True, min_len=MIN_LEN):
"""
    Yield unicode strings made only of ASCII characters found in the file at
    location. The file is processed in chunks (to limit memory usage). If ascii
    is True, strings are converted to plain ASCII byte strings instead of
    unicode. Only strings at least min_len characters long are yielded.
    """
with open(location, 'rb') as f:
while 1:
buf = f.read(buff_size)
if not buf:
break
for s in strings_from_string(buf, clean=clean, min_len=min_len):
if ascii:
s = toascii(s)
s = s.strip()
if not s or len(s) < min_len:
continue
yield s
# Extracted text is made of digits, letters, punctuation and white spaces
punctuation = re.escape(string.punctuation)
whitespaces = ' \t\n\r'
printable = 'A-Za-z0-9' + whitespaces + punctuation
null_byte = '\x00'
ascii_strings = re.compile(
# plain ASCII is a sequence of printable of a minimum length
'('
+ '[' + printable + ']'
+ '{' + str(MIN_LEN) + ',}'
+ ')'
# or utf-16-le-encoded ASCII is a sequence of ASCII+null byte
+ '|'
+ '('
+ '(?:' + '[' + printable + ']' + null_byte + ')'
+ '{' + str(MIN_LEN) + ',}'
+ ')'
).finditer
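# A small demonstration of the two branches above (plain ASCII and
# UTF-16-LE). This helper is not part of the original module; the expected
# result in the comment assumes a Python 2 interpreter, like the rest of
# this code.
def _demo_ascii_strings():
    blob = '\x01\x02hello world\x03h\x00i\x00!\x00\x04'
    # returns ['hello world', 'h\x00i\x00!\x00']
    return [match.group() for match in ascii_strings(blob)]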
def strings_from_string(binary_string, clean=False, min_len=0):
"""
Yield strings extracted from a (possibly binary) string. The strings are ASCII
printable characters only. If clean is True, also clean and filter short and
repeated strings.
Note: we do not keep the offset of where a string was found (e.g. match.start).
"""
for match in ascii_strings(binary_string):
s = decode(match.group())
if s:
if clean:
for ss in clean_string(s, min_len=min_len):
yield ss
else:
yield s
def string_from_string(binary_string, clean=False, min_len=0):
"""
Return a unicode string string extracted from a (possibly binary) string,
removing all non printable characters.
"""
return u' '.join(strings_from_string(binary_string, clean, min_len))
def decode(s):
"""
Return a decoded unicode string from s or None if the string cannot be decoded.
"""
if '\x00' in s:
try:
return s.decode('utf-16-le')
except UnicodeDecodeError:
pass
else:
return s.decode('ascii')
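# For example (Python 2 byte strings, as used throughout this module):
# decode('h\x00i\x00') returns u'hi' (decoded as UTF-16-LE because of the
# embedded null bytes), while decode('hi') returns u'hi' via plain ASCII.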
remove_junk = re.compile('[' + punctuation + whitespaces + ']').sub
def clean_string(s, min_len=MIN_LEN,
junk=string.punctuation + string.digits + string.whitespace):
"""
Yield cleaned strings from string s if it passes some validity tests:
* not made of white spaces
* with a minimum length
    * not made of a single repeated character
    * not made only of digits, punctuation and whitespace
"""
s = s.strip()
def valid(st):
st = remove_junk('', st)
return (st and len(st) >= min_len
            # ignore character repeats, e.g. need more than one unique character
            and len(set(st.lower())) > 1
            # ignore strings made only of digits or punctuation
            and not all(c in junk for c in st))
if valid(s):
yield s.strip()
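# For example, clean_string(u'++--++') yields nothing (only punctuation),
# clean_string(u'aaaaaa') yields nothing (a single repeated character), and
# clean_string(u'libfoo') yields u'libfoo'.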
#####################################################################################
# TODO: Strings classification
# Classify strings, detect junk, detect paths, symbols, demangle symbols, unescape
# http://code.activestate.com/recipes/466293-efficient-character-escapes-decoding/?in=user-2382677
def is_file(s):
"""
Return True if s looks like a file name.
    Example: dsdsd.dll
"""
filename = re.compile('^[\w_\-]+\.\w{1,4}$', re.IGNORECASE).match
return filename(s)
def is_shared_object(s):
"""
Return True if s looks like a shared object file.
Example: librt.so.1
"""
    so = re.compile(r'^[\w_\-]+\.so\.[0-9]+(?:\.[0-9]+)*$', re.IGNORECASE).match
return so(s)
def is_posix_path(s):
"""
Return True if s looks like a posix path.
Example: /usr/lib/librt.so.1 or /usr/lib
"""
    posix = re.compile(r'^/[\w_\-].*$', re.IGNORECASE).match
    return bool(posix(s))
def is_relative_path(s):
"""
Return True if s looks like a relative posix path.
Example: usr/lib/librt.so.1 or ../usr/lib
"""
    relative = re.compile(r'^(?:[^/]|\.\.)[\w_\-]*/.*$', re.IGNORECASE).match
return relative(s)
def is_win_path(s):
"""
Return True if s looks like a win path.
Example: c:\usr\lib\librt.so.1.
"""
    winpath = re.compile(r'^[a-zA-Z]:\\.*$', re.IGNORECASE).match
return winpath(s)
def is_c_source(s):
"""
Return True if s looks like a C source path.
Example: this.c
FIXME: should get actual algo from contenttype.
"""
return s.endswith(('.c', '.cpp', '.hpp', '.h'))
def is_java_source(s):
"""
Return True if s looks like a Java source path.
Example: this.java
FIXME: should get actual algo from contenttype.
"""
return s.endswith(('.java', '.jsp', '.aj',))
def is_glibc_ref(s):
"""
Return True if s looks like a reference to GLIBC as typically found in
Elfs.
"""
return '@@GLIBC' in s
def is_java_ref(s):
"""
Return True if s looks like a reference to a java class or package in a
class file.
"""
jref = re.compile('^.*$', re.IGNORECASE).match
# TODO: implement me
jref(s)
return False
def is_win_guid(s):
"""
Return True if s looks like a windows GUID/APPID/CLSID.
"""
    guid = re.compile('^"?\{[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}\}"?$', re.IGNORECASE).match
    return bool(guid(s))
class BinaryStringsClassifier(object):
"""
Classify extracted strings as good or bad/junk.
The types of strings that are recognized include:
file
file_path
junk
text
"""
# TODO: Implement me
if __name__ == '__main__':
# also usable a simple command line script
import sys
location = sys.argv[1]
for s in strings_from_file(location):
print(s)
|
Join our team! We're always looking for motivated, friendly people to be a part of our organization!
Call for more details regarding all current openings at Dakota Connection Casino.
Please submit your application to Human Resources before 4:00 p.m. on the day the position closes. |
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
"""
Cryptography-related functions for handling JAR signature block files.
:author: Konstantin Shemyak <konstantin@shemyak.com>
:license: LGPL v.3
"""
from M2Crypto import SMIME, X509, BIO, RSA, DSA, EC, m2
class CannotFindKeyTypeError(Exception):
"""
Failed to determine the type of the private key.
"""
pass
class SignatureBlockVerificationError(Exception):
"""
The Signature Block File verification failed.
"""
pass
def private_key_type(key_file):
"""
Determines type of the private key: RSA, DSA, EC.
:param key_file: file path
:type key_file: str
:return: one of "RSA", "DSA" or "EC"
:except CannotFindKeyTypeError
"""
keytypes = (("RSA", RSA), ("DSA", DSA), ("EC", EC))
for key, ktype in keytypes:
try:
ktype.load_key(key_file)
except (RSA.RSAError, DSA.DSAError, ValueError):
continue
else:
return key
else:
raise CannotFindKeyTypeError()
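# Usage sketch (the key path below is hypothetical, not part of this module):
#
#     kind = private_key_type('/path/to/key.pem')  # -> 'RSA', 'DSA' or 'EC'
#
# CannotFindKeyTypeError is raised if none of the loaders accept the file.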
def create_signature_block(openssl_digest, certificate, private_key,
extra_certs, data):
"""
Produces a signature block for the data.
Reference
---------
http://docs.oracle.com/javase/7/docs/technotes/guides/jar/jar.html#Digital_Signatures
    Note: Oracle does not specify the content of the "signature
    file block", saying merely that "These are binary files
    not intended to be interpreted by humans".
    :param openssl_digest: algorithm known to OpenSSL, used to digest the data
:type openssl_digest: str
:param certificate: filename of the certificate file (PEM format)
:type certificate: str
    :param private_key: filename of the private key used to sign (PEM format)
:type private_key: str
:param extra_certs: additional certificates to embed into the signature (PEM format)
:type extra_certs: array of filenames
:param data: the content to be signed
:type data: bytes
:returns: content of the signature block file as produced by jarsigner
:rtype: bytes
""" # noqa
smime = SMIME.SMIME()
with BIO.openfile(private_key) as k, BIO.openfile(certificate) as c:
smime.load_key_bio(k, c)
if extra_certs is not None:
# Could we use just X509.new_stack_from_der() instead?
stack = X509.X509_Stack()
for cert in extra_certs:
stack.push(X509.load_cert(cert))
smime.set_x509_stack(stack)
pkcs7 = smime.sign(BIO.MemoryBuffer(data),
algo=openssl_digest,
flags=(SMIME.PKCS7_BINARY |
SMIME.PKCS7_DETACHED |
SMIME.PKCS7_NOATTR))
tmp = BIO.MemoryBuffer()
pkcs7.write_der(tmp)
return tmp.read()
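# Usage sketch (the file names and manifest variable are hypothetical):
#
#     sig = create_signature_block('sha256', 'signer.pem', 'signer-key.pem',
#                                  None, manifest_bytes)
#
# The returned bytes correspond to the binary .RSA/.DSA/.EC signature block
# file that jarsigner would produce.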
def ignore_missing_email_protection_eku_cb(ok, ctx):
"""
For verifying PKCS7 signature, m2Crypto uses OpenSSL's PKCS7_verify().
The latter requires that ExtendedKeyUsage extension, if present,
contains 'emailProtection' OID. (Is it because S/MIME is/was the
primary use case for PKCS7?)
We do not want to fail the verification in this case. At present,
M2Crypto lacks possibility of removing or modifying an existing
extension. Let's assign a custom verification callback.
"""
# The error we want to ignore is indicated by X509_V_ERR_INVALID_PURPOSE.
err = ctx.get_error()
if err != m2.X509_V_ERR_INVALID_PURPOSE:
return ok
    # PKCS7_verify() has this requirement only for the signing certificate.
# Do not modify the behavior for certificates upper in the chain.
if ctx.get_error_depth() > 0:
return ok
# There is another cause of ERR_INVALID_PURPOSE: incompatible keyUsage.
# Do not modify the default behavior in this case.
cert = ctx.get_current_cert()
try:
key_usage = cert.get_ext('keyUsage').get_value()
if 'digitalSignature' not in key_usage \
and 'nonRepudiation' not in key_usage:
return ok
except LookupError:
pass
# Here, keyUsage is either absent, or contains the needed bit(s).
# So ERR_INVALID_PURPOSE is caused by EKU not containing 'emailProtection'.
# Ignore this error.
return 1
def verify_signature_block(certificate_file, content, signature):
"""
Verifies the 'signature' over the 'content', trusting the
'certificate'.
:param certificate_file: the trusted certificate (PEM format)
:type certificate_file: str
:param content: The signature should match this content
:type content: str
:param signature: data (DER format) subject to check
:type signature: str
    :return: None if the signature validates.
:exception SignatureBlockVerificationError
"""
sig_bio = BIO.MemoryBuffer(signature)
pkcs7 = SMIME.PKCS7(m2.pkcs7_read_bio_der(sig_bio._ptr()), 1)
signers_cert_stack = pkcs7.get0_signers(X509.X509_Stack())
trusted_cert_store = X509.X509_Store()
trusted_cert_store.set_verify_cb(ignore_missing_email_protection_eku_cb)
trusted_cert_store.load_info(certificate_file)
smime = SMIME.SMIME()
smime.set_x509_stack(signers_cert_stack)
smime.set_x509_store(trusted_cert_store)
data_bio = BIO.MemoryBuffer(content)
try:
smime.verify(pkcs7, data_bio)
except SMIME.PKCS7_Error as message:
raise SignatureBlockVerificationError(message)
else:
return None
#
# The end.
|
This machinery replacement is an original Komatsu spare part for a WA500-3. The part number of this Komatsu Pin is 425-15-22530.
This used Komatsu Pin is an original second-hand Komatsu piece: a Komatsu Pin removed from a Komatsu unit, model WA500-3 (serial 50001 and up), dismantled in TAOP PARTS’s workshop in SPAIN by mechanics highly qualified in dismantling machinery and reconditioning Komatsu spare parts.
This Komatsu Pin for WA500-3 (Part Num. 425-15-22530) is now available for sale. This Komatsu spare part is stored in our SPAIN facilities and can be delivered to any country. You can request information about this Komatsu Pin for WA500-3 (Ref. 425-15-22530), its delivery and its price by completing the attached contact form or calling our customer service line (+34 96 151 96 50). |
def extractKobatoChanDaiSukiScan(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'Lookism' in item['tags']:
return None
if 'webtoon' in item['tags']:
return None
if '*Announcements*' in item['tags']:
return None
if '*STAFF ONLY*' in item['tags']:
return None
tagmap = [
("Can't Stop Craving Potions Again and Again", "Can't Stop Craving Potions Again and Again", 'translated'),
("Can't Stop Craving Potions", "Can't Stop Craving Potions", 'translated'),
("Royal Roader on My Own", "Royal Roader on My Own", 'translated'),
('A Bird That Drinks Tears', 'A Bird That Drinks Tears', 'translated'),
('All Things Wrong', 'Doing All Things Wrong And Somehow Becoming The Best In The Game', 'translated'),
('Cheat Skill: Sleep Learning', 'Cheat Skill: Sleep Learning', 'translated'),
('Coder Lee YongHo', 'Coder Lee YongHo', 'translated'),
('FFF-Class Trashero', 'FFF-Class Trashero', 'translated'),
('Dragon Poor', 'Dragon Poor', 'translated'),
('Everyone Else is a Returnee', 'Everyone Else is a Returnee', 'translated'),
('God of Cooking', 'God of Cooking', 'translated'),
('God of Crime', 'God of Crime', 'translated'),
('God of Music', 'God of Music', 'translated'),
('God of Thunder', 'God of Thunder', 'translated'),
('God-level Bodyguard in the City', 'God-level Bodyguard in the City', 'translated'),
('Green Skin', 'Green Skin', 'translated'),
('I am the monarch', 'I am the Monarch', 'translated'),
('Kenkyo kenjitsu o motto ni ikite orimasu!', 'Kenkyo, Kenjitsu o Motto ni Ikite Orimasu!', 'translated'),
('Life of the Damned', 'Life of the Damned', 'translated'),
('Forest of Funerals', 'Forest of Funerals', 'translated'),
('Link the Orc', 'Link the Orc', 'translated'),
('maou no hajimekata', 'Maou no Hajimekata', 'translated'),
('Miracle Drawing!', 'Miracle Drawing!', 'translated'),
('Omni Genius', 'Omni Genius', 'translated'),
('Omocha no Kyousou-sama', 'Omocha no Kyousou-sama', 'translated'),
('One Man Army', 'One Man Army', 'translated'),
('Reincarnator', 'Reincarnator', 'translated'),
('Rise Strongest Warrior', 'Rise Strongest Warrior', 'translated'),
('Solo Clear', 'Solo Clear', 'translated'),
('Survival World RPG', 'Survival World RPG', 'translated'),
('Ten Thousand Heaven Controlling Sword', 'Ten Thousand Heaven Controlling Sword', 'translated'),
('The Bird That Drinks Tears', 'The Bird That Drinks Tears', 'translated'),
('The Sorcerer Laughs in the Mirror', 'The Sorcerer Laughs in the Mirror', 'translated'),
('The Stone of Days', 'The Stone of Days', 'translated'),
('The Strongest System', 'The Strongest System', 'translated'),
('Wagahai no Kare wa Baka de aru', 'Wagahai no Kare wa Baka de aru', 'translated'),
('When The Star Flutters', 'When The Star Flutters', 'translated'),
('Magician of Insa-Dong', 'Magician of Insa-Dong', 'translated'),
("Hero", "Hero", 'oel'),
("Immortal Ascension Tower", "Immortal Ascension Tower", 'oel'),
("The Overlord's Elite is now a Human?!", "The Overlord's Elite is now a Human?!", 'oel'),
("Titan's Throne", "Titan's Throne", 'oel'),
('Conquest', 'Conquest', 'oel'),
('The Empyrean Nethervoid', 'The Empyrean Nethervoid', 'oel'),
]
for tag, sname, tl_type in tagmap:
if tag in item['tags']:
return buildReleaseMessageWithType(item, sname, vol, chp, frag=frag, tl_type=tl_type)
titlemap = [
('fujimaru wrote a new post, FFF-Class Trashero - Chapter', 'FFF-Class Trashero', 'translated'),
('kobatochandaisuki wrote a new post, I Am the Monarch - Chapter', 'I Am the Monarch', 'translated'),
('Engebu wrote a new post, I Am the Monarch - Chapter', 'I Am the Monarch', 'translated'),
('Calvis wrote a new post, Dragon Poor - Chapter', 'Dragon Poor', 'translated'),
('Calvis wrote a new post, Green Skin - Chapter', 'Green Skin', 'translated'),
('Calvis wrote a new post, Rise, Strongest Warrior - Chapter', 'Rise, Strongest Warrior', 'translated'),
('Calvis wrote a new post, The Stone of Days - ', 'The Stone of Days', 'translated'),
('Calvis wrote a new post, The Stone of Days - Chapter', 'The Stone of Days', 'translated'),
('csvtranslator wrote a new post, I Am the Monarch - Chapter', 'I Am the Monarch', 'translated'),
('Koukouseidesu wrote a new post, Everyone Else is a Returnee - Chapter', 'Everyone Else is a Returnee', 'translated'),
('kuhaku wrote a new post, Solo Clear - Chapter ', 'Solo Clear', 'translated'),
('miraclerifle wrote a new post, God of Cooking - Chapter', 'God of Cooking', 'translated'),
('miraclerifle wrote a new post, Royal Roader on My Own - Chapter', 'Royal Roader on My Own', 'translated'),
('pyrenose wrote a new post, Rise, Strongest Warrior - Chapter', 'Rise, Strongest Warrior', 'translated'),
('Saquacon wrote a new post, All Things Wrong - Chapter', 'Doing All Things Wrong And Somehow Becoming The Best In The Game', 'translated'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
Two words that form part of our Keeping It Real creed. They have immense reach in the world beyond business. But could they be relevant within business too?
I’m sure Terry* was skeptical when we started talking about it during a coaching conversation.
Terry, a Partner in a consulting company, was young, and keen to do even better than he already had within his firm. He had a team of Managers and Directors working for him. He found the younger, more ambitious ones quite easy to work with. But he struggled with people who were a little older than him, who were maybe good enough at what they did but had topped out in their careers.
On an earlier call, we’d discussed this whole thing in general: how was it for him to manage people he didn’t quite gel with? This challenge had clearly been around for him for a while, as he shared with me conversations he’d had with this or that fellow Partner or colleague who’d coached him on this or that tactic he could use to try to get these people to change in some way.
I didn’t have any fresh tactics to offer Terry, but I did reflect to him the judgment that I heard in him. And wondered aloud how much of Terry’s issue was the people themselves, and how much was the way he was thinking about them? How much was about Terry changing, versus them?
And I wondered what might be possible if Terry could replace his judgmental thinking with compassion. I didn’t use the word “love” overtly, but that was what I was pointing to.
We didn’t then talk for a few weeks, but on the next call, Terry shared with me a huge breakthrough he’d had with John*, one of the people he’d found himself most struggling to manage.
Terry was honest and said that he’d seen John as a huge pain in the neck, always needing Terry’s time and attention, and instead of taking responsibility for making things happen, often seeking Terry’s prior approval.
Terry told me that, after quite a punishing week, he was driving home one Friday evening when John’s number came up on his phone.
He told me how John had wanted to update him on a client review meeting he’d done earlier, during which he’d discovered there was a prospective piece of add-on consultancy work. He wanted to run it past Terry because he was planning to do work on it at the weekend before going back to the client early the following week.
I just listened as he spoke.
“And did you end up talking again on the Monday?” I said.
It seemed to me that the story would have been pretty cool already had that been where it ended. But he went on.
“Wow,” I said. We were silent for a while as we just held the whole magnitude of that.
“So, what did you take from all of that?” I said.
What Terry did there was, in a moment of feeling challenged, choose a loving, instead of a judging mindset, and the whole landscape of his relationship with John shifted, as did their collective business results.
And, I wonder, where and how could you shift things on your business landscape? What results might that bring you?
*Names changed to protect confidentiality. |
import six
import copy
import json
class lazy_format(object):
__slots__ = ("fmt", "args", "kwargs")
def __init__(self, fmt, *args, **kwargs):
self.fmt = fmt
self.args = args
self.kwargs = kwargs
def __str__(self):
return self.fmt.format(*self.args, **self.kwargs)
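# For example, str(lazy_format("answer={0} extra={x}", 41 + 1, x="y")) gives
# "answer=42 extra=y"; formatting is deferred until __str__ is called, which
# is useful for log messages that may never be rendered.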
def safe_issubclass(x, y):
"""Safe version of issubclass() that will not throw TypeErrors.
Invoking issubclass('object', some-abc.meta instances) will result
in the underlying implementation throwing TypeError's from trying to
memoize the result- 'object' isn't a usable weakref target at that level.
Unfortunately this gets exposed all the way up to our code; thus a
'safe' version of the function."""
try:
return issubclass(x, y)
except TypeError:
return False
def coerce_for_expansion(mapping):
"""Given a value, make sure it is usable for f(**val) expansion.
    In py2.7, the value must be a dictionary; thus an as_dict() method
will be invoked if available. In py3k, the raw mapping is returned
unmodified."""
if six.PY2 and hasattr(mapping, "as_dict"):
return mapping.as_dict()
return mapping
class ProtocolJSONEncoder(json.JSONEncoder):
def default(self, obj):
from python_jsonschema_objects import classbuilder
from python_jsonschema_objects import wrapper_types
if isinstance(obj, classbuilder.LiteralValue):
return obj._value
if isinstance(obj, wrapper_types.ArrayWrapper):
return obj.for_json()
if isinstance(obj, classbuilder.ProtocolBase):
props = {}
for raw, trans in six.iteritems(obj.__prop_names__):
props[raw] = getattr(obj, trans)
if props[raw] is None:
del props[raw]
for raw, data in six.iteritems(obj._extended_properties):
props[raw] = data
if props[raw] is None:
del props[raw]
return props
else:
return json.JSONEncoder.default(self, obj)
def propmerge(into, data_from):
""" Merge JSON schema requirements into a dictionary """
newprops = copy.deepcopy(into)
for prop, propval in six.iteritems(data_from):
if prop not in newprops:
newprops[prop] = propval
continue
new_sp = newprops[prop]
for subprop, spval in six.iteritems(propval):
if subprop not in new_sp:
new_sp[subprop] = spval
elif subprop == "enum":
new_sp[subprop] = set(spval) & set(new_sp[subprop])
elif subprop == "type":
if spval != new_sp[subprop]:
                    raise TypeError("Type cannot conflict in allOf")
elif subprop in ("minLength", "minimum"):
new_sp[subprop] = new_sp[subprop] if new_sp[subprop] > spval else spval
elif subprop in ("maxLength", "maximum"):
new_sp[subprop] = new_sp[subprop] if new_sp[subprop] < spval else spval
elif subprop == "multipleOf":
if new_sp[subprop] % spval == 0:
new_sp[subprop] = spval
else:
raise AttributeError("Cannot set conflicting multipleOf values")
else:
new_sp[subprop] = spval
newprops[prop] = new_sp
return newprops
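# A small sketch with two hypothetical property maps:
#
#     propmerge({'a': {'type': 'string', 'minLength': 2}},
#               {'a': {'minLength': 5}, 'b': {'type': 'integer'}})
#     # -> {'a': {'type': 'string', 'minLength': 5}, 'b': {'type': 'integer'}}
#
# Conflicting 'type' values raise TypeError and incompatible 'multipleOf'
# values raise AttributeError, per the branches above.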
def resolve_ref_uri(base, ref):
if ref[0] == "#":
# Local ref
uri = base.rsplit("#", 1)[0] + ref
else:
uri = ref
return uri
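# For example:
#
#     resolve_ref_uri('http://example.com/schema.json#', '#/definitions/foo')
#     # -> 'http://example.com/schema.json#/definitions/foo'
#
# Non-local references (those not starting with '#') are returned unchanged.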
"""namespace module"""
__all__ = ("Namespace", "as_namespace")
from collections.abc import Mapping, Sequence
class _Dummy:
pass
CLASS_ATTRS = dir(_Dummy)
NEWCLASS_ATTRS = dir(object)
del _Dummy
class Namespace(dict):
"""A dict subclass that exposes its items as attributes.
Warning: Namespace instances do not have direct access to the
dict methods.
"""
def __init__(self, obj={}):
dict.__init__(self, obj)
def __dir__(self):
return list(self)
def __repr__(self):
return "%s(%s)" % (type(self).__name__, super(dict, self).__repr__())
def __getattribute__(self, name):
try:
return self[name]
except KeyError:
msg = "'%s' object has no attribute '%s'"
raise AttributeError(msg % (type(self).__name__, name))
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
del self[name]
# ------------------------
# "copy constructors"
@classmethod
def from_object(cls, obj, names=None):
if names is None:
names = dir(obj)
ns = {name: getattr(obj, name) for name in names}
return cls(ns)
@classmethod
def from_mapping(cls, ns, names=None):
if names:
ns = {name: ns[name] for name in names}
return cls(ns)
@classmethod
def from_sequence(cls, seq, names=None):
if names:
seq = {name: val for name, val in seq if name in names}
return cls(seq)
# ------------------------
# static methods
@staticmethod
def hasattr(ns, name):
try:
object.__getattribute__(ns, name)
except AttributeError:
return False
return True
@staticmethod
def getattr(ns, name):
return object.__getattribute__(ns, name)
@staticmethod
def setattr(ns, name, value):
return object.__setattr__(ns, name, value)
@staticmethod
def delattr(ns, name):
return object.__delattr__(ns, name)
def as_namespace(obj, names=None):
# functions
if isinstance(obj, type(as_namespace)):
obj = obj()
# special cases
if isinstance(obj, type):
names = (name for name in dir(obj) if name not in CLASS_ATTRS)
return Namespace.from_object(obj, names)
if isinstance(obj, Mapping):
return Namespace.from_mapping(obj, names)
if isinstance(obj, Sequence):
return Namespace.from_sequence(obj, names)
# default
return Namespace.from_object(obj, names)
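# A short demonstration of attribute-style access:
#
#     ns = as_namespace({'host': 'localhost', 'port': 8080})
#     ns.port          # -> 8080
#     ns.debug = True  # equivalent to ns['debug'] = True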
|
Seagate Technology is continuing its expansion into the notebook market by releasing what it says are the largest high speed hard drives in the industry.
The Momentus 5400.2 and 4200.2 lines now include 120GB models running at 5400 revolutions per minute (RPM) and 4200 RPM respectively.
All three models also come in optional Serial ATA versions.
“Now we are going to have a complete line of 2.5-in. notebook drives,” said Mark Walker, a Seagate product marketing manager.
The 7200-speed models are best suited for desktop replacement and mobile workstations, he said, while the 5400-speed models offer a balance of performance and power consumption.
While the bulk of its sales go to OEM manufacturers, Walker said the new drives will also appeal to the small but growing market of system builders who make whitebooks.
Seagate sells drives to system builders through distributors; all of these drives carry five-year warranties.
According to Evans Research, the whitebook market in Canada is modest, but increasing by about six per cent a year.
Last year about 122,800 whitebooks were sold. This year it expects that to increase to 130,200. By comparison, 817,000 notebooks from OEM manufacturers were sold, which is expected to increase to 866,000 this year.
Evans analyst Michelle Warren noted that brand-name manufacturers are “coming to market with pretty aggressive pricing,” making it tough for system builders.
Instead, they should focus on markets that aren’t brand-name sensitive, such as the home and SMB business segments. They should also remember there’s money to be made in selling laptop accessories such as printers, mice, monitors and cases.
Seagate has only been in the laptop hard drive market for a year and a half, he said. According to the company’s figures it has about eight per cent of the worldwide market, behind leaders Fujitsu and Toshiba. “It takes some time to adopt a new player,” Walker said.
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
Provide the theme XML and handling functions for OpenLP v2 themes.
"""
import os
import re
import logging
from xml.dom.minidom import Document
from lxml import etree, objectify
from openlp.core.lib import str_to_bool, ScreenList
log = logging.getLogger(__name__)
BLANK_THEME_XML = \
'''<?xml version="1.0" encoding="utf-8"?>
<theme version="1.0">
<name> </name>
<background type="image">
<filename></filename>
<borderColor>#000000</borderColor>
</background>
<background type="gradient">
<startColor>#000000</startColor>
<endColor>#000000</endColor>
<direction>vertical</direction>
</background>
<background type="solid">
<color>#000000</color>
</background>
<font type="main">
<name>Arial</name>
<color>#FFFFFF</color>
<size>40</size>
<bold>False</bold>
<italics>False</italics>
<line_adjustment>0</line_adjustment>
<shadow shadowColor="#000000" shadowSize="5">True</shadow>
<outline outlineColor="#000000" outlineSize="2">False</outline>
<location override="False" x="10" y="10" width="1004" height="690"/>
</font>
<font type="footer">
<name>Arial</name>
<color>#FFFFFF</color>
<size>12</size>
<bold>False</bold>
<italics>False</italics>
<line_adjustment>0</line_adjustment>
<shadow shadowColor="#000000" shadowSize="5">True</shadow>
<outline outlineColor="#000000" outlineSize="2">False</outline>
<location override="False" x="10" y="690" width="1004" height="78"/>
</font>
<display>
<horizontalAlign>0</horizontalAlign>
<verticalAlign>0</verticalAlign>
<slideTransition>False</slideTransition>
</display>
</theme>
'''
class ThemeLevel(object):
"""
Provides an enumeration for the level a theme applies to
"""
Global = 1
Service = 2
Song = 3
class BackgroundType(object):
"""
Type enumeration for backgrounds.
"""
Solid = 0
Gradient = 1
Image = 2
Transparent = 3
@staticmethod
def to_string(background_type):
"""
Return a string representation of a background type.
"""
if background_type == BackgroundType.Solid:
return u'solid'
elif background_type == BackgroundType.Gradient:
return u'gradient'
elif background_type == BackgroundType.Image:
return u'image'
elif background_type == BackgroundType.Transparent:
return u'transparent'
@staticmethod
def from_string(type_string):
"""
Return a background type for the given string.
"""
if type_string == u'solid':
return BackgroundType.Solid
elif type_string == u'gradient':
return BackgroundType.Gradient
elif type_string == u'image':
return BackgroundType.Image
elif type_string == u'transparent':
return BackgroundType.Transparent
class BackgroundGradientType(object):
"""
Type enumeration for background gradients.
"""
Horizontal = 0
Vertical = 1
Circular = 2
LeftTop = 3
LeftBottom = 4
@staticmethod
def to_string(gradient_type):
"""
Return a string representation of a background gradient type.
"""
if gradient_type == BackgroundGradientType.Horizontal:
return u'horizontal'
elif gradient_type == BackgroundGradientType.Vertical:
return u'vertical'
elif gradient_type == BackgroundGradientType.Circular:
return u'circular'
elif gradient_type == BackgroundGradientType.LeftTop:
return u'leftTop'
elif gradient_type == BackgroundGradientType.LeftBottom:
return u'leftBottom'
@staticmethod
def from_string(type_string):
"""
Return a background gradient type for the given string.
"""
if type_string == u'horizontal':
return BackgroundGradientType.Horizontal
elif type_string == u'vertical':
return BackgroundGradientType.Vertical
elif type_string == u'circular':
return BackgroundGradientType.Circular
elif type_string == u'leftTop':
return BackgroundGradientType.LeftTop
elif type_string == u'leftBottom':
return BackgroundGradientType.LeftBottom
class HorizontalType(object):
"""
Type enumeration for horizontal alignment.
"""
Left = 0
Right = 1
Center = 2
Justify = 3
Names = [u'left', u'right', u'center', u'justify']
class VerticalType(object):
"""
Type enumeration for vertical alignment.
"""
Top = 0
Middle = 1
Bottom = 2
Names = [u'top', u'middle', u'bottom']
BOOLEAN_LIST = [u'bold', u'italics', u'override', u'outline', u'shadow',
u'slide_transition']
INTEGER_LIST = [u'size', u'line_adjustment', u'x', u'height', u'y',
u'width', u'shadow_size', u'outline_size', u'horizontal_align',
u'vertical_align', u'wrap_style']
class ThemeXML(object):
"""
A class to encapsulate the Theme XML.
"""
FIRST_CAMEL_REGEX = re.compile(u'(.)([A-Z][a-z]+)')
SECOND_CAMEL_REGEX = re.compile(u'([a-z0-9])([A-Z])')
def __init__(self):
"""
Initialise the theme object.
"""
# Create the minidom document
self.theme_xml = Document()
self.parse_xml(BLANK_THEME_XML)
def extend_image_filename(self, path):
"""
Add the path name to the image name so the background can be rendered.
``path``
The path name to be added.
"""
if self.background_type == u'image':
if self.background_filename and path:
self.theme_name = self.theme_name.strip()
self.background_filename = self.background_filename.strip()
self.background_filename = os.path.join(path, self.theme_name,
self.background_filename)
def _new_document(self, name):
"""
Create a new theme XML document.
"""
self.theme_xml = Document()
self.theme = self.theme_xml.createElement(u'theme')
self.theme_xml.appendChild(self.theme)
self.theme.setAttribute(u'version', u'2.0')
self.name = self.theme_xml.createElement(u'name')
text_node = self.theme_xml.createTextNode(name)
self.name.appendChild(text_node)
self.theme.appendChild(self.name)
def add_background_transparent(self):
"""
Add a transparent background.
"""
background = self.theme_xml.createElement(u'background')
background.setAttribute(u'type', u'transparent')
self.theme.appendChild(background)
def add_background_solid(self, bkcolor):
"""
Add a Solid background.
``bkcolor``
The color of the background.
"""
background = self.theme_xml.createElement(u'background')
background.setAttribute(u'type', u'solid')
self.theme.appendChild(background)
self.child_element(background, u'color', unicode(bkcolor))
def add_background_gradient(self, startcolor, endcolor, direction):
"""
Add a gradient background.
``startcolor``
The gradient's starting colour.
``endcolor``
The gradient's ending colour.
``direction``
The direction of the gradient.
"""
background = self.theme_xml.createElement(u'background')
background.setAttribute(u'type', u'gradient')
self.theme.appendChild(background)
# Create startColor element
self.child_element(background, u'startColor', unicode(startcolor))
# Create endColor element
self.child_element(background, u'endColor', unicode(endcolor))
# Create direction element
self.child_element(background, u'direction', unicode(direction))
def add_background_image(self, filename, borderColor):
"""
Add a image background.
``filename``
The file name of the image.
"""
background = self.theme_xml.createElement(u'background')
background.setAttribute(u'type', u'image')
self.theme.appendChild(background)
# Create Filename element
self.child_element(background, u'filename', filename)
# Create endColor element
self.child_element(background, u'borderColor', unicode(borderColor))
def add_font(self, name, color, size, override, fonttype=u'main',
bold=u'False', italics=u'False', line_adjustment=0,
xpos=0, ypos=0, width=0, height=0, outline=u'False',
outline_color=u'#ffffff', outline_pixel=2, shadow=u'False',
shadow_color=u'#ffffff', shadow_pixel=5):
"""
Add a Font.
``name``
The name of the font.
``color``
The colour of the font.
``size``
The size of the font.
``override``
Whether or not to override the default positioning of the theme.
``fonttype``
The type of font, ``main`` or ``footer``. Defaults to ``main``.
        ``weight``
            The weight of the font. Defaults to 50 (normal).
        ``italics``
            Whether the font renders as italics. Defaults to 0 (normal).
``xpos``
The X position of the text block.
``ypos``
The Y position of the text block.
``width``
The width of the text block.
``height``
The height of the text block.
``outline``
Whether or not to show an outline.
``outline_color``
The colour of the outline.
        ``outline_size``
            How big the outline is.
``shadow``
Whether or not to show a shadow.
``shadow_color``
The colour of the shadow.
        ``shadow_size``
            How big the shadow is.
"""
background = self.theme_xml.createElement(u'font')
background.setAttribute(u'type', fonttype)
self.theme.appendChild(background)
# Create Font name element
self.child_element(background, u'name', name)
# Create Font color element
self.child_element(background, u'color', unicode(color))
# Create Proportion name element
self.child_element(background, u'size', unicode(size))
# Create weight name element
self.child_element(background, u'bold', unicode(bold))
# Create italics name element
self.child_element(background, u'italics', unicode(italics))
# Create indentation name element
self.child_element(background, u'line_adjustment', unicode(line_adjustment))
# Create Location element
element = self.theme_xml.createElement(u'location')
element.setAttribute(u'override', unicode(override))
element.setAttribute(u'x', unicode(xpos))
element.setAttribute(u'y', unicode(ypos))
element.setAttribute(u'width', unicode(width))
element.setAttribute(u'height', unicode(height))
background.appendChild(element)
# Shadow
element = self.theme_xml.createElement(u'shadow')
element.setAttribute(u'shadowColor', unicode(shadow_color))
element.setAttribute(u'shadowSize', unicode(shadow_pixel))
value = self.theme_xml.createTextNode(unicode(shadow))
element.appendChild(value)
background.appendChild(element)
# Outline
element = self.theme_xml.createElement(u'outline')
element.setAttribute(u'outlineColor', unicode(outline_color))
element.setAttribute(u'outlineSize', unicode(outline_pixel))
value = self.theme_xml.createTextNode(unicode(outline))
element.appendChild(value)
background.appendChild(element)
def add_display(self, horizontal, vertical, transition):
"""
Add a Display options.
``horizontal``
The horizontal alignment of the text.
``vertical``
The vertical alignment of the text.
``transition``
Whether the slide transition is active.
"""
background = self.theme_xml.createElement(u'display')
self.theme.appendChild(background)
# Horizontal alignment
element = self.theme_xml.createElement(u'horizontalAlign')
value = self.theme_xml.createTextNode(unicode(horizontal))
element.appendChild(value)
background.appendChild(element)
# Vertical alignment
element = self.theme_xml.createElement(u'verticalAlign')
value = self.theme_xml.createTextNode(unicode(vertical))
element.appendChild(value)
background.appendChild(element)
# Slide Transition
element = self.theme_xml.createElement(u'slideTransition')
value = self.theme_xml.createTextNode(unicode(transition))
element.appendChild(value)
background.appendChild(element)
def child_element(self, element, tag, value):
"""
Generic child element creator.
"""
child = self.theme_xml.createElement(tag)
child.appendChild(self.theme_xml.createTextNode(value))
element.appendChild(child)
return child
def set_default_header_footer(self):
"""
Set the header and footer size into the current primary screen.
10 px on each side is removed to allow for a border.
"""
current_screen = ScreenList().current
self.font_main_y = 0
self.font_main_width = current_screen[u'size'].width() - 20
self.font_main_height = current_screen[u'size'].height() * 9 / 10
self.font_footer_width = current_screen[u'size'].width() - 20
self.font_footer_y = current_screen[u'size'].height() * 9 / 10
self.font_footer_height = current_screen[u'size'].height() / 10
def dump_xml(self):
"""
        Return the theme XML as a pretty-printed string, used for debugging.
"""
return self.theme_xml.toprettyxml(indent=u' ')
def extract_xml(self):
"""
        Return the XML string.
"""
self._build_xml_from_attrs()
return self.theme_xml.toxml(u'utf-8').decode(u'utf-8')
def extract_formatted_xml(self):
"""
Pull out the XML string formatted for human consumption
"""
self._build_xml_from_attrs()
return self.theme_xml.toprettyxml(indent=u' ', newl=u'\n', encoding=u'utf-8')
def parse(self, xml):
"""
Read in an XML string and parse it.
``xml``
The XML string to parse.
"""
self.parse_xml(unicode(xml))
def parse_xml(self, xml):
"""
Parse an XML string.
``xml``
The XML string to parse.
"""
# remove encoding string
line = xml.find(u'?>')
        if line != -1:
xml = xml[line + 2:]
try:
theme_xml = objectify.fromstring(xml)
except etree.XMLSyntaxError:
log.exception(u'Invalid xml %s', xml)
return
xml_iter = theme_xml.getiterator()
for element in xml_iter:
master = u''
if element.tag == u'background':
if element.attrib:
for attr in element.attrib:
self._create_attr(element.tag, attr, element.attrib[attr])
parent = element.getparent()
if parent is not None:
if parent.tag == u'font':
master = parent.tag + u'_' + parent.attrib[u'type']
# set up Outline and Shadow Tags and move to font_main
if parent.tag == u'display':
if element.tag.startswith(u'shadow') or element.tag.startswith(u'outline'):
self._create_attr(u'font_main', element.tag, element.text)
master = parent.tag
if parent.tag == u'background':
master = parent.tag
if master:
self._create_attr(master, element.tag, element.text)
if element.attrib:
for attr in element.attrib:
base_element = attr
# correction for the shadow and outline tags
if element.tag == u'shadow' or element.tag == u'outline':
if not attr.startswith(element.tag):
base_element = element.tag + u'_' + attr
self._create_attr(master, base_element, element.attrib[attr])
else:
if element.tag == u'name':
self._create_attr(u'theme', element.tag, element.text)
def _translate_tags(self, master, element, value):
"""
Clean up XML removing and redefining tags
"""
master = master.strip().lstrip()
element = element.strip().lstrip()
value = unicode(value).strip().lstrip()
if master == u'display':
if element == u'wrapStyle':
return True, None, None, None
if element.startswith(u'shadow') or element.startswith(u'outline'):
master = u'font_main'
# fix bold font
if element == u'weight':
element = u'bold'
if value == u'Normal':
value = False
else:
value = True
if element == u'proportion':
element = u'size'
return False, master, element, value
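    # For example, a legacy tag triple (u'font_main', u'weight', u'Normal')
    # comes back as (False, u'font_main', u'bold', False): not rejected,
    # renamed to ``bold`` and its value converted to a boolean.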
def _create_attr(self, master, element, value):
"""
Create the attributes with the correct data types and name format
"""
reject, master, element, value = self._translate_tags(master, element, value)
if reject:
return
field = self._de_hump(element)
tag = master + u'_' + field
if field in BOOLEAN_LIST:
setattr(self, tag, str_to_bool(value))
elif field in INTEGER_LIST:
setattr(self, tag, int(value))
else:
# make string value unicode
if not isinstance(value, unicode):
value = unicode(str(value), u'utf-8')
# None means an empty string so lets have one.
if value == u'None':
value = u''
setattr(self, tag, unicode(value).strip().lstrip())
def __str__(self):
"""
Return a string representation of this object.
"""
theme_strings = []
for key in dir(self):
if key[0:1] != u'_':
theme_strings.append(u'%30s: %s' % (key, getattr(self, key)))
return u'\n'.join(theme_strings)
def _de_hump(self, name):
"""
Change Camel Case string to python string
"""
sub_name = ThemeXML.FIRST_CAMEL_REGEX.sub(r'\1_\2', name)
return ThemeXML.SECOND_CAMEL_REGEX.sub(r'\1_\2', sub_name).lower()
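    # For example, _de_hump(u'outlineColor') gives u'outline_color' and
    # _de_hump(u'shadowSize') gives u'shadow_size', matching the attribute
    # names built up by _create_attr().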
def _build_xml_from_attrs(self):
"""
        Build the XML from the variables in the object
"""
self._new_document(self.theme_name)
if self.background_type == BackgroundType.to_string(BackgroundType.Solid):
self.add_background_solid(self.background_color)
elif self.background_type == BackgroundType.to_string(BackgroundType.Gradient):
self.add_background_gradient(
self.background_start_color,
self.background_end_color,
self.background_direction
)
elif self.background_type == BackgroundType.to_string(BackgroundType.Image):
filename = os.path.split(self.background_filename)[1]
self.add_background_image(filename, self.background_border_color)
elif self.background_type == BackgroundType.to_string(BackgroundType.Transparent):
self.add_background_transparent()
self.add_font(
self.font_main_name,
self.font_main_color,
self.font_main_size,
self.font_main_override, u'main',
self.font_main_bold,
self.font_main_italics,
self.font_main_line_adjustment,
self.font_main_x,
self.font_main_y,
self.font_main_width,
self.font_main_height,
self.font_main_outline,
self.font_main_outline_color,
self.font_main_outline_size,
self.font_main_shadow,
self.font_main_shadow_color,
self.font_main_shadow_size
)
self.add_font(
self.font_footer_name,
self.font_footer_color,
self.font_footer_size,
self.font_footer_override, u'footer',
self.font_footer_bold,
self.font_footer_italics,
0, # line adjustment
self.font_footer_x,
self.font_footer_y,
self.font_footer_width,
self.font_footer_height,
self.font_footer_outline,
self.font_footer_outline_color,
self.font_footer_outline_size,
self.font_footer_shadow,
self.font_footer_shadow_color,
self.font_footer_shadow_size
)
self.add_display(
self.display_horizontal_align,
self.display_vertical_align,
self.display_slide_transition
)
|
Are you happy with your current stock image agency? Find the perfect stock for your project, fast, with no attribution required.
Search iStock's expansive picture library to find the perfect images for your project. You can open a free account and download royalty-free photos and vectors with credits, or with a subscription plan that also covers ebooks, newspapers, magazines, premium stock photos and illustrations for websites, advertising materials and books. Images cost between 1 and 22 credits, with the price of credits ranging from $10 depending on volume purchased and subscription plan. Downloads are unlimited.
iStockPhoto can be a bit of a challenge to join: you have to create an account, read the manual and take a quiz, then submit samples of your best work and wait to be accepted. I recommend looking at iStockPhoto if you are looking to sell your photos online. If you need a stock image, you can look through their massive catalog, find just the right photo or illustration, then pay a few bucks to license it. Like any royalty-free stock photo, the weekly Free Photo from iStockphoto has a watermark over the file until you download it; this is the same at any stock photo agency you decide to buy stock from. Explore the official iStock website for millions of exclusive, royalty-free stock files.
Getty Images blew the web's mind by setting 35 million photos free (with conditions).
A model release form is important to protect photographers from liability and licensing claims that may arise down the road; the release is a contract, and you can download a free model release form template to get started. If you have a number of quality photos, selling stock photos online is an attractive way to make some extra cash; learn about the best types of photos you can take, based on what actually sells. The most common slide element used after text is a graphic, usually a photograph or a vector drawing.
Other sources of images: Shutterstock offers royalty-free stock images, photos, vectors, illustrations, footage, video and music for nearly any application, and Shutterstock Music has a growing library of high-quality stock music and stock video for film, television, commercials, interactive web sites and other multimedia. Stock Free Images is the largest web collection of free images and Creative Commons CC0 stock photos, with 2.1 million royalty-free images and illustrations. Fotolia is the image bank for all your publishing and marketing projects. Stock Photos For Free is a free web-based service that offers a catalog of over 100,000 images organized in several categories, free for commercial use with no attribution required. Vecteezy is an index of free vector graphics available for download. There are also NASA photo collections with links to free planet photos, a free ISO 216 page-size cheat sheet poster infographic (A4 paper dimensions), and directories with more than 790 always-updated links covering stock photos, 3D models, patterns, color tools, videos, fonts, vectors, logotypes, icons, mockups, textures and sounds. Over 100 million high-quality royalty-free stock images and high-definition footage are available at the best prices to fill all your creative needs.
Corel Draw X6 Crack Keygen Plus Activator, free latest version: Corel Draw X6 is a complete set of graphics tools developed by Corel Corp., with drawing tools, photo editing, website design and vector design tools. It is easy to use for anyone, either a new user or an expert designer, and has many new and advanced features in this version. Get Photoshop now at the Adobe Store, or try any Adobe product for a free 30-day trial. |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pyslvs_ui/entities/relocate_point.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from qtpy import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(366, 468)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("icons:calculator.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Dialog.setWindowIcon(icon)
Dialog.setModal(True)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.tab_widget = QtWidgets.QTabWidget(Dialog)
self.tab_widget.setObjectName("tab_widget")
self.plap_tab = QtWidgets.QWidget()
self.plap_tab.setObjectName("plap_tab")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.plap_tab)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.panel_layout = QtWidgets.QHBoxLayout()
self.panel_layout.setObjectName("panel_layout")
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.plap_p1_label = QtWidgets.QLabel(self.plap_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plap_p1_label.sizePolicy().hasHeightForWidth())
self.plap_p1_label.setSizePolicy(sizePolicy)
self.plap_p1_label.setObjectName("plap_p1_label")
self.horizontalLayout_2.addWidget(self.plap_p1_label)
self.plap_p1_box = QtWidgets.QComboBox(self.plap_tab)
self.plap_p1_box.setObjectName("plap_p1_box")
self.horizontalLayout_2.addWidget(self.plap_p1_box)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.plap_p1x_label = QtWidgets.QLabel(self.plap_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plap_p1x_label.sizePolicy().hasHeightForWidth())
self.plap_p1x_label.setSizePolicy(sizePolicy)
self.plap_p1x_label.setObjectName("plap_p1x_label")
self.horizontalLayout.addWidget(self.plap_p1x_label)
self.plap_p1x_box = QtWidgets.QDoubleSpinBox(self.plap_tab)
self.plap_p1x_box.setDecimals(4)
self.plap_p1x_box.setMinimum(-9999.99)
self.plap_p1x_box.setMaximum(9999.99)
self.plap_p1x_box.setObjectName("plap_p1x_box")
self.horizontalLayout.addWidget(self.plap_p1x_box)
self.plap_p1y_label = QtWidgets.QLabel(self.plap_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plap_p1y_label.sizePolicy().hasHeightForWidth())
self.plap_p1y_label.setSizePolicy(sizePolicy)
self.plap_p1y_label.setObjectName("plap_p1y_label")
self.horizontalLayout.addWidget(self.plap_p1y_label)
self.plap_p1y_box = QtWidgets.QDoubleSpinBox(self.plap_tab)
self.plap_p1y_box.setDecimals(4)
self.plap_p1y_box.setMinimum(-9999.99)
self.plap_p1y_box.setMaximum(9999.99)
self.plap_p1y_box.setObjectName("plap_p1y_box")
self.horizontalLayout.addWidget(self.plap_p1y_box)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.plap_angle_label = QtWidgets.QLabel(self.plap_tab)
self.plap_angle_label.setObjectName("plap_angle_label")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.plap_angle_label)
self.plap_angle_box = QtWidgets.QDoubleSpinBox(self.plap_tab)
self.plap_angle_box.setDecimals(4)
self.plap_angle_box.setMaximum(360.0)
self.plap_angle_box.setObjectName("plap_angle_box")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.plap_angle_box)
self.plap_distance_label = QtWidgets.QLabel(self.plap_tab)
self.plap_distance_label.setObjectName("plap_distance_label")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.plap_distance_label)
self.plap_distance_box = QtWidgets.QDoubleSpinBox(self.plap_tab)
self.plap_distance_box.setDecimals(4)
self.plap_distance_box.setMaximum(9999.99)
self.plap_distance_box.setObjectName("plap_distance_box")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.plap_distance_box)
self.verticalLayout_3.addLayout(self.formLayout)
self.panel_layout.addLayout(self.verticalLayout_3)
self.verticalLayout_4.addLayout(self.panel_layout)
spacerItem = QtWidgets.QSpacerItem(20, 126, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem)
self.tab_widget.addTab(self.plap_tab, "")
self.pllp_tab = QtWidgets.QWidget()
self.pllp_tab.setObjectName("pllp_tab")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.pllp_tab)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.widget = QtWidgets.QWidget(self.pllp_tab)
self.widget.setObjectName("widget")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.pllp_p1_label = QtWidgets.QRadioButton(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p1_label.sizePolicy().hasHeightForWidth())
self.pllp_p1_label.setSizePolicy(sizePolicy)
self.pllp_p1_label.setChecked(True)
self.pllp_p1_label.setObjectName("pllp_p1_label")
self.horizontalLayout_4.addWidget(self.pllp_p1_label)
self.pllp_p1_box = QtWidgets.QComboBox(self.widget)
self.pllp_p1_box.setObjectName("pllp_p1_box")
self.horizontalLayout_4.addWidget(self.pllp_p1_box)
self.verticalLayout_5.addLayout(self.horizontalLayout_4)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.pllp_p1xy_label = QtWidgets.QRadioButton(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p1xy_label.sizePolicy().hasHeightForWidth())
self.pllp_p1xy_label.setSizePolicy(sizePolicy)
self.pllp_p1xy_label.setText("")
self.pllp_p1xy_label.setObjectName("pllp_p1xy_label")
self.horizontalLayout_3.addWidget(self.pllp_p1xy_label)
self.pllp_p1x_label = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p1x_label.sizePolicy().hasHeightForWidth())
self.pllp_p1x_label.setSizePolicy(sizePolicy)
self.pllp_p1x_label.setObjectName("pllp_p1x_label")
self.horizontalLayout_3.addWidget(self.pllp_p1x_label)
self.pllp_p1x_box = QtWidgets.QDoubleSpinBox(self.widget)
self.pllp_p1x_box.setDecimals(4)
self.pllp_p1x_box.setMinimum(-9999.99)
self.pllp_p1x_box.setMaximum(9999.99)
self.pllp_p1x_box.setObjectName("pllp_p1x_box")
self.horizontalLayout_3.addWidget(self.pllp_p1x_box)
self.pllp_p1y_label = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p1y_label.sizePolicy().hasHeightForWidth())
self.pllp_p1y_label.setSizePolicy(sizePolicy)
self.pllp_p1y_label.setObjectName("pllp_p1y_label")
self.horizontalLayout_3.addWidget(self.pllp_p1y_label)
self.pllp_p1y_box = QtWidgets.QDoubleSpinBox(self.widget)
self.pllp_p1y_box.setDecimals(4)
self.pllp_p1y_box.setMinimum(-9999.99)
self.pllp_p1y_box.setMaximum(9999.99)
self.pllp_p1y_box.setObjectName("pllp_p1y_box")
self.horizontalLayout_3.addWidget(self.pllp_p1y_box)
self.verticalLayout_5.addLayout(self.horizontalLayout_3)
self.verticalLayout_7.addWidget(self.widget)
self.widget1 = QtWidgets.QWidget(self.pllp_tab)
self.widget1.setObjectName("widget1")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.widget1)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.pllp_p2_label = QtWidgets.QRadioButton(self.widget1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p2_label.sizePolicy().hasHeightForWidth())
self.pllp_p2_label.setSizePolicy(sizePolicy)
self.pllp_p2_label.setChecked(True)
self.pllp_p2_label.setObjectName("pllp_p2_label")
self.horizontalLayout_5.addWidget(self.pllp_p2_label)
self.pllp_p2_box = QtWidgets.QComboBox(self.widget1)
self.pllp_p2_box.setObjectName("pllp_p2_box")
self.horizontalLayout_5.addWidget(self.pllp_p2_box)
self.verticalLayout_6.addLayout(self.horizontalLayout_5)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.pllp_p2xy_label = QtWidgets.QRadioButton(self.widget1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p2xy_label.sizePolicy().hasHeightForWidth())
self.pllp_p2xy_label.setSizePolicy(sizePolicy)
self.pllp_p2xy_label.setText("")
self.pllp_p2xy_label.setObjectName("pllp_p2xy_label")
self.horizontalLayout_6.addWidget(self.pllp_p2xy_label)
self.pllp_p2x_label = QtWidgets.QLabel(self.widget1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p2x_label.sizePolicy().hasHeightForWidth())
self.pllp_p2x_label.setSizePolicy(sizePolicy)
self.pllp_p2x_label.setObjectName("pllp_p2x_label")
self.horizontalLayout_6.addWidget(self.pllp_p2x_label)
self.pllp_p2x_box = QtWidgets.QDoubleSpinBox(self.widget1)
self.pllp_p2x_box.setDecimals(4)
self.pllp_p2x_box.setMinimum(-9999.99)
self.pllp_p2x_box.setMaximum(9999.99)
self.pllp_p2x_box.setObjectName("pllp_p2x_box")
self.horizontalLayout_6.addWidget(self.pllp_p2x_box)
self.pllp_p2y_label = QtWidgets.QLabel(self.widget1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p2y_label.sizePolicy().hasHeightForWidth())
self.pllp_p2y_label.setSizePolicy(sizePolicy)
self.pllp_p2y_label.setObjectName("pllp_p2y_label")
self.horizontalLayout_6.addWidget(self.pllp_p2y_label)
self.pllp_p2y_box = QtWidgets.QDoubleSpinBox(self.widget1)
self.pllp_p2y_box.setDecimals(4)
self.pllp_p2y_box.setMinimum(-9999.99)
self.pllp_p2y_box.setMaximum(9999.99)
self.pllp_p2y_box.setObjectName("pllp_p2y_box")
self.horizontalLayout_6.addWidget(self.pllp_p2y_box)
self.verticalLayout_6.addLayout(self.horizontalLayout_6)
self.verticalLayout_7.addWidget(self.widget1)
self.formLayout_2 = QtWidgets.QFormLayout()
self.formLayout_2.setObjectName("formLayout_2")
self.pllp_distance1_box = QtWidgets.QDoubleSpinBox(self.pllp_tab)
self.pllp_distance1_box.setDecimals(4)
self.pllp_distance1_box.setMaximum(9999.99)
self.pllp_distance1_box.setObjectName("pllp_distance1_box")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.pllp_distance1_box)
self.pllp_distance1_label = QtWidgets.QLabel(self.pllp_tab)
self.pllp_distance1_label.setObjectName("pllp_distance1_label")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.pllp_distance1_label)
self.pllp_distance2_box = QtWidgets.QDoubleSpinBox(self.pllp_tab)
self.pllp_distance2_box.setDecimals(4)
self.pllp_distance2_box.setMaximum(9999.99)
self.pllp_distance2_box.setObjectName("pllp_distance2_box")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.pllp_distance2_box)
self.pllp_distance2_label = QtWidgets.QLabel(self.pllp_tab)
self.pllp_distance2_label.setObjectName("pllp_distance2_label")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.pllp_distance2_label)
self.verticalLayout_7.addLayout(self.formLayout_2)
self.pllp_inversed_box = QtWidgets.QCheckBox(self.pllp_tab)
self.pllp_inversed_box.setObjectName("pllp_inversed_box")
self.verticalLayout_7.addWidget(self.pllp_inversed_box)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_7.addItem(spacerItem1)
self.tab_widget.addTab(self.pllp_tab, "")
self.verticalLayout.addWidget(self.tab_widget)
self.preview_label = QtWidgets.QLabel(Dialog)
self.preview_label.setObjectName("preview_label")
self.verticalLayout.addWidget(self.preview_label)
self.button_box = QtWidgets.QDialogButtonBox(Dialog)
self.button_box.setOrientation(QtCore.Qt.Horizontal)
self.button_box.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.button_box.setObjectName("button_box")
self.verticalLayout.addWidget(self.button_box)
self.retranslateUi(Dialog)
self.button_box.rejected.connect(Dialog.reject)
self.button_box.accepted.connect(Dialog.accept)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Relocate"))
self.plap_p1_label.setText(_translate("Dialog", "Point"))
self.plap_p1x_label.setText(_translate("Dialog", "X"))
self.plap_p1y_label.setText(_translate("Dialog", "Y"))
self.plap_angle_label.setText(_translate("Dialog", "Angle"))
self.plap_distance_label.setText(_translate("Dialog", "Distance"))
self.tab_widget.setTabText(self.tab_widget.indexOf(self.plap_tab), _translate("Dialog", "Polar"))
self.pllp_p1_label.setText(_translate("Dialog", "Point &1"))
self.pllp_p1x_label.setText(_translate("Dialog", "X"))
self.pllp_p1y_label.setText(_translate("Dialog", "Y"))
self.pllp_p2_label.setText(_translate("Dialog", "Point &2"))
self.pllp_p2x_label.setText(_translate("Dialog", "X"))
self.pllp_p2y_label.setText(_translate("Dialog", "Y"))
self.pllp_distance1_label.setText(_translate("Dialog", "Distance 1"))
self.pllp_distance2_label.setText(_translate("Dialog", "Distance 2"))
self.pllp_inversed_box.setText(_translate("Dialog", "Inverse the position to another side."))
self.tab_widget.setTabText(self.tab_widget.indexOf(self.pllp_tab), _translate("Dialog", "Two Points"))
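# --- Hedged usage sketch (an assumption, not part of the generated file:
# pyuic5 only appends a harness like this when run with -x). It also assumes
# the generated class above is named Ui_Dialog, the usual pyuic convention
# for a top-level "Dialog" form.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())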
|
This Privacy and Communications Policy (“Policy”) is effective as of September 15, 2015.
This Policy sets forth how the operators (“we” or “us”) of www.macrynvoicegreetings.com (“Website”) collect and use the information provided to us and certain other information that we automatically collect from your general access and use of this Website. YOU ACKNOWLEDGE AND AGREE THAT BY USING ANY PORTION OF THIS WEBSITE, YOU INDICATE THAT YOU HAVE READ, UNDERSTAND, AND AGREE TO OUR COLLECTION AND USE OF THE INFORMATION PROVIDED BY YOU OR AUTOMATICALLY COLLECTED BY US, AS SET FORTH IN THIS POLICY.
If you have any general questions or concerns about this Policy, you may contact us by sending an email with your questions, comments or concerns to info@macrynvoicegreetings.com.
How We Use & Share Your Personal Information.
By providing your email address to us, you consent to our use of that email address to send you certain promotional emails containing special offers, updates about our services, or notice of any significant updates to this Website. You may opt out of receiving any such email or text message communication from us at any time by sending an email to info@macrynvoicegreetings.com with the words “Opt-Out Communications Request” in the subject line. If you only wish to opt out of a certain type of communication from us, include the words “Opt-Out Text Message Request” or “Opt-Out Email Request”, as appropriate.
birth certificate. We also reserve the right to require that you sign and provide us with a notarized affidavit verifying your identity before we update or release any information to you.
California Civil Code Section 1798.83 permits California residents to request certain information regarding our disclosure of their personal information to third parties for their direct marketing purposes. California law also allows California residents to opt out of having this information disclosed. If you are a California resident and wish to make such a request, please contact info@macrynvoicegreetings.com and include the words “California Privacy Disclosure Request” in the subject line. |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from buildbot.buildslave.protocols import pb
from buildbot.test.fake import fakemaster
from buildbot.test.util import protocols as util_protocols
from twisted.internet import defer
from twisted.spread import pb as twisted_pb
from twisted.trial import unittest
class TestListener(unittest.TestCase):
def setUp(self):
self.master = fakemaster.make_master()
def test_constructor(self):
listener = pb.Listener(self.master)
self.assertEqual(listener.master, self.master)
self.assertEqual(listener._registrations, {})
@defer.inlineCallbacks
def test_updateRegistration_simple(self):
listener = pb.Listener(self.master)
reg = yield listener.updateRegistration('example', 'pass', 'tcp:1234')
self.assertEqual(self.master.pbmanager._registrations,
[('tcp:1234', 'example', 'pass')])
self.assertEqual(listener._registrations['example'], ('pass', 'tcp:1234', reg))
@defer.inlineCallbacks
def test_updateRegistration_pass_changed(self):
listener = pb.Listener(self.master)
listener.updateRegistration('example', 'pass', 'tcp:1234')
reg1 = yield listener.updateRegistration('example', 'pass1', 'tcp:1234')
self.assertEqual(listener._registrations['example'], ('pass1', 'tcp:1234', reg1))
self.assertEqual(self.master.pbmanager._unregistrations, [('tcp:1234', 'example')])
@defer.inlineCallbacks
def test_updateRegistration_port_changed(self):
listener = pb.Listener(self.master)
listener.updateRegistration('example', 'pass', 'tcp:1234')
reg1 = yield listener.updateRegistration('example', 'pass', 'tcp:4321')
self.assertEqual(listener._registrations['example'], ('pass', 'tcp:4321', reg1))
self.assertEqual(self.master.pbmanager._unregistrations, [('tcp:1234', 'example')])
@defer.inlineCallbacks
def test_getPerspective(self):
listener = pb.Listener(self.master)
buildslave = mock.Mock()
buildslave.slavename = 'test'
mind = mock.Mock()
listener.updateRegistration('example', 'pass', 'tcp:1234')
self.master.buildslaves.register(buildslave)
conn = yield listener._getPerspective(mind, buildslave.slavename)
mind.broker.transport.setTcpKeepAlive.assert_called_with(1)
self.assertIsInstance(conn, pb.Connection)
class TestConnectionApi(util_protocols.ConnectionInterfaceTest,
unittest.TestCase):
def setUp(self):
self.master = fakemaster.make_master()
self.conn = pb.Connection(self.master, mock.Mock(), mock.Mock())
class TestConnection(unittest.TestCase):
def setUp(self):
self.master = fakemaster.make_master()
self.mind = mock.Mock()
self.buildslave = mock.Mock()
def test_constructor(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
self.assertEqual(conn.mind, self.mind)
self.assertEqual(conn.master, self.master)
self.assertEqual(conn.buildslave, self.buildslave)
@defer.inlineCallbacks
def test_attached(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
att = yield conn.attached(self.mind)
self.assertNotEqual(conn.keepalive_timer, None)
self.buildslave.attached.assert_called_with(conn)
self.assertEqual(att, conn)
conn.detached(self.mind)
def test_detached(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.attached(self.mind)
conn.detached(self.mind)
self.assertEqual(conn.keepalive_timer, None)
self.assertEqual(conn.mind, None)
def test_loseConnection(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.loseConnection()
self.assertEqual(conn.keepalive_timer, None)
conn.mind.broker.transport.loseConnection.assert_called_with()
def test_remotePrint(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.remotePrint(message='test')
conn.mind.callRemote.assert_called_with('print', message='test')
@defer.inlineCallbacks
def test_remoteGetSlaveInfo(self):
def side_effect(*args, **kwargs):
if 'getSlaveInfo' in args:
return defer.succeed({'info': 'test'})
if 'getCommands' in args:
return defer.succeed({'x': 1, 'y': 2})
if 'getVersion' in args:
return defer.succeed('TheVersion')
self.mind.callRemote.side_effect = side_effect
conn = pb.Connection(self.master, self.buildslave, self.mind)
info = yield conn.remoteGetSlaveInfo()
r = {'info': 'test', 'slave_commands': {'y': 2, 'x': 1}, 'version': 'TheVersion'}
self.assertEqual(info, r)
calls = [mock.call('getSlaveInfo'), mock.call('getCommands'), mock.call('getVersion')]
self.mind.callRemote.assert_has_calls(calls)
@defer.inlineCallbacks
def test_remoteGetSlaveInfo_getSlaveInfo_fails(self):
def side_effect(*args, **kwargs):
if 'getSlaveInfo' in args:
return defer.fail(twisted_pb.NoSuchMethod())
if 'getCommands' in args:
return defer.succeed({'x': 1, 'y': 2})
if 'getVersion' in args:
return defer.succeed('TheVersion')
self.mind.callRemote.side_effect = side_effect
conn = pb.Connection(self.master, self.buildslave, self.mind)
info = yield conn.remoteGetSlaveInfo()
r = {'slave_commands': {'y': 2, 'x': 1}, 'version': 'TheVersion'}
self.assertEqual(info, r)
calls = [mock.call('getSlaveInfo'), mock.call('getCommands'), mock.call('getVersion')]
self.mind.callRemote.assert_has_calls(calls)
@defer.inlineCallbacks
def test_remoteSetBuilderList(self):
builders = ['builder1', 'builder2']
self.mind.callRemote.return_value = defer.succeed(builders)
conn = pb.Connection(self.master, self.buildslave, self.mind)
r = yield conn.remoteSetBuilderList(builders)
self.assertEqual(r, builders)
self.assertEqual(conn.builders, builders)
self.mind.callRemote.assert_called_with('setBuilderList', builders)
def test_remoteStartCommand(self):
builders = ['builder']
ret_val = {'builder': mock.Mock()}
self.mind.callRemote.return_value = defer.succeed(ret_val)
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.remoteSetBuilderList(builders)
RCInstance, builder_name, commandID = None, "builder", None
remote_command, args = "command", "args"
conn.remoteStartCommand(RCInstance, builder_name, commandID, remote_command, args)
ret_val['builder'].callRemote.assert_called_with('startCommand',
RCInstance, commandID, remote_command, args)
def test_doKeepalive(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.doKeepalive()
self.mind.callRemote.assert_called_with('print', message="keepalive")
def test_remoteShutdown(self):
self.mind.callRemote.return_value = defer.succeed(None)
conn = pb.Connection(self.master, self.buildslave, self.mind)
# note that we do not test the "old way", as it is now *very* old.
conn.remoteShutdown()
self.mind.callRemote.assert_called_with('shutdown')
def test_remoteStartBuild(self):
builders = {'builder': mock.Mock()}
self.mind.callRemote.return_value = defer.succeed(builders)
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.remoteSetBuilderList(builders)
conn.remoteStartBuild('builder')
builders['builder'].callRemote.assert_called_with('startBuild')
def test_startStopKeepaliveTimer(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.startKeepaliveTimer()
self.assertNotEqual(conn.keepalive_timer, None)
conn.stopKeepaliveTimer()
self.assertEqual(conn.keepalive_timer, None)
def test_perspective_shutdown(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.perspective_shutdown()
conn.buildslave.shutdownRequested.assert_called_with()
conn.buildslave.messageReceivedFromSlave.assert_called_with()
def test_perspective_keepalive(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.perspective_keepalive()
conn.buildslave.messageReceivedFromSlave.assert_called_with()
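# Hedged usage note: as a Twisted Trial suite, these tests would typically be
# run with the trial runner, e.g. (the exact module path is an assumption):
#   trial buildbot.test.unit.test_buildslave_protocols_pb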
|
We got two kinds of bittersweet baking chocolates from Lauren Adler, owner of Chocolopolis, to try an A / B baking chocolate experiment. I wanted to make brownies, but haven’t found a really great brownie recipe. So I went straight to David Lebovitz’s blog to see if he had one, since every dessert he touches is gold, and chocolate is his specialty. The recipe I found there wasn’t his but Nick Malgieri’s “Supernatural” Brownies, which sounded great. Then, before I had a chance to make the brownies, I was reading the September issue of Saveur, and the very same recipe is printed in there. It was settled – now I knew I had to make them.
The chocolates we wanted to compare are Valrhona Manjari 64%, and Guittard Coucher du Soleil 72%. On their own, they’re both good, but I like the Valrhona better because it has the hints of cherry that I really like. But would we be able to tell the difference in brownies?
To make it a true A / B experiment, I used the same eggs, butter, vanilla, etc., and I baked them side-by-side in the oven, switching sides halfway through baking.
The result is a super fudgy, moist brownie – exactly what I was hoping for! So could we tell the difference? Well, the Valrhona brownie had a slightly brighter chocolate flavor, while the Guittard had a deeper, more intense chocolate flavor. But the difference is subtle. No matter, because it turns out this recipe is awesome! This is definitely going to be my brownie recipe of choice from now on.
We’ll be bringing a half pan of each batch to Chocolopolis tomorrow morning for Lauren to try. If you’re reading this and would like to try the taste test for yourself, head on up to her store, ask for my brownies, and she’ll give you a sample of each. The thing that isn’t mentioned in the Saveur issue but is mentioned on David’s blog is that brownies improve after sitting for a day or two. Lauren says 3-4 days is even better, which is why she told me she’ll keep them on hand until Wednesday, if they’re not gone before then. If you’re planning on stopping by, note that the store is closed Mondays. Report back here on what you think!
This entry was posted on Saturday, August 9th, 2008 at 6:29 pm and is filed under Food at home, Sweets, Taste-offs.
Tasty! We recommend the brownie recipe (and most of the other recipes) in The Baker’s Dozen Cookbook. It has a nice explanation for why they tell you to do most steps in most recipes, which I appreciate.
I must sheepishly admit that I already have an inordinate number of brownie recipes bookmarked to try, but these look too delicious to pass up. Maybe I can convince myself that making them for taste-testing purposes makes them less indulgent! Thanks for sharing.
These look gorgeous, I bet they are all gone now!
woohoo, scott scored some brownies tonight!!! they were both delicious, but we both ended up slightly preferring the intensity of the guittard. mmmmmmm.
|
import logging
log = logging.getLogger("Thug")
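# Hedged behavior note: launch() below expects an argument whose first token is
# the literal 'http:'; every later token that starts with 'http' is fetched and
# logged. A purely hypothetical call for illustration:
#   launch(self, "http: -J-jar http://malicious.example/payload.jar")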
def launch(self, arg):
log.ThugLogging.add_behavior_warn("[Java Deployment Toolkit ActiveX] Launching: %s" % (arg, ))
tokens = arg.split(' ')
if tokens[0].lower() != 'http:':
return
for token in tokens[1:]:
if not token.lower().startswith('http'):
continue
log.ThugLogging.add_behavior_warn("[Java Deployment Toolkit ActiveX] Fetching from URL %s" % (token, ))
log.ThugLogging.log_exploit_event(self._window.url,
"Java Deployment Toolkit ActiveX",
"Fetching from URL",
data = {
"url": token
},
forward = False)
try:
self._window._navigator.fetch(token, redirect_type = "Java Deployment Toolkit Exploit")
except Exception:
log.ThugLogging.add_behavior_warn("[Java Deployment Toolkit ActiveX] Fetch Failed")
def launchApp(self, pJNLP, pEmbedded = None, pVmArgs = None):
cve_2013_2416 = False
if len(pJNLP) > 32:
cve_2013_2416 = True
log.ThugLogging.Shellcode.check_shellcode(pJNLP)
if pEmbedded:
cve_2013_2416 = True
log.ThugLogging.Shellcode.check_shellcode(pEmbedded)
if cve_2013_2416:
log.ThugLogging.log_exploit_event(self._window.url,
"Java Deployment Toolkit ActiveX",
"Java ActiveX component memory corruption (CVE-2013-2416)",
cve = "CVE-2013-2416",
forward = True)
log.ThugLogging.log_classifier("exploit", log.ThugLogging.url, "CVE-2013-2416")
|
Residential complex for year-round living, with a medical and rehabilitation center on the ground floor and a swimming pool in the garden.
The complex is in the central part of Chernomorets, near the bus station and 400 meters from the beach. The built-up area of the building is 2,973 sq.m. The building is insulated with top-class thermal insulation and has a hydraulic lift, free parking spaces, and a gated garden.
The residential part consists of five floors.
There is an outdoor swimming pool measuring 18 by 8 meters.
On the ground floor there is a medical and rehabilitation center with mineral water.
Apartments and studios on the second, third, and fourth floors are finished “turnkey”, with equipped bathrooms and PVC windows with German profiles. All rooms have electrical heating, and wiring is installed for air conditioners.
• Studio №6 – second floor, area 35.83 m2, price €24,544.
• Studio №8 – third floor. It consists of an entrance hall, bedroom, bathroom with toilet, and a balcony.
• Apartment №8 – second floor. It comprises an entrance hall, living room, two bedrooms, two bathrooms with toilets, and a terrace. Area 77.03 m2, price €52,766.
• Apartment №17 – fourth floor, area 56.43 m2, price €38,655. Living room, bedroom, bathroom with toilet, and two terraces.
Maintenance fee – €7/m2 per year. The fee includes use of the pool, round-the-clock security, and maintenance of the common areas, lifts, etc.
The price list is attached. No commission. |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
import os
import subprocess
import sys
import re
import itertools
from ansible.compat.six import string_types, iteritems
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.dir import InventoryDirectory, get_file_parser
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins import vars_loader
from ansible.utils.vars import combine_vars
from ansible.utils.path import unfrackpath
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
HOSTS_PATTERNS_CACHE = {}
class Inventory(object):
"""
Host inventory for ansible.
"""
def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST):
# the host file, or script path, or list of hosts
# if a list, inventory data will NOT be loaded
self.host_list = unfrackpath(host_list, follow=False)
self._loader = loader
self._variable_manager = variable_manager
self.localhost = None
# caching to avoid repeated calculations, particularly with
# external inventory scripts.
self._vars_per_host = {}
self._vars_per_group = {}
self._hosts_cache = {}
self._pattern_cache = {}
self._group_dict_cache = {}
self._vars_plugins = []
self._basedir = self.basedir()
# Contains set of filenames under group_vars directories
self._group_vars_files = self._find_group_vars_files(self._basedir)
self._host_vars_files = self._find_host_vars_files(self._basedir)
# to be set by calling set_playbook_basedir by playbook code
self._playbook_basedir = None
# the inventory object holds a list of groups
self.groups = {}
# a list of host(names) to contain current inquiries to
self._restriction = None
self._subset = None
# clear the cache here, which is only useful if more than
# one Inventory objects are created when using the API directly
self.clear_pattern_cache()
self.clear_group_dict_cache()
self.parse_inventory(host_list)
def serialize(self):
data = dict()
return data
def deserialize(self, data):
pass
def parse_inventory(self, host_list):
if isinstance(host_list, string_types):
if "," in host_list:
host_list = host_list.split(",")
host_list = [ h for h in host_list if h and h.strip() ]
self.parser = None
# Always create the 'all' and 'ungrouped' groups, even if host_list is
# empty: in this case we will subsequently add the implicit 'localhost' to it.
ungrouped = Group('ungrouped')
all = Group('all')
all.add_child_group(ungrouped)
self.groups = dict(all=all, ungrouped=ungrouped)
if host_list is None:
pass
elif isinstance(host_list, list):
for h in host_list:
try:
(host, port) = parse_address(h, allow_ranges=False)
except AnsibleError as e:
display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
host = h
port = None
new_host = Host(host, port)
if h in C.LOCALHOST:
# set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
if self.localhost is not None:
display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name))
display.vvvv("Set default localhost to %s" % h)
self.localhost = new_host
all.add_host(new_host)
elif self._loader.path_exists(host_list):
# TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
if self.is_directory(host_list):
# Ensure basedir is inside the directory
host_list = os.path.join(self.host_list, "")
self.parser = InventoryDirectory(loader=self._loader, groups=self.groups, filename=host_list)
else:
self.parser = get_file_parser(host_list, self.groups, self._loader)
vars_loader.add_directory(self._basedir, with_subdir=True)
if not self.parser:
# should never happen, but JIC
raise AnsibleError("Unable to parse %s as an inventory source" % host_list)
else:
display.warning("Host file not found: %s" % to_text(host_list))
self._vars_plugins = [ x for x in vars_loader.all(self) ]
# set group vars from group_vars/ files and vars plugins
for g in self.groups:
group = self.groups[g]
group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
self.get_group_vars(group)
# get host vars from host_vars/ files and vars plugins
for host in self.get_hosts(ignore_limits=True, ignore_restrictions=True):
host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
self.get_host_vars(host)
def _match(self, str, pattern_str):
try:
if pattern_str.startswith('~'):
return re.search(pattern_str[1:], str)
else:
return fnmatch.fnmatch(str, pattern_str)
except Exception:
raise AnsibleError('invalid host pattern: %s' % pattern_str)
def _match_list(self, items, item_attr, pattern_str):
results = []
try:
if not pattern_str.startswith('~'):
pattern = re.compile(fnmatch.translate(pattern_str))
else:
pattern = re.compile(pattern_str[1:])
except Exception:
raise AnsibleError('invalid host pattern: %s' % pattern_str)
for item in items:
if pattern.match(getattr(item, item_attr)):
results.append(item)
return results
def get_hosts(self, pattern="all", ignore_limits=False, ignore_restrictions=False):
"""
Takes a pattern or list of patterns and returns a list of matching
inventory host names, taking into account any active restrictions
or applied subsets
"""
# Check if pattern already computed
if isinstance(pattern, list):
pattern_hash = u":".join(pattern)
else:
pattern_hash = pattern
if not ignore_limits and self._subset:
pattern_hash += u":%s" % to_text(self._subset)
if not ignore_restrictions and self._restriction:
pattern_hash += u":%s" % to_text(self._restriction)
if pattern_hash not in HOSTS_PATTERNS_CACHE:
patterns = Inventory.split_host_pattern(pattern)
hosts = self._evaluate_patterns(patterns)
# mainly useful for hostvars[host] access
if not ignore_limits and self._subset:
# exclude hosts not in a subset, if defined
subset = self._evaluate_patterns(self._subset)
hosts = [ h for h in hosts if h in subset ]
if not ignore_restrictions and self._restriction:
# exclude hosts mentioned in any restriction (ex: failed hosts)
hosts = [ h for h in hosts if h.name in self._restriction ]
seen = set()
HOSTS_PATTERNS_CACHE[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)]
return HOSTS_PATTERNS_CACHE[pattern_hash][:]
@classmethod
def split_host_pattern(cls, pattern):
"""
Takes a string containing host patterns separated by commas (or a list
thereof) and returns a list of single patterns (which may not contain
commas). Whitespace is ignored.
Also accepts ':' as a separator for backwards compatibility, but it is
not recommended due to the conflict with IPv6 addresses and host ranges.
Example: 'a,b[1], c[2:3] , d' -> ['a', 'b[1]', 'c[2:3]', 'd']
"""
if isinstance(pattern, list):
return list(itertools.chain(*map(cls.split_host_pattern, pattern)))
# If it's got commas in it, we'll treat it as a straightforward
# comma-separated list of patterns.
elif ',' in pattern:
patterns = re.split(r'\s*,\s*', pattern)
# If it doesn't, it could still be a single pattern. This accounts for
# non-separator uses of colons: IPv6 addresses and [x:y] host ranges.
else:
try:
(base, port) = parse_address(pattern, allow_ranges=True)
patterns = [pattern]
except Exception:
# The only other case we accept is a ':'-separated list of patterns.
# This mishandles IPv6 addresses, and is retained only for backwards
# compatibility.
patterns = re.findall(
r'''(?: # We want to match something comprising:
[^\s:\[\]] # (anything other than whitespace or ':[]'
| # ...or...
\[[^\]]*\] # a single complete bracketed expression)
)+ # occurring once or more
''', pattern, re.X
)
return [p.strip() for p in patterns]
@classmethod
def order_patterns(cls, patterns):
# Host specifiers should be sorted to ensure consistent behavior
pattern_regular = []
pattern_intersection = []
pattern_exclude = []
for p in patterns:
if p.startswith("!"):
pattern_exclude.append(p)
elif p.startswith("&"):
pattern_intersection.append(p)
elif p:
pattern_regular.append(p)
# if no regular pattern was given, hence only exclude and/or intersection
# make that magically work
if pattern_regular == []:
pattern_regular = ['all']
# when applying the host selectors, run those without the "&" or "!"
# first, then the &s, then the !s.
return pattern_regular + pattern_intersection + pattern_exclude
def _evaluate_patterns(self, patterns):
"""
Takes a list of patterns and returns a list of matching host names,
taking into account any negative and intersection patterns.
"""
patterns = Inventory.order_patterns(patterns)
hosts = []
for p in patterns:
# avoid resolving a pattern that is a plain host
if p in self._hosts_cache:
hosts.append(self.get_host(p))
else:
that = self._match_one_pattern(p)
if p.startswith("!"):
hosts = [ h for h in hosts if h not in that ]
elif p.startswith("&"):
hosts = [ h for h in hosts if h in that ]
else:
to_append = [ h for h in that if h.name not in [ y.name for y in hosts ] ]
hosts.extend(to_append)
return hosts
def _match_one_pattern(self, pattern):
"""
Takes a single pattern and returns a list of matching host names.
Ignores intersection (&) and exclusion (!) specifiers.
The pattern may be:
1. A regex starting with ~, e.g. '~[abc]*'
2. A shell glob pattern with ?/*/[chars]/[!chars], e.g. 'foo*'
3. An ordinary word that matches itself only, e.g. 'foo'
The pattern is matched using the following rules:
1. If it's 'all', it matches all hosts in all groups.
2. Otherwise, for each known group name:
(a) if it matches the group name, the results include all hosts
in the group or any of its children.
(b) otherwise, if it matches any hosts in the group, the results
include the matching hosts.
This means that 'foo*' may match one or more groups (thus including all
hosts therein) but also hosts in other groups.
The built-in groups 'all' and 'ungrouped' are special. No pattern can
match these group names (though 'all' behaves as though it matches, as
described above). The word 'ungrouped' can match a host of that name,
and patterns like 'ungr*' and 'al*' can match either hosts or groups
other than all and ungrouped.
If the pattern matches one or more group names according to these rules,
it may have an optional range suffix to select a subset of the results.
This is allowed only if the pattern is not a regex, i.e. '~foo[1]' does
not work (the [1] is interpreted as part of the regex), but 'foo*[1]'
would work if 'foo*' matched the name of one or more groups.
Duplicate matches are always eliminated from the results.
"""
if pattern.startswith("&") or pattern.startswith("!"):
pattern = pattern[1:]
if pattern not in self._pattern_cache:
(expr, slice) = self._split_subscript(pattern)
hosts = self._enumerate_matches(expr)
try:
hosts = self._apply_subscript(hosts, slice)
except IndexError:
raise AnsibleError("No hosts matched the subscripted pattern '%s'" % pattern)
self._pattern_cache[pattern] = hosts
return self._pattern_cache[pattern]
def _split_subscript(self, pattern):
"""
Takes a pattern, checks if it has a subscript, and returns the pattern
without the subscript and a (start,end) tuple representing the given
subscript (or None if there is no subscript).
Validates that the subscript is in the right syntax, but doesn't make
sure the actual indices make sense in context.
"""
# Do not parse regexes for enumeration info
if pattern.startswith('~'):
return (pattern, None)
# We want a pattern followed by an integer or range subscript.
# (We can't be more restrictive about the expression because the
# fnmatch semantics permit [\[:\]] to occur.)
pattern_with_subscript = re.compile(
r'''^
(.+) # A pattern expression ending with...
\[(?: # A [subscript] expression comprising:
(-?[0-9]+)| # A single positive or negative number
([0-9]+)([:-]) # Or an x:y or x: range.
([0-9]*)
)\]
$
''', re.X
)
subscript = None
m = pattern_with_subscript.match(pattern)
if m:
(pattern, idx, start, sep, end) = m.groups()
if idx:
subscript = (int(idx), None)
else:
if not end:
end = -1
subscript = (int(start), int(end))
if sep == '-':
display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed")
return (pattern, subscript)
def _apply_subscript(self, hosts, subscript):
"""
Takes a list of hosts and a (start,end) tuple and returns the subset of
hosts based on the subscript (which may be None to return all hosts).
"""
if not hosts or not subscript:
return hosts
(start, end) = subscript
if end:
if end == -1:
end = len(hosts)-1
return hosts[start:end+1]
else:
return [ hosts[start] ]
def _enumerate_matches(self, pattern):
"""
Returns a list of host names matching the given pattern according to the
rules explained above in _match_one_pattern.
"""
results = []
hostnames = set()
def __append_host_to_results(host):
if host.name not in hostnames:
hostnames.add(host.name)
results.append(host)
groups = self.get_groups()
for group in groups.values():
if pattern == 'all':
for host in group.get_hosts():
if host.implicit:
continue
__append_host_to_results(host)
else:
if self._match(group.name, pattern) and group.name not in ('all', 'ungrouped'):
for host in group.get_hosts():
if host.implicit:
continue
__append_host_to_results(host)
else:
matching_hosts = self._match_list(group.get_hosts(), 'name', pattern)
for host in matching_hosts:
__append_host_to_results(host)
if pattern in C.LOCALHOST and len(results) == 0:
new_host = self._create_implicit_localhost(pattern)
results.append(new_host)
return results
def _create_implicit_localhost(self, pattern):
if self.localhost:
new_host = self.localhost
else:
new_host = Host(pattern)
new_host.address = "127.0.0.1"
new_host.implicit = True
new_host.vars = self.get_host_vars(new_host)
new_host.set_variable("ansible_connection", "local")
if "ansible_python_interpreter" not in new_host.vars:
py_interp = sys.executable
if not py_interp:
# sys.executable is not set in some cornercases. #13585
display.warning('Unable to determine python interpreter from sys.executable. Using /usr/bin/python default.'
' You can correct this by setting ansible_python_interpreter for localhost')
py_interp = '/usr/bin/python'
new_host.set_variable("ansible_python_interpreter", py_interp)
self.get_group("ungrouped").add_host(new_host)
self.localhost = new_host
return new_host
def clear_pattern_cache(self):
''' called exclusively by the add_host plugin to allow patterns to be recalculated '''
global HOSTS_PATTERNS_CACHE
HOSTS_PATTERNS_CACHE = {}
self._pattern_cache = {}
def clear_group_dict_cache(self):
''' called exclusively by the add_host and group_by plugins '''
self._group_dict_cache = {}
def groups_for_host(self, host):
if host in self._hosts_cache:
return self._hosts_cache[host].get_groups()
else:
return []
def get_groups(self):
return self.groups
def get_host(self, hostname):
if hostname not in self._hosts_cache:
self._hosts_cache[hostname] = self._get_host(hostname)
return self._hosts_cache[hostname]
def _get_host(self, hostname):
matching_host = None
if hostname in C.LOCALHOST:
if self.localhost:
matching_host = self.localhost
else:
for host in self.get_group('all').get_hosts():
if host.name in C.LOCALHOST:
matching_host = host
break
if not matching_host:
matching_host = self._create_implicit_localhost(hostname)
# update caches
self._hosts_cache[hostname] = matching_host
for host in C.LOCALHOST.difference((hostname,)):
self._hosts_cache[host] = self._hosts_cache[hostname]
else:
for group in self.groups.values():
for host in group.get_hosts():
if host not in self._hosts_cache:
self._hosts_cache[host.name] = host
if hostname == host.name:
matching_host = host
return matching_host
def get_group(self, groupname):
return self.groups.get(groupname)
def get_group_variables(self, groupname, update_cached=False, vault_password=None):
if groupname not in self._vars_per_group or update_cached:
self._vars_per_group[groupname] = self._get_group_variables(groupname, vault_password=vault_password)
return self._vars_per_group[groupname]
def _get_group_variables(self, groupname, vault_password=None):
group = self.get_group(groupname)
if group is None:
raise Exception("group not found: %s" % groupname)
vars = {}
# plugin.get_group_vars retrieves just vars for specific group
vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')]
for updated in vars_results:
if updated is not None:
vars = combine_vars(vars, updated)
# Read group_vars/ files
vars = combine_vars(vars, self.get_group_vars(group))
return vars
def get_group_dict(self):
"""
In get_vars() we merge a 'magic' dictionary 'groups' with group name
keys and hostname list values into every host variable set.
Cache the creation of this structure here
"""
if not self._group_dict_cache:
for (group_name, group) in iteritems(self.groups):
self._group_dict_cache[group_name] = [h.name for h in group.get_hosts()]
return self._group_dict_cache
def get_vars(self, hostname, update_cached=False, vault_password=None):
host = self.get_host(hostname)
if not host:
raise AnsibleError("no vars as host is not in inventory: %s" % hostname)
return host.get_vars()
def get_host_variables(self, hostname, update_cached=False, vault_password=None):
if hostname not in self._vars_per_host or update_cached:
self._vars_per_host[hostname] = self._get_host_variables(hostname, vault_password=vault_password)
return self._vars_per_host[hostname]
def _get_host_variables(self, hostname, vault_password=None):
host = self.get_host(hostname)
if host is None:
raise AnsibleError("no host vars as host is not in inventory: %s" % hostname)
vars = {}
# plugin.run retrieves all vars (also from groups) for host
vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')]
for updated in vars_results:
if updated is not None:
vars = combine_vars(vars, updated)
# plugin.get_host_vars retrieves just vars for specific host
vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')]
for updated in vars_results:
if updated is not None:
vars = combine_vars(vars, updated)
# still need to check InventoryParser per host vars
# which actually means InventoryScript per host,
# which is not performant
if self.parser is not None:
vars = combine_vars(vars, self.parser.get_host_variables(host))
return vars
def add_group(self, group):
if group.name not in self.groups:
self.groups[group.name] = group
else:
raise AnsibleError("group already in inventory: %s" % group.name)
def list_hosts(self, pattern="all"):
""" return a list of hostnames for a pattern """
result = [ h for h in self.get_hosts(pattern) ]
if len(result) == 0 and pattern in C.LOCALHOST:
result = [pattern]
return result
def list_groups(self):
return sorted(self.groups.keys(), key=lambda x: x)
def restrict_to_hosts(self, restriction):
"""
Restrict list operations to the hosts given in restriction. This is used
to batch serial operations in main playbook code, don't use this for other
reasons.
"""
if restriction is None:
return
elif not isinstance(restriction, list):
restriction = [ restriction ]
self._restriction = [ h.name for h in restriction ]
def subset(self, subset_pattern):
"""
Limits inventory results to a subset of inventory that matches a given
pattern, such as to select a given geographic or numeric slice amongst
a previous 'hosts' selection that only selects roles, or vice versa.
Corresponds to --limit parameter to ansible-playbook
"""
if subset_pattern is None:
self._subset = None
else:
subset_patterns = Inventory.split_host_pattern(subset_pattern)
results = []
# allow Unix style @filename data
for x in subset_patterns:
if x.startswith("@"):
fd = open(x[1:])
results.extend(fd.read().split("\n"))
fd.close()
else:
results.append(x)
self._subset = results
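            # Illustrative call (hypothetical file): subset("@/tmp/retry_hosts.txt:web*")
            # limits results to the hosts listed in that file plus anything matching 'web*'.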
def remove_restriction(self):
""" Do not restrict list operations """
self._restriction = None
def is_file(self):
"""
Did inventory come from a file? We don't use the equivalent loader
        methods in inventory because the loader does an implicit
DWIM on the path, which may be incorrect for inventory paths relative
to the playbook basedir.
"""
if not isinstance(self.host_list, string_types):
return False
return os.path.isfile(self.host_list) or self.host_list == os.devnull
def is_directory(self, path):
"""
Is the inventory host list a directory? Same caveat for here as with
the is_file() method above.
"""
if not isinstance(self.host_list, string_types):
return False
return os.path.isdir(path)
def basedir(self):
""" if inventory came from a file, what's the directory? """
dname = self.host_list
if self.is_directory(self.host_list):
dname = self.host_list
elif not self.is_file():
dname = None
else:
dname = os.path.dirname(self.host_list)
if dname is None or dname == '' or dname == '.':
dname = os.getcwd()
if dname:
dname = os.path.abspath(dname)
return dname
def src(self):
""" if inventory came from a file, what's the directory and file name? """
if not self.is_file():
return None
return self.host_list
def playbook_basedir(self):
""" returns the directory of the current playbook """
return self._playbook_basedir
def set_playbook_basedir(self, dir_name):
"""
sets the base directory of the playbook so inventory can use it as a
basedir for host_ and group_vars, and other things.
"""
# Only update things if dir is a different playbook basedir
if dir_name != self._playbook_basedir:
# we're changing the playbook basedir, so if we had set one previously
# clear the host/group vars entries from the VariableManager so they're
# not incorrectly used by playbooks from different directories
if self._playbook_basedir:
self._variable_manager.clear_playbook_hostgroup_vars_files(self._playbook_basedir)
self._playbook_basedir = dir_name
# get group vars from group_vars/ files
# TODO: excluding the new_pb_basedir directory may result in group_vars
# files loading more than they should, however with the file caching
# we do this shouldn't be too much of an issue. Still, this should
# be fixed at some point to allow a "first load" to touch all of the
# directories, then later runs only touch the new basedir specified
found_group_vars = self._find_group_vars_files(self._playbook_basedir)
if found_group_vars:
self._group_vars_files = self._group_vars_files.union(found_group_vars)
for group in self.groups.values():
self.get_group_vars(group)
found_host_vars = self._find_host_vars_files(self._playbook_basedir)
if found_host_vars:
self._host_vars_files = self._host_vars_files.union(found_host_vars)
# get host vars from host_vars/ files
for host in self.get_hosts():
self.get_host_vars(host)
# invalidate cache
self._vars_per_host = {}
self._vars_per_group = {}
def get_host_vars(self, host, new_pb_basedir=False, return_results=False):
""" Read host_vars/ files """
return self._get_hostgroup_vars(host=host, group=None, new_pb_basedir=new_pb_basedir, return_results=return_results)
def get_group_vars(self, group, new_pb_basedir=False, return_results=False):
""" Read group_vars/ files """
return self._get_hostgroup_vars(host=None, group=group, new_pb_basedir=new_pb_basedir, return_results=return_results)
def _find_group_vars_files(self, basedir):
""" Find group_vars/ files """
if basedir in ('', None):
basedir = './'
path = os.path.realpath(os.path.join(basedir, 'group_vars'))
found_vars = set()
if os.path.exists(path):
if os.path.isdir(path):
found_vars = set(os.listdir(to_text(path)))
else:
display.warning("Found group_vars that is not a directory, skipping: %s" % path)
return found_vars
def _find_host_vars_files(self, basedir):
""" Find host_vars/ files """
if basedir in ('', None):
basedir = './'
path = os.path.realpath(os.path.join(basedir, 'host_vars'))
found_vars = set()
if os.path.exists(path):
found_vars = set(os.listdir(to_text(path)))
return found_vars
def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False, return_results=False):
"""
Loads variables from group_vars/<groupname> and host_vars/<hostname> in directories parallel
to the inventory base directory or in the same directory as the playbook. Variables in the playbook
dir will win over the inventory dir if files are in both.
"""
results = {}
scan_pass = 0
_basedir = self._basedir
_playbook_basedir = self._playbook_basedir
# look in both the inventory base directory and the playbook base directory
# unless we do an update for a new playbook base dir
if not new_pb_basedir and _playbook_basedir:
basedirs = [_basedir, _playbook_basedir]
else:
basedirs = [_basedir]
for basedir in basedirs:
# this can happen from particular API usages, particularly if not run
# from /usr/bin/ansible-playbook
if basedir in ('', None):
basedir = './'
scan_pass = scan_pass + 1
            # it's not an error if the directory does not exist, keep moving
if not os.path.exists(basedir):
continue
# save work of second scan if the directories are the same
if _basedir == _playbook_basedir and scan_pass != 1:
continue
            # Before trying to load vars from file, check that the directory contains relevant file names
if host is None and any(map(lambda ext: group.name + ext in self._group_vars_files, C.YAML_FILENAME_EXTENSIONS)):
# load vars in dir/group_vars/name_of_group
base_path = to_text(os.path.abspath(os.path.join(to_bytes(basedir), b"group_vars/" + to_bytes(group.name))), errors='surrogate_or_strict')
                group_results = self._variable_manager.add_group_vars_file(base_path, self._loader)
                if return_results:
                    results = combine_vars(results, group_results)
elif group is None and any(map(lambda ext: host.name + ext in self._host_vars_files, C.YAML_FILENAME_EXTENSIONS)):
# same for hostvars in dir/host_vars/name_of_host
base_path = to_text(os.path.abspath(os.path.join(to_bytes(basedir), b"host_vars/" + to_bytes(host.name))), errors='surrogate_or_strict')
                host_results = self._variable_manager.add_host_vars_file(base_path, self._loader)
                if return_results:
                    results = combine_vars(results, host_results)
# all done, results is a dictionary of variables for this particular host.
return results
def refresh_inventory(self):
self.clear_pattern_cache()
self.clear_group_dict_cache()
self._hosts_cache = {}
self._vars_per_host = {}
self._vars_per_group = {}
self.groups = {}
self.parse_inventory(self.host_list)
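# Minimal usage sketch (illustrative; assumes this Inventory class is constructed
# with a loader and variable manager, as in the Ansible API of this era):
#   inventory = Inventory(loader=loader, variable_manager=vm, host_list='/etc/ansible/hosts')
#   inventory.subset('@/tmp/retry_hosts.txt')
#   for name in inventory.list_hosts('webservers'):
#       print(name)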
|
Guthrie Straw grew up in Oregon and leads bicycle tours with Cycle Portland Bike Tours when he’s not enjoying longer bicycle tours of his own. After he and Brock met during the Filmed By Bike jury screenings, they decided to record a show together, which also led to some interesting discussion about Ketchikan, Alaska and outdoor leadership dynamics.
Also: fossil fuel divestment (OPB & Alison Wiley), former co-host Brandon’s new business Rolling Oasis, pedestrian assumptions & motorized vehicle proliferation.
BikePortland: Travel Oregon needs your input on bicycling in the Columbia River Gorge!
Robert explains why the London elevated cycleway doesn’t make sense.
Card from Richard in TX!
Dan in MN will bike to Portland someday.
Ethan Seltzer from Pedal Power & E049: saw this article and thought of us.
Stay tuned for our interview with Organic Transit’s Rob Cotter and ELF owner Lorraine later this week!
Somehow our brains make the connection between the two. A little extra chat between Aaron & Brock.
Aaron & Brock discuss sodas, beers & kombuchas, planned rides for the year, the future of reading in an electronic age, and traffic in Portland these days.
Oregonian: Pantless MAX ride took place again!
Listener Chris donates (thanks!) and wants us to watch Riding Bikes With The Dutch.
Albany tweed ride would deliver coffee & tea to astronauts with a bicycle in space.
JohnnyK adds some detail on re-entry from the moon and its bicycle physics.
Doctor Jeff twitters nice things about us, and Jim agrees on facebook.
Mr. Bob Crispin lived near some sweet singletrack in the seventies and wanted a bike that was up to the task; since one wasn’t available, he put one together himself. We chat with Bob about making a little bicycle design history. It is currently on display at Velo Cult Bike Shop & Tavern in Portland, OR.
Aaron & Brock also discuss Brock’s most recent cross-country Amtrak journey, books on the future of reading, and stripped out saddle parts.
A Donation from Logan! Thanks!
Gus shares the innovative recyclables-for-transit-fare trade option available in Beijing.
Christopher in WY shares the heartwarming story of the 66-year-old man who won the race across Sweden after being barred from official entry due to his age.
Daniel on Elly Blue’s Amtrak kerfuffle & Texan travel advice for bicyclists.
Patrick in Davis CA wishes us a happy new year and compares Thunder Island Brewing to Berryessa Brew Co., wishing both the best of success.
Nick on twitter shares a tallbike with tandem chain setup, cable steering, adjustable height and disassembles for travel.
Thanks to sustaining donor Shadowfox for supporting our show costs. |
import os
import re
import csv
import glob
import shutil
import gzip
import operator
import subprocess
from datetime import datetime
from taca.utils.filesystem import chdir, control_fastq_filename
from taca.illumina.HiSeq_Runs import HiSeq_Run
from taca.utils import misc
from flowcell_parser.classes import RunParametersParser, SampleSheetParser, RunParser, LaneBarcodeParser, DemuxSummaryParser
import logging
logger = logging.getLogger(__name__)
class MiSeq_Run(HiSeq_Run):
def __init__(self, path_to_run, configuration):
        #constructor: it returns a MiSeq object only if the MiSeq run belongs to the NGI facility,
        #i.e., the samplesheet Description is 'Production' or 'Applications'
super(MiSeq_Run, self).__init__( path_to_run, configuration)
self._set_sequencer_type()
self._set_run_type()
def _set_sequencer_type(self):
self.sequencer_type = "MiSeq"
def _set_run_type(self):
ssname = os.path.join(self.run_dir, 'Data', 'Intensities', 'BaseCalls','SampleSheet.csv')
if not os.path.exists(ssname):
#case in which no samplesheet is found, assume it is a non NGI run
self.run_type = "NON-NGI-RUN"
else:
            #if the SampleSheet exists, try to see if it is an NGI run
ssparser = SampleSheetParser(ssname)
            if ssparser.header['Description'] in ("Production", "Applications"):
self.run_type = "NGI-RUN"
else:
#otherwise this is a non NGI run
self.run_type = "NON-NGI-RUN"
def _get_samplesheet(self):
"""
Locate and parse the samplesheet for a run.
In MiSeq case this is located in FC_DIR/Data/Intensities/BaseCalls/SampleSheet.csv
"""
ssname = os.path.join(self.run_dir, 'Data', 'Intensities', 'BaseCalls','SampleSheet.csv')
if os.path.exists(ssname):
#if exists parse the SampleSheet
return ssname
else:
#some MiSeq runs do not have the SampleSheet at all, in this case assume they are non NGI.
#not real clean solution but what else can be done if no samplesheet is provided?
return None
def _generate_clean_samplesheet(self, ssparser):
"""
Will generate a 'clean' samplesheet, for bcl2fastq2.17
"""
output=""
#Header
output+="[Header]{}".format(os.linesep)
for field in ssparser.header:
output+="{},{}".format(field.rstrip(), ssparser.header[field].rstrip())
output+=os.linesep
#now parse the data section
data = []
for line in ssparser.data:
entry = {}
            for field, value in line.items():  # items() works on both Python 2 and 3
if ssparser.dfield_sid in field:
entry[field] ='Sample_{}'.format(value)
elif ssparser.dfield_proj in field:
entry[field] = value.replace(".", "__")
else:
entry[field] = value
if 'Lane' not in entry:
entry['Lane'] = '1'
data.append(entry)
fields_to_output = ['Lane', ssparser.dfield_sid, ssparser.dfield_snm, 'index', ssparser.dfield_proj]
#now create the new SampleSheet data section
output+="[Data]{}".format(os.linesep)
for field in ssparser.datafields:
if field not in fields_to_output:
fields_to_output.append(field)
output+=",".join(fields_to_output)
output+=os.linesep
#now process each data entry and output it
for entry in data:
line = []
for field in fields_to_output:
line.append(entry[field])
output+=",".join(line)
output+=os.linesep
return output
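# Usage sketch (illustrative path and configuration; TACA normally drives this
# through its own CLI rather than direct construction):
#   run = MiSeq_Run('/data/150101_M00001_0001_000000000-AAAAA', configuration)
#   if run.run_type == 'NGI-RUN':
#       ssname = run._get_samplesheet()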
|
Manufacturer of a wide range of products which include fruit basket.
Color: green, white, purple, etc.
Keeping in mind ever-evolving requirements of our respected clients, we are offering a premium quality range of Plastic Fruit Basket. |
"""
A utility class which wraps the RateLimitMixin 3rd party class to do bad request counting
which can be used for rate limiting
"""
from __future__ import absolute_import
from django.conf import settings
from ratelimitbackend.backends import RateLimitMixin
class RequestRateLimiter(RateLimitMixin):
"""
Use the 3rd party RateLimitMixin to help do rate limiting.
"""
def is_rate_limit_exceeded(self, request):
"""
        Returns whether the client has been rate limited
"""
counts = self.get_counters(request)
return sum(counts.values()) >= self.requests
def tick_request_counter(self, request):
"""
        Ticks any counters used to compute when the rate limit has been reached
"""
self.cache_incr(self.get_cache_key(request))
class BadRequestRateLimiter(RequestRateLimiter):
"""
Default rate limit is 30 requests for every 5 minutes.
"""
pass
class PasswordResetEmailRateLimiter(RequestRateLimiter):
"""
Rate limiting requests to send password reset emails.
"""
email_rate_limit = getattr(settings, 'PASSWORD_RESET_EMAIL_RATE_LIMIT', {})
requests = email_rate_limit.get('no_of_emails', 1)
cache_timeout_seconds = email_rate_limit.get('per_seconds', 60)
reset_email_cache_prefix = 'resetemail'
def key(self, request, dt):
"""
Returns cache key.
"""
return '%s-%s-%s' % (
self.reset_email_cache_prefix,
self.get_ip(request),
dt.strftime('%Y%m%d%H%M'),
)
def expire_after(self):
"""
Returns timeout for cache keys.
"""
return self.cache_timeout_seconds
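# Usage sketch (illustrative; assumes a Django request object and a configured cache):
#   limiter = PasswordResetEmailRateLimiter()
#   if limiter.is_rate_limit_exceeded(request):
#       return HttpResponse(status=429)  # hypothetical view response
#   limiter.tick_request_counter(request)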
|
Here you will find my latest comments to the media on the issues of the day and the issues that matter the most to our community.
State Member for Redlands Matt McEachan MP has welcomed Mayor Karen Williams’ innovative solution to fix one of our most congested local roads.
Mayor Williams recently announced Council funding and support to fast track the upgrade of Cleveland Redland Bay Road.
Mr McEachan said he would support any proposal to fix one of the worst roads in Redlands.
“I’ve been fighting for upgrades to Cleveland Redland Bay Road since I was elected,” Mr McEachan said.
Mayor Williams’ unique funding proposal is strongly supported by local Councillors Julie Talty and Mark Edwards and has received in-principle support from the LNP Opposition, including Shadow Minister for Main Roads, Fiona Simpson MP.
Mr McEachan called on the Palaszczuk Labor Government to support any proposal to fix congested local roads.
“Given the Palaszczuk Government has no plan to fix our roads, I ask them to strongly consider this unique proposal by Mayor Williams.
“It’s time all levels of government worked together to give Redlanders safe and reliable local roads.” Mr McEachan said.
State Member for Redlands Matt McEachan has accused the Palaszczuk Labor Government of being frozen at the wheel, questioning why they have gone quiet on their review of the LNP’s criminal gang legislation.
Despite a 10 per cent reduction in crime in the first year of the LNP-introduced laws, Queensland’s Attorney General Yvette D’Ath has admitted that Labor will either replace or repeal the legislation.
Mr McEachan said he could not fathom why Labor would go soft on criminals.
“Redland residents are continually asking me why Labor would consider scrapping the criminal gang legislation.
“It has come down to Labor simply getting rid of anything the previous government did, regardless of cost to the community.
“Labor’s Premier Palaszczuk is more interested in protecting her own job than protecting Queenslanders.
“The LNP listened to Queenslanders and had the courage to stand up to criminal gangs and say enough is enough.
“I have been on the public record as saying I think that they were a good thing,” he said.
Labor’s Royal Commission into the legislation is costing Queensland taxpayers $6 million.
State Member for Redlands Matt McEachan continues to back Mt Cotton and Sheldon locals in their push to form a Rural Fire Brigade.
In a speech to Parliament this week Mr. McEachan called on the Labor State Government to support the community’s efforts to protect themselves against bushfire risk and to stop kowtowing to the unions.
Mr. McEachan slammed the United Firefighters Union for disrespecting the will of locals. The union treats this as a turf war and is at odds with the community over forming the Rural Fire Brigade. The union has form: recently the UFU tried to stop volunteer firefighters from being eligible for the same compensation for cancers as urban firefighters, and its Victorian arm banned members from participating in a post-traumatic stress disorder trial simply because volunteer firefighters were involved.
Mr. McEachan said that he hoped Minister Byrne would not allow union bullying to override common sense in backing community calls for a Rural Fire Brigade in the area.
“Mt Cotton and Sheldon are considered very high bush fire risk and each week of delay by Labor puts the community at more risk."
Rural Fire Brigades Association Queensland, General Manager, Justin Choveaux supported Mr. McEachan’s calls for cooperation.
“Over 93% of Queensland is defended by volunteer firefighters who are members of their local Rural Fire Brigade. In the Mt Cotton/Sheldon area, a Rural Fire Brigade would allow for a greater community defence capacity by increasing the response capability locally not only to fire response, but to cyclone and storm recovery,” said Mr. Choveaux.
“A community based brigade will also assist landowners managing their land through the use of fire and reduce the risk of a major bushfire.
“Having a Rural Fire Brigade and Fire & Rescue station in an area is not a competition, it’s about complementing each other's skills to provide a safe community,” he said.
State Member for Redlands Matt McEachan is welcoming Labor Government support to change smoke legislation.
Yesterday the LNP Opposition introduced a bill amending current legislation to seek a staged transition to mandatory photoelectric smoke alarms in all Queensland homes.
Photoelectric smoke alarms are superior to the current models.
Mr McEachan said there had been too many near-misses in Redlands and there should be no shortcuts when it came to fire safety.
“While Labor focuses on finding the easiest way to bully themselves into workplaces, the LNP is busy concentrating on saving lives.
Tidbold Real Estate's Dave Tidbold welcomed the LNP’s initiative, saying that there is not enough awareness of the importance of maintaining a functioning smoke alarm.
"While we have never had issues with faulty smoke alarms due to compulsory and regular checks, we do come across many owner occupied properties we list for sale that have malfunctioning smoke alarms or battery operated alarms with a flat battery, or no battery at all.
"An effective fire alarm can minimise the damage created to the property and save lives."
Mr Tidbold called for further controls surrounding cost effectiveness and quality control to be legislated to ensure there would be no repeat of the Rudd Labor Government's Home Insulation Program, which affected thousands of property owners and led to the deaths of four people.
Bay Island residents have called for an increased police presence on the Southern Moreton Bay Islands (SMBI) at a Law and Order forum held on Russell Island recently.
More than 200 SMBI residents attended the forum, organised by State Member for Redlands Matt McEachan and Cr Mark Edwards. Queensland Police, Brisbane Bayside Crime Stoppers and Redland City Council were also in attendance.
Mr McEachan said the forum enabled residents to have their say on crime, which was one of the top issues identified on the Bay Islands in his electorate-wide survey last year.
“Residents are worried about crime and the level of support their local police are receiving on the Islands,” Mr McEachan said.
Brisbane Bayside Crime Stoppers Chairman Paul Fitzpatrick reiterated to residents the role of Crime Stoppers, and how reporting anonymously to their line, or to Police Link, can reduce the pressure on police officers.
Residents on the SMBI and mainland can further have their say on law and order issues affecting their local community via a new Law and Order survey which can be accessed via mattmceachan.com.au.
Member for Redlands Matt McEachan is encouraging residents to dig deep and help put a smile on the faces of those less fortunate this Christmas.
Mr McEachan has teamed up with five charities as part of his Redlands Guide to Giving.
“Christmas is just around the corner, and while it is a time of joy and indulgence for many, for others it is a reminder of how hard times are.
“I want everyone in Redlands to dig deep and help make this year’s Christmas merry for everyone. It’s as simple as purchase, wrap and deliver,” Mr McEachan said.
The five charities involved are the Bayside Salvation Army, GIVIT, Foodbank, Kids in Care Christmas Appeal and the Redlands Animal Shelter.
Mr McEachan encouraged everyone to keep an eye out in their letter box for further details about the Redlands Guide to Giving.
Those wanting to donate either food, gifts or toy items can do so by dropping them off to Matt’s electorate office at Victoria Point Shop H20, Upstairs in the cinema building, Lakeside.
More outstanding community members have been recognised in the latest round of Member for Redlands Matt McEachan’s Recognise Redlands initiative.
Three exceptional student athletes and dedicated community volunteers were the deserving recipients of a $250 bursary at a special presentation.
The Russell Island first responders were recognised for their tireless commitment towards the good of their small Island community.
“First responders play an invaluable role in our local Island communities. I am honoured to be recognising this group of amazing volunteers,” Mr McEachan said.
Another selfless volunteer and mother of four, Sandy Dixon, was also acknowledged for her commitment to The Cage Youth Foundation. Sandy is a qualified Counsellor who offers her services to the Cage community initiative without ever asking for anything in return.
A $250 bursary will help school-aged athlete Luke Harvey continue to excel in his passion for triathlon. Luke, who only participated in his first triathlon in 2012, is now the 2015 14-15 year Male Series Winner of the Triathlon Queensland State Series. The all-rounder is also a keen cross country, athletic and swimming champion.
The bursary will also help Redland Bay athletes Jamie Howell and Shekinah Friske with their international sporting achievements.
Shekinah will be travelling to Barcelona with the Australian futsal team in November, while Jamie continues to excel with her athletic abilities.
The Macleay Island Inspirational Writers’ Group will use their $250 bursary to help stage a free Writers on the Shore event, which will engage all Island and mainland community groups.
“I am thrilled to honour these outstanding Redlanders for their achievements and wish them all the best in their respective areas,” Mr McEachan said.
Initiated this year by State Member for Redlands Matt McEachan, the awards recognise Redlanders who are excelling in sports, academia or community service.
The next round of Recognise Redlands will open in 2016.
Fundraising efforts for schools in the Redlands electorate will get a much needed boost over the next few weeks thanks to State Member for Redlands Matt McEachan.
Mr McEachan is donating 16 Weber Baby Qs to schools in his electorate as part of his new community ‘Get Grilling for your School’ initiative.
“I’m excited to present schools with these prizes,” Mr McEachan said.
“It gives them the tools to fundraise how they see fit, and creates a bit more excitement than giving a monetary donation,” Mr McEachan said.
“Staff and volunteers work tirelessly to provide the best resources for school students through fundraising.
Redland Bay State School P&C President Paul Booker thanked Mr McEachan for his generous donation.
“Matt has been involved with our P&C since his inception as a candidate and then our local Member and has provided us with some great practical and political support in that time.
“Even picking up a shovel and digging holes to help makeover our garden earlier in the year. This donation is yet another example of his service to our organisation and the wider community.
The LNP remains committed to the needs of Southern Moreton Bay Island (SMBI) schools following an education roundtable discussion on Russell Island this morning.
Shadow Minister for Education and Training Tim Mander hosted the discussion with local MP Matt McEachan. They met with Russell and Macleay Island principals, chaplains and local child care providers.
Mr Mander said the Government needed to address the inadequacies SMBI educators faced on a daily basis.
“We need to give our children the best start to life and our schools need to be equipped with the tools to do this.
“Unfortunately the geographical isolation of the Bay Islands has created a shortfall in support and resources for students and the teaching faculty,” Mr Mander said.
Mr McEachan said he called on the Government to designate Russell and Macleay Island state schools as remote, rather than regional schools.
“A change in classification would mean extra resources and extra funding.
The saying goes, ‘a weekend well spent brings a week of content’. I spent my weekend visiting the Southern Moreton Bay Islands, and it was brilliant. The Four Islands Festival was a fantastic showcase of an amazing location right on our doorstep. The visitors I met from interstate and overseas were unanimous in their opinion of the local community and people; collectively, they couldn’t understand or believe that anyone would be disparaging of them.
But we have all read or watched the negative stories about the Bay Islands. Yes, I acknowledge the issues the ‘Dropping off the Edge 2015’ report highlights (BaysideBulletin, 29/07/15).
I don’t deny that these need addressing, but island residents are working hard to rid themselves of stereotypes not of their own making, born of a legacy of buck-passing and a ‘too hard basket’ mentality from all levels of government.
In my view this has created an unfair stigmatisation of the island locals. Great strides have been made in recent years in provision of services and I've seen more co-operation and effort to tackle issues than ever before. Of course there is much more to do and for my part I'll work hard to do my bit, but these issues should not define our Southern Moreton Bay Islands.
I have worked with Bay Island residents for many years and have family connections there; indeed, my Nana used to go to the dances on Russell Island in the 1930s! The one thing common to all the Bay Islands is a deep sense of community. This is manifest in the way people on the islands look out for each other and make visitors feel at home, and in the sheer number of volunteers and organisations who give of their time, determined to make their community a better place to live.
The four islands that make up the Southern Moreton Bay Island Group each have unique natural, cultural and community identities. It is displayed in the respect and reverence Islanders have for the islands’ natural values, their history and their cultural significance to the Quandamooka people. It is displayed in the way locals enjoy life on the islands, with their vibrant art scene, environmental groups, sporting clubs and service organisations. At my numerous mobile offices and visits I have met many wonderful residents who are proud of their community and are working hard to dispel the negative connotations often associated with island life.
Go and see and experience the islands for yourself, don't wait for next year’s Four Island Festival though, there is always something happening and there's so much to see and do, I reckon you'll be glad you did.
The LNP will keep fighting for Redlanders’ fair infrastructure share including vital road upgrades and addressing car parking congestion.
Opposition Leader Lawrence Springborg visited key infrastructure priorities in Redlands with local MP Matt McEachan yesterday.
“I saw first-hand the frustration many locals feel trying to get a park at the Weinam Creek Ferry Terminal,” Mr Springborg said.
“It was good to visit the area and get a real understanding of what the community is putting up with.
“This area clearly needs to be redeveloped and car parking has reached its capacity.
Member for Redlands Matt McEachan said he was working closely with Redland City Council and Southern Moreton Bay Islanders to secure a real solution for the ferry terminal.
“I am looking forward to seeing the revised submission for the Weinam Creek redevelopment to ensure the best outcome for locals,” Mr McEachan said.
Mr McEachan said as part of the visit, the Opposition Leader and neighbouring Member for Cleveland Mark Robinson inspected the Cleveland Redland Bay Road.
“The LNP had a plan to deliver the much needed dual-laning along Cleveland Redland Bay Road and this Labor government should honour and deliver what was a fully-funded project,” Mr McEachan said.
Mr Springborg said that it was disappointing the Palaszczuk Government were happy to hike up registration fees but weren’t willing to invest in fixing congested roads such as Cleveland Redland Bay Road.
Labor’s smoke and mirrors budget was on the menu for discussion at the first small business breakfast with Shadow Treasurer and Deputy Opposition Leader John-Paul Langbroek in Redlands this morning.
Hosted by State Member for Redlands Matt McEachan MP, the breakfast brought together small business owners with the three levels of Government including Federal Member for Bowman Andrew Laming, State Member for Cleveland Mark Robinson and Councillor Julie Talty.
Mr Langbroek said he understood why small business owners were concerned with Labor’s budget, including their failure to increase the payroll tax threshold.
“Nothing in this budget builds economic confidence for local small business owners or residents,” he said.
“Saddling our energy companies with billions of dollars of government debt isn’t going to improve Queensland’s fiscal position.
Mr McEachan said the economy would keep suffering under Labor.
“Now they are raiding public servants’ long service leave and superannuation to pay for their election promises. Which, by the way, doesn’t include any infrastructure for Redlands.
Redlands missed out on vital road infrastructure upgrades to Cleveland Redland Bay Road after Labor failed to match the LNP’s funding commitment.
Local sporting clubs in the Redlands will be able to share in thousands of dollars’ worth of government grants thanks to the LNP’s hugely popular Get in the Game program, which will continue for another three years.
It’s a personal win for Member for Redlands Matt McEachan who committed to fighting for the program under the new government and wrote to the Minister earlier this year asking for its continuation with urgency.
“Here in Redlands we are bustling with activity every weekend, with kids engaging in just about every conceivable club sport. From Russell Island to Pinklands, Vicky Point to Redland Bay, Redlands families love their junior sport.
“For many, the extra bit of financial help means the difference between kids playing a sport or nothing, and for some clubs it means they face the prospect of folding.
Russell Island Stingrays’ Christine McGlinn said she was relieved the Get in the Game program would continue to be funded.
“This will help our club immensely which helps the community and all the kids involved or wanting to be involved in sports.
A range of grants is available to sporting and active recreation organisations and for kids in sports. Visit www.qld.gov.au/recreation/sports/funding/getinthegame/ or contact my office for more information.
The community campaign to upgrade Cleveland Redland Bay Road continues to gain traction with a call by Shadow Minister for Main Roads Fiona Simpson and State Member for Redlands Matt McEachan for vital funding ahead of Labor’s State Budget next week.
The Shadow Minister visited the electorate to experience firsthand the congestion and traffic Redlanders face every day travelling along the road in peak times.
“This is a critical piece of infrastructure and we want to see the State Government commit to a plan for its upgrade,” Ms Simpson said.
Mr McEachan has spoken numerous times in Parliament about the need to upgrade Cleveland Redland Bay Road.
He said Labor’s Roads Minister Mark Bailey has made it clear that funding is reliant on the LNP agreeing to their proposed registration hikes.
“There is no doubt we have a fight on our hands to secure funding but I refuse to allow Redlanders to be held to ransom by the Labor Government.
“I again ask Labor to match the LNP’s commitment to fund the Cleveland Redland Bay Road upgrade in their Budget next week.
“Redlanders need and deserve this road upgrade. I appreciate the Shadow Minister taking the time to experience what residents have put up with for far too long.
A teenage swimming sensation, an internationally recognised academic achiever and two selfless community volunteers were the deserving recipients of the inaugural Recognise Redlands awards at a special presentation this morning.
A $250 bursary will help seventeen-year-old swimmer Lauren Folster attend the Global Games in Ecuador, South America and the Pacific School Games in Adelaide. Lauren holds the Australian champion title for both the 50m and 100m backstroke.
It will also assist 12 year-old Leah Lever attend a Lyric Theatre Opera Workshop and the Conservatorium of Music’s State Honours Program during the year.
The youngest registered volunteer entertainer at Queensland Health’s metro south region, Leah regularly sings and entertains aged care residents and dementia patients. The rising star also volunteers her time to sing at charitable events all around the Redlands.
Tireless community volunteer Rob Spencer was also recognised for his dedication to the community. Mr Spencer was the inaugural committee member of the local National Seniors Branch, which has grown to over 120 members in 10 years. He was also pivotal to the growth of STAR Community Transport into an organisation with 16 vehicles, 40 volunteer vehicles, a client base of 3,000 and 60,000 trips a year.
Fourth year law student Madeleine Harling has been recognised for her outstanding academic results at university. The high achiever was honoured with selection in the Golden Key Society, a worldwide organisation dedicated to recognising academic excellence.
“I congratulate all four outstanding Redlanders for their achievements,” Mr McEachan said.
The final round of Recognise Redlands for 2015 will close on 15 September.
Russell Island students and State Member for Redlands Matt McEachan had the time of their lives on an excursion to the Queensland Museum this week.
Mr McEachan donated $500 to Russell Island State School to assist towards transport costs for the excursion, and couldn’t resist the invitation to join them.
“It was wonderful to see the amazement on the kids’ faces,” Mr McEachan said.
“The insect display was their favourite, especially the stick insects and giant burrowing cockroaches.
Mr McEachan said he would like to see more support from the community and transport services to assist Bay Island students facing challenges to get the same opportunities as kids living on the mainland.
Mr McEachan also donated $500 to assist Macleay Island students with their future excursions.
Redland Bay State School is a step closer to getting much needed shade sails after a donation by State Member for Redlands Matt McEachan.
During the election Mr McEachan and a re-elected LNP government committed to funding the shade sails in their entirety.
“Unfortunately Labor did not make the same commitment,” he said.
Mr McEachan handed over the $2000 cheque to Redland Bay State School’s P&C president Paul Booker at their latest initiative, the ‘School Yard Blitz’.
“I commend the P&C for developing a community-wide initiative to instil a sense of ‘community spirit’ and improve the appearance and safety of the school,” Mr McEachan said.
If any local businesses, church or volunteer group would like to get involved with the School Yard Blitz by way of donating funds or equipment please contact my office on 3446 0100.
The Queensland Government’s Minister for Main Roads has admitted in Parliament that he does ‘not recall’ if the Palaszczuk Government intends to deliver on much needed upgrades for Cleveland Redland Bay Road.
Asked the question in Parliament today by State Member for Redlands Matt McEachan, Minister Mark Bailey replied that he did not recall the specific road Mr McEachan was referring to.
“I made it my first priority to write to the Minister back in February asking him to respectfully consider the LNPs commitment to improving Cleveland Redland Bay Road,” Mr McEachan said.
“In March I followed up this letter with his office, and in the last sitting week spoke about the road in my adjournment speech, yet here we are in June and the Minister claims he ‘can’t recall’ it?
Mr McEachan said the Minister should be familiar with Cleveland Redland Bay Road considering he would have travelled along it in April while visiting the electorate.
“The Minister has had this correspondence for four months, has visited the electorate and still hasn’t extended the courtesy to discuss the Government’s plans for Cleveland Redland Bay Road.
Member for Cleveland, Dr Mark Robinson said he had campaigned for the upgrade of Cleveland-Redland Bay Rd since 2009 after the previous Labor Government left the job only partly done.
Both Mr McEachan and Dr Robinson call on the Palaszczuk Government to allocate funding in the July Budget to complete the road upgrade.
State Member for Redlands Matt McEachan has welcomed progress on critical road works at the intersection of Mount Cotton Road and Double Jump Road.
Mr McEachan said the intersection has long been a dangerous part of the road network in the Redlands.
“The upgrade will provide a much safer road environment not just for locals but for visitors to the area.
“It’s part of my ongoing commitment to ensure key infrastructure is delivered to meet the growing demands of the Redlands,” Mr McEachan said.
Mount Cotton resident Craig Luxton said the upgrade was a big relief for locals.
“There are a lot of young families in this area and we all worry about driving along this particular part of the road.
Mr McEachan continues to fight for the upgrade of Cleveland-Redland Bay Road from Magnolia Parade to Double Jump Road.
Shadow Health Minister Mark McArdle and State Member for Redlands Matt McEachan have commended Redland Hospital staff and executives after a tour of the facilities this morning.
The visit involved a tour to the Palliative care unit, birth suite and special care unit, as well as the Emergency Department and Specialist outpatient unit.
Mr McEachan said the visit was to ensure the health needs of Redlanders were being met with the resources provided.
“Redlanders are extremely fortunate to have this facility at their doorstep rather than having to travel up to an hour to access healthcare.
“Redlands Hospital staff have earned their place as one of the top performers in their class in Australasia; an example of what can be achieved with dedicated health professionals.
Mr McArdle said the Redlands hospital was servicing a growing population.
Mr McArdle said the visit was timely as Parliament just this week supported the LNP’s Wait Time Guarantee policy.
“This policy ensures every patient is guaranteed their surgery within the clinically recommended time, yet the Palaszczuk-Gordon Government are refusing to listen to the needs of the public and commit to this policy,” said Mr McArdle.
“While the Redland Hospital is faring well in terms of patient surgery and waitlist times, this is another way Redlanders can be sure they will receive their surgery on time.
“They must respect the moral authority of Queensland's Parliament and ultimately the wishes of the public,” he said. |
from spgl.graphics.gwindow import *
from spgl.graphics.gobjects import *
from spgl.graphics.gevents import *
from spgl.gtimer import *
from spgl.grandom import *
import time
window = GWindow()
window.setWindowTitle("Breakout")
# waitForClick()
ball = GOval(20,20, window.getWidth()/2, window.getHeight()/2)
ball.setFilled(True)
window.add(ball)
vx = 2.7
vy = 3.0
paddle = GRect(125, 15, window.getWidth()/2, window.getHeight() - 50)
paddle.setFilled(True)
window.add(paddle)
spacer = 5
recW = (window.getWidth() - (9*spacer)) / 10.0
for i in range(10):
for j in range(10):
rec = GRect(recW, 15, j*(recW + spacer), 50 + i * (15 + spacer))
rec.setFilled(True)
if(i<2):
rec.setColor(color = "RED")
elif(i<4):
rec.setColor(color = "ORANGE")
elif(i<6):
rec.setColor(color = "YELLOW")
elif(i<8):
rec.setColor(color = "GREEN")
elif(i<10):
rec.setColor(color = "BLUE")
window.add(rec)
timer = GTimer(milliseconds=15)
timer.start()
while(True):
e = getNextEvent()
if(e.getEventType() == EventType.MOUSE_MOVED):
newX = e.getX()
if(newX - paddle.getWidth()/2 > 0 and \
newX + paddle.getWidth()/2 < window.getWidth()):
paddle.setLocation(x = newX - paddle.getWidth()/2, y = paddle.getY())
elif(newX - paddle.getWidth()/2 < 0):
paddle.setLocation(x = 0, y = paddle.getY())
elif(newX + paddle.getWidth()/2 > window.getWidth()):
paddle.setLocation(x = window.getWidth() - paddle.getWidth(), \
y = paddle.getY())
elif(e.getEventType() == EventType.TIMER_TICKED):
ball.move(vx, vy)
# check for wall collisions
if(ball.getX() + ball.getWidth() > window.getWidth() or \
ball.getX() < 0):
vx = -vx
if(ball.getY() + ball.getHeight() > window.getHeight() or \
ball.getY() < 0):
vy = -vy
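        # probe just outside each corner of the ball; any non-paddle object found
        # at one of these points is treated as a brick and removed on contact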
obj1 = window.getObjectAt(ball.getX()-1, ball.getY()-1)
obj2 = window.getObjectAt(ball.getX() + ball.getWidth() + 1, ball.getY()-1)
obj3 = window.getObjectAt(ball.getX()-1, ball.getY() + ball.getHeight()+1)
obj4 = window.getObjectAt(ball.getX() + ball.getWidth() + 1, ball.getY() + ball.getHeight()+1)
# check for paddle collisions
if(window.getObjectAt(ball.getX(), ball.getY()) == paddle or \
window.getObjectAt(ball.getX() + ball.getWidth(), ball.getY()) == paddle or \
window.getObjectAt(ball.getX(), ball.getY() + ball.getHeight()) == paddle or \
window.getObjectAt(ball.getX() + ball.getWidth(), ball.getY() + ball.getHeight()) == paddle):
if(vy > 0):
vy = -vy
elif(obj1 != None and obj1 != paddle):
vy = -vy
window.remove(obj1)
elif(obj2 != None and obj2 != paddle):
vy = -vy
window.remove(obj2)
elif(obj3 != None and obj3 != paddle):
vy = -vy
window.remove(obj3)
elif(obj4 != None and obj4 != paddle):
vy = -vy
window.remove(obj4)
elif(e.getEventType() == EventType.KEY_TYPED):
initRandomSeed()
window.remove(ball)
ball = GOval(20,20, window.getWidth()/2, window.getHeight()/2)
ball.setFilled(True)
window.add(ball)
vx = randomReal(2,4)
if(randomChance(.5)): vx = -vx
vy = 3.0
|
Is The Utility of NCL Still To Be Used With The Bot?
How Often Will Buybacks of NCL Occur?
There’s so much FUD out there in the world. You need unwavering confidence in crypto based on a long-term, big-picture perspective. Why? Because crypto is on its way to becoming a MULTI-Trillion dollar market.
import numpy as np
import pandas as pd
from scipy.sparse import csc_matrix, lil_matrix, diags
from JacobianBased import IwamotoNR
np.set_printoptions(linewidth=10000, precision=3)
# pd.set_option('display.height', 1000)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
class Graph:
"""
Program to count islands in boolean 2D matrix
"""
def __init__(self, row, col, g):
"""
        :param row: number of rows
        :param col: number of columns
:param g: adjacency matrix
"""
self.ROW = row
self.COL = col
self.graph = g
def is_safe(self, i, j, visited):
"""
A function to check if a given cell (row, col) can be included in DFS
:param i: row index
:param j: column index
:param visited: 2D array of visited elements
:return: if it is safe or not
"""
# row number is in range, column number is in range and value is 1 and not yet visited
        return 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j]
def dfs(self, i, j, visited):
"""
A utility function to do DFS for a 2D boolean matrix.
It only considers the 8 neighbours as adjacent vertices
:param i: row index
:param j: column index
:param visited: 2D array of visited elements
"""
# TODO: Use a proper DFS with sparsity considerations
# These arrays are used to get row and column numbers of 8 neighbours of a given cell
rowNbr = [-1, -1, -1, 0, 0, 1, 1, 1]
colNbr = [-1, 0, 1, -1, 1, -1, 0, 1]
# Mark this cell as visited
visited[i][j] = True
# Recur for all connected neighbours
for k in range(8):
if self.is_safe(i + rowNbr[k], j + colNbr[k], visited):
self.dfs(i + rowNbr[k], j + colNbr[k], visited)
def count_islands(self):
"""
The main function that returns count of islands in a given boolean 2D matrix
:return: count of islands
"""
# Make a bool array to mark visited cells. Initially all cells are unvisited
# TODO: Replace with sparse matrix
visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
# Initialize count as 0 and traverse through the all cells of given matrix
count = 0
# TODO: replace with sparse version
for i in range(self.ROW):
for j in range(self.COL):
# If a cell with value 1 is not visited yet, then new island found
if not visited[i][j] and self.graph[i][j] == 1:
# Visit all cells in this island and increment island count
self.dfs(i, j, visited)
count += 1
return count
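# Example (illustrative): two islands under 8-neighbour connectivity
#   g = [[1, 0, 0],
#        [0, 0, 1],
#        [0, 0, 1]]
#   Graph(3, 3, g).count_islands()  # -> 2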
class Terminal:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class ConnectivityNode:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class ShuntDevice:
def __init__(self, name, terminal: Terminal):
self.name = name
self.terminal = terminal
def __str__(self):
return self.name
class Load(ShuntDevice):
def __init__(self, name, terminal: Terminal, P=0, Q=0):
ShuntDevice.__init__(self, name, terminal)
self.P = P
self.Q = Q
class Shunt(ShuntDevice):
def __init__(self, name, terminal: Terminal, G=0, B=0):
ShuntDevice.__init__(self, name, terminal)
self.G = G
self.B = B
class Generator(ShuntDevice):
def __init__(self, name, terminal: Terminal, P=0, Vset=0):
ShuntDevice.__init__(self, name, terminal)
self.P = P
self.Vset = Vset
class Branch:
def __init__(self, name, t1, t2):
self.name = name
self.t1 = t1
self.t2 = t2
def get_y(self):
return 100.0, 0.0, 0.0, 100.0
def __str__(self):
return self.name
class Jumper(Branch):
def __init__(self, name, t1, t2):
Branch.__init__(self, name, t1, t2)
class Switch(Branch):
def __init__(self, name, t1, t2, state=True):
Branch.__init__(self, name, t1, t2)
self.state = state
class Line(Branch):
def __init__(self, name, t1, t2, r=0, x=0, r0=0, x0=0, g=0, b=0, g0=0, b0=0, length=1, tap_module=1.0, tap_angle=0):
Branch.__init__(self, name, t1, t2)
self.r = r
self.x = x
self.r0 = r0
self.x0 = x0
self.g = g
self.b = b
self.g0 = g0
self.b0 = b0
self.length = length
self.tap_module = tap_module
self.tap_angle = tap_angle
def get_y(self):
tap = self.tap_module * np.exp(-1j * self.tap_angle)
Ysh = complex(self.g * self.length, self.b * self.length) / 2
        if self.r != 0 or self.x != 0:
Ys = 1 / complex(self.r * self.length, self.x * self.length)
else:
raise ValueError("The impedance at " + self.name + " is zero")
Ytt = Ys + Ysh
Yff = Ytt / (tap * np.conj(tap))
Yft = - Ys / np.conj(tap)
Ytf = - Ys / tap
return Yff, Yft, Ytf, Ytt
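    # The four terms above form the standard 2x2 admittance of the pi model with
    # an ideal tap transformer on the "from" side:
    #   [If]   [Yff  Yft] [Vf]
    #   [It] = [Ytf  Ytt] [Vt]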
class Connectivity:
def __init__(self, n_terminals, n_nodes, n_br, n_sw, n_ld, n_gen, n_sh, Sbase):
"""
Constructor
:param n_terminals: number of terminals
:param n_nodes: number of nodes
:param n_br: number of branches
:param n_sw: number of switches
:param n_ld: number of loads
:param n_gen: number of generators
:param n_sh: number of shunts
"""
self.Sbase = Sbase
# connectivity nodes - terminals matrix
self.CN_T = lil_matrix((n_nodes, n_terminals), dtype=int)
# lines, transformers and jumpers to terminals matrix
self.BR_T_f = lil_matrix((n_br, n_terminals), dtype=int)
self.BR_T_t = lil_matrix((n_br, n_terminals), dtype=int)
# switches - terminals matrix
self.SW_T = lil_matrix((n_sw, n_terminals), dtype=int)
self.SW_states = np.zeros(n_sw, dtype=int)
# shunt elements (loads, shunts, generators)
self.LD_T = lil_matrix((n_ld, n_terminals), dtype=int)
self.GEN_T = lil_matrix((n_gen, n_terminals), dtype=int)
self.SH_T = lil_matrix((n_sh, n_terminals), dtype=int)
# admittance components vectors
self.BR_yff = np.zeros(n_br, dtype=complex)
self.BR_yft = np.zeros(n_br, dtype=complex)
self.BR_ytf = np.zeros(n_br, dtype=complex)
self.BR_ytt = np.zeros(n_br, dtype=complex)
# load generation and shunts
self.LD_Power = np.zeros(n_ld, dtype=complex)
self.Gen_Power = np.zeros(n_gen, dtype=float)
self.Gen_voltage = np.zeros(n_gen, dtype=float)
self.SH_Power = np.zeros(n_sh, dtype=complex)
# names
self.T_names = [None] * n_terminals
self.CN_names = [None] * n_nodes
self.BR_names = [None] * n_br
self.SW_names = [None] * n_sw
self.LD_names = [None] * n_ld
self.GEN_names = [None] * n_gen
self.SH_names = [None] * n_sh
# resulting matrices
self.BR_CN = None # nodes - branch
self.CN_CN = None # node - node
self.SW_T_state = None # switch - terminal with the switches state applied
self.BR_SW_f = None # branch - switch
self.BR_SW_t = None # branch - switch
self.CN_SW = None # node - switch
self.LD_CN = None # load - node
self.GEN_CN = None # generator - node
self.SH_CN = None # shunt - node
# resulting matrices
self.Cf = None
self.Ct = None
self.Yf = None
self.Yt = None
self.Ybus = None
self.Ysh = None
self.Sbus = None
self.Ibus = None
self.Vbus = None
self.types = None
self.pq = None
self.pv = None
self.ref = None
def compute(self):
"""
Compute the cross connectivity matrices to determine the circuit connectivity towards the calculation
Additionally, compute the calculation matrices
"""
# --------------------------------------------------------------------------------------------------------------
# Connectivity matrices
# --------------------------------------------------------------------------------------------------------------
# switches connectivity matrix with the switches state applied
self.SW_T_state = diags(self.SW_states) * self.SW_T
# Branch-Switch connectivity matrix
self.BR_SW_f = self.BR_T_f * self.SW_T_state.transpose()
self.BR_SW_t = self.BR_T_t * self.SW_T_state.transpose()
# Node-Switch connectivity matrix
self.CN_SW = self.CN_T * self.SW_T_state.transpose()
# load-Connectivity Node matrix
self.LD_CN = self.LD_T * self.CN_T.transpose()
# generator-Connectivity Node matrix
self.GEN_CN = self.GEN_T * self.CN_T.transpose()
# shunt-Connectivity Node matrix
self.SH_CN = self.SH_T * self.CN_T.transpose()
# branch-node connectivity matrix (Equals A^t)
# A branch and a node can be connected via a switch or directly
self.Cf = self.CN_SW * self.BR_SW_f.transpose() + self.CN_T * self.BR_T_f.transpose()
self.Ct = self.CN_SW * self.BR_SW_t.transpose() + self.CN_T * self.BR_T_t.transpose()
self.BR_CN = (self.Cf - self.Ct).transpose()
# node-node connectivity matrix
self.CN_CN = self.BR_CN.transpose() * self.BR_CN
self.CN_CN = self.CN_CN.astype(bool).astype(int)
# --------------------------------------------------------------------------------------------------------------
# Calculation matrices
# --------------------------------------------------------------------------------------------------------------
# form the power injections vector
PD = self.LD_CN.transpose() * self.LD_Power # demand (complex)
PG = self.GEN_CN.transpose() * self.Gen_Power # generation (real)
self.Sbus = (PG - PD) / self.Sbase
self.Ibus = np.zeros_like(self.Sbus)
# types logic:
# if the number is < 10 -> PQ
# if the number is >= 10 -> PV
# later, choose a PV gen as Slack
self.types = (self.LD_CN.sum(axis=0).A1 + self.GEN_CN.sum(axis=0).A1 * 10).reshape(-1)
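        # e.g. a node feeding one load and no generator sums to 1 (PQ), while any
        # node with a generator sums to >= 10 (PV), however many loads hang off it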
# Voltage vector
# self.Vbus = self.GEN_CN.transpose() * self.Gen_voltage
self.Vbus = np.ones_like(self.Sbus)
# form the shunt vector
self.Ysh = self.SH_CN.transpose() * self.SH_Power
# form the admittance matrix
self.Yf = diags(self.BR_yff) * self.Cf.transpose() + diags(self.BR_yft) * self.Ct.transpose()
self.Yt = diags(self.BR_ytf) * self.Cf.transpose() + diags(self.BR_ytt) * self.Ct.transpose()
self.Ybus = self.Cf * self.Yf + self.Ct * self.Yt + diags(self.Ysh)
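        # dimension check: Cf/Ct are (n_nodes x n_br) and Yf/Yt are (n_br x n_nodes),
        # so Ybus comes out square, (n_nodes x n_nodes)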
self.pq = np.where(self.types < 10)[0]
self.pv = np.where(self.types >= 10)[0]
if self.ref is None:
            self.ref = self.pv[0]
            self.pv = self.pv[1:]  # keep all but the first PV bus, which becomes the slack
def print(self):
"""
print the connectivity matrices
:return:
"""
print('\nCN_T\n', pd.DataFrame(self.CN_T.todense(), index=self.CN_names, columns=self.T_names).to_latex())
print('\nBR_T_f\n', pd.DataFrame(self.BR_T_f.todense(), index=self.BR_names, columns=self.T_names).to_latex())
print('\nBR_T_t\n', pd.DataFrame(self.BR_T_t.todense(), index=self.BR_names, columns=self.T_names).to_latex())
print('\nSW_T\n', pd.DataFrame(self.SW_T.todense(), index=self.SW_names, columns=self.T_names).to_latex())
print('\nSW_states\n', pd.DataFrame(self.SW_states, index=self.SW_names, columns=['States']).to_latex())
# resulting
print('\n\n' + '-' * 40 + ' RESULTS ' + '-' * 40 + '\n')
print('\nLD_CN\n', pd.DataFrame(self.LD_CN.todense(), index=self.LD_names, columns=self.CN_names).to_latex())
print('\nSH_CN\n', pd.DataFrame(self.SH_CN.todense(), index=self.SH_names, columns=self.CN_names).to_latex())
print('\nGEN_CN\n', pd.DataFrame(self.GEN_CN.todense(), index=self.GEN_names, columns=self.CN_names).to_latex())
print('\nBR_CN\n', pd.DataFrame(self.BR_CN.astype(int).todense(), index=self.BR_names, columns=self.CN_names).to_latex())
print('\nCN_CN\n', pd.DataFrame(self.CN_CN.todense(), index=self.CN_names, columns=self.CN_names).to_latex())
print('\ntypes\n', self.types)
print('\nSbus\n', self.Sbus)
print('\nVbus\n', self.Vbus)
print('\nYsh\n', self.Ysh)
print('\nYbus\n', self.Ybus.todense())
class Circuit:
def __init__(self, Sbase=100):
"""
Circuit constructor
"""
self.Sbase = Sbase
self.connectivity_nodes = list()
self.terminals = list()
self.switches = list()
self.branches = list()
self.jumpers = list()
self.loads = list()
self.shunts = list()
self.generators = list()
self.nodes_idx = dict()
self.terminals_idx = dict()
# relations between connectivity nodes and terminals
# node_terminal[some_node] = list of terminals
self.node_terminal = dict()
def add_node_terminal_relation(self, connectivity_node, terminal):
"""
Add the relation between a Connectivity Node and a Terminal
:param terminal:
:param connectivity_node:
:return:
"""
if connectivity_node in self.node_terminal.keys():
self.node_terminal[connectivity_node].append(terminal)
else:
self.node_terminal[connectivity_node] = [terminal]
def add_connectivity_node(self, node):
"""
add a Connectivity node
:param node:
:return:
"""
self.connectivity_nodes.append(node)
def add_terminal(self, terminal):
self.terminals.append(terminal)
def add_switch(self, switch):
"""
Add a switch
:param switch:
:return:
"""
self.switches.append(switch)
def add_branch(self, branch):
"""
Add a branch
:param branch:
:return:
"""
self.branches.append(branch)
def add_jumper(self, jumper):
"""
:param jumper:
"""
self.jumpers.append(jumper)
def add_load(self, load):
"""
:param load:
"""
self.loads.append(load)
def add_shunt(self, shunt):
"""
:param shunt:
"""
self.shunts.append(shunt)
def add_generator(self, generator):
"""
:param generator:
"""
self.generators.append(generator)
def load_file(self, fname):
"""
Load file
:param fname: file name
"""
xls = pd.ExcelFile(fname)
# Terminals
T_dict = dict()
df = pd.read_excel(xls, 'Terminals')
for i in range(df.shape[0]):
val = df.values[i, 0]
T = Terminal(val)
T_dict[val] = T
self.add_terminal(T)
# ConnectivityNodes
CN_dict = dict()
df = pd.read_excel(xls, 'ConnectivityNodes')
for i in range(df.shape[0]):
val = df.values[i, 0]
CN = ConnectivityNode(val)
CN_dict[val] = CN
self.add_connectivity_node(CN)
# Branches
df = pd.read_excel(xls, 'Branches')
for i in range(df.shape[0]):
T1 = T_dict[df.values[i, 1]]
T2 = T_dict[df.values[i, 2]]
r = df.values[i, 3]
x = df.values[i, 4]
r0 = df.values[i, 5]
x0 = df.values[i, 6]
g = df.values[i, 7]
b = df.values[i, 8]
g0 = df.values[i, 9]
b0 = df.values[i, 10]
l = df.values[i, 11]
self.add_branch(Line(df.values[i, 0], T1, T2, r, x, r0, x0, g, b, g0, b0, l))
        # Jumpers
        df = pd.read_excel(xls, 'Jumpers')
for i in range(df.shape[0]):
T1 = T_dict[df.values[i, 1]]
T2 = T_dict[df.values[i, 2]]
self.add_branch(Jumper(df.values[i, 0], T1, T2))
# Switches
df = pd.read_excel(xls, 'Switches')
for i in range(df.shape[0]):
T1 = T_dict[df.values[i, 1]]
T2 = T_dict[df.values[i, 2]]
state = bool(df.values[i, 3])
self.add_switch(Switch(df.values[i, 0], T1, T2, state))
# Loads
df = pd.read_excel(xls, 'Loads')
for i in range(df.shape[0]):
T1 = T_dict[df.values[i, 1]]
p = df.values[i, 2]
q = df.values[i, 3]
self.add_load(Load(df.values[i, 0], T1, p, q))
# shunts
df = pd.read_excel(xls, 'Shunts')
for i in range(df.shape[0]):
T1 = T_dict[df.values[i, 1]]
g = df.values[i, 2]
b = df.values[i, 3]
self.add_shunt(Shunt(df.values[i, 0], T1, g, b))
# Generators
df = pd.read_excel(xls, 'Generators')
for i in range(df.shape[0]):
T1 = T_dict[df.values[i, 1]]
p = df.values[i, 2]
vset = df.values[i, 3]
self.add_generator(Generator(df.values[i, 0], T1, p, vset))
# CN_T
df = pd.read_excel(xls, 'CN_T')
for i in range(df.shape[0]):
CN = CN_dict[df.values[i, 0]]
T = T_dict[df.values[i, 1]]
self.add_node_terminal_relation(CN, T)
def compile(self):
"""
Compile the circuit
"""
n_nodes = len(self.connectivity_nodes)
n_terminals = len(self.terminals)
n_br = len(self.branches) + len(self.jumpers)
n_sw = len(self.switches)
n_ld = len(self.loads)
n_gen = len(self.generators)
n_sh = len(self.shunts)
self.nodes_idx = dict() # dictionary of node object -> node index
self.terminals_idx = dict() # dictionary of terminals -> terminal index
conn = Connectivity(n_terminals=n_terminals,
n_nodes=n_nodes,
n_br=n_br,
n_sw=n_sw,
n_ld=n_ld,
n_gen=n_gen,
n_sh=n_sh,
Sbase=self.Sbase)
# Terminals
for i, terminal in enumerate(self.terminals):
self.terminals_idx[terminal] = i
conn.T_names[i] = terminal.name
# Connectivity Nodes
for i, node in enumerate(self.connectivity_nodes):
self.nodes_idx[node] = i
conn.CN_names[i] = node.name
terminals = self.node_terminal[node]
for terminal in terminals:
j = self.terminals_idx[terminal]
conn.CN_T[i, j] = 1
# Switches
for i, switch in enumerate(self.switches):
j = self.terminals_idx[switch.t1]
conn.SW_T[i, j] = 1
j = self.terminals_idx[switch.t2]
conn.SW_T[i, j] = 1
conn.SW_states[i] = int(switch.state)
conn.SW_names[i] = switch.name
# Branches (lines, transformers and jumpers)
for i, branch in enumerate(self.branches):
# from
f = self.terminals_idx[branch.t1]
conn.BR_T_f[i, f] = 1
# to
t = self.terminals_idx[branch.t2]
conn.BR_T_t[i, t] = 1
# name
conn.BR_names[i] = branch.name
# branch admittances
yff, yft, ytf, ytt = branch.get_y()
conn.BR_yff[i] = yff
conn.BR_yft[i] = yft
conn.BR_ytf[i] = ytf
conn.BR_ytt[i] = ytt
# Loads
for i, load in enumerate(self.loads):
j = self.terminals_idx[load.terminal]
conn.LD_T[i, j] = 1
conn.LD_names[i] = load.name
conn.LD_Power[i] = complex(load.P, load.Q)
# Generators
for i, generator in enumerate(self.generators):
j = self.terminals_idx[generator.terminal]
conn.GEN_T[i, j] = 1
conn.GEN_names[i] = generator.name
conn.Gen_Power[i] = generator.P
conn.Gen_voltage[i] = generator.Vset
# Shunts
for i, shunt in enumerate(self.shunts):
j = self.terminals_idx[shunt.terminal]
conn.SH_T[i, j] = 1
conn.SH_names[i] = shunt.name
conn.SH_Power[i] = complex(shunt.G, shunt.B)
# compute topology
conn.compute()
return conn
class PowerFlow:
def __init__(self, circuit: Circuit):
self.circuit = circuit
def run(self):
"""
Run power flow
:return:
"""
# compile circuit
conn = self.circuit.compile()
# run power flow
V, converged, normF, Scalc, iter_, elapsed = IwamotoNR(Ybus=conn.Ybus,
Sbus=conn.Sbus,
V0=conn.Vbus,
Ibus=conn.Ibus,
pv=conn.pv,
pq=conn.pq,
                                                               tol=1e-9,  # convergence tolerance
max_it=15,
robust=False)
return V
if __name__ == '__main__':
circuit = Circuit()
# circuit.load_file('substation_data.xlsx')
circuit.load_file('lynn5.xlsx')
conn_ = circuit.compile()
conn_.print()
pf = PowerFlow(circuit)
Vsol = pf.run()
print('\nVsol:', np.abs(Vsol))
Matcha green tea powder is a refreshing alternative to traditional tea, a bright green elixir that’s packed with antioxidants that enhance your mood, health and energy level.
Qualities like color, texture and flavor help determine the grade of the green tea powder, which can be separated into two main categories: ceremonial grade and culinary grade. Read more about the history of matcha green tea and its origins.
Sipped for centuries in the traditional Japanese tea ceremony, ceremonial grade matcha is the highest quality green tea powder available. This premium grade is vibrant green in color, with a very delicate taste and extremely fine texture. Ceremonial grade matcha is made from the youngest tea leaves, with the stems and veins entirely removed. The leaves are stone-ground, which makes the matcha suitable for a thick-style tea. Thanks to its bright green color, it can easily be distinguished from the other tea grades.
Blended exclusively for whisking with hot water and drinking on its own, ceremonial grade matcha should be consumed straight. Just as in traditional Japanese tea ceremonies, it should not be mixed with sweeteners or other ingredients.
This premium matcha has a naturally sweet, mild flavor that is smothered by the addition of milk, sugar, chocolate or soy products. The delicate nuances of this high quality tea are best enjoyed pure, and mixed only with hot water.
Use all your senses to appreciate the fine nature of ceremonial grade matcha. This bright green, talc-like powder should smell fresh and slightly grassy. It should never feel coarse or gritty, and its hue should always be a brilliant green.
You’ll need about one half-teaspoon of ceremonial grade matcha for every cup (8 ounces) of hot water. Be sure to whisk your beverage thoroughly. Using this premium grade for cooking and baking is not recommended. You wouldn’t make a pasta sauce with an expensive bottle of wine – and you don’t want to cook with ceremonial grade matcha, either. Besides being cost prohibitive, the refined features of this exquisite blend will be lost amongst your other ingredients.
Here’s a video on how to prepare matcha tea, the traditional way.
The second main category of matcha is culinary grade, which is most often used for cooking and baking. It’s also fine to drink culinary matcha, which is still a very high quality tea, just with a slightly different flavor profile than ceremonial grade matcha. Culinary grade matcha isn’t necessarily a lower-quality product; it’s just prepared differently for a different use – and it features different characteristics.
Culinary grade matcha is specifically blended for use with other ingredients in recipes for food and beverages. Its enhanced flavor profile pairs perfectly with other flavors, creating delicious green tea lattes, matcha smoothies and green tea based baked goods.
Featuring a more robust flavor than ceremonial grade matcha, the culinary grade is slightly less sweet, with more bitter notes.
This matcha should smell grassy and fresh, and feel soft and smooth – never gritty or coarse. It may look less vibrantly green than ceremonial grade matcha, but it should still possess a noticeably green color.
You’ll need to use a little more of this matcha green tea powder to make a beverage. Mix one to two teaspoons of culinary grade matcha into every cup (8 ounces) of hot water. Most cooking and baking recipes will call for one to two tablespoons of the powder. You can also drink culinary grade matcha mixed with hot water; however, it will taste best with a little added sweetener.
Premium grade matcha tea is ideal for everyday consumption, from a morning latte to an afternoon matcha smoothie. Compared to ceremonial grade matcha tea, premium grade is a very good blend at a slightly lower price. This makes it an excellent choice for everyday use. Premium grade matcha tea has a very fine texture, which breaks up easily in water. It is slightly less vibrant green than ceremonial grade matcha, but don’t let the color fool you – it’s just as satisfying. This is the type of Culinary Grade matcha that Epic Matcha offers. Read more about how we source our matcha.
Made with less delicate leaves than ceremonial and premium grade matchas, café grade matcha offers an extremely strong flavor – perfect for cooking and baking. It’s one of the more expensive types of matcha powder, and you can tell it apart from the lower grades by its unique green color. Café grade matcha tea has a fine texture that blends well, whether you are making a cool green tea smoothie or warm matcha cappuccino.
Ingredient grade matcha tea is an excellent choice for recipes that contain milk and other dairy products. Use it to make green tea ice cream, a matcha smoothie, or matcha latté. Because of its thick consistency, it works well when added to sauces and desserts. Prevent lumps in your recipes by stirring the matcha well, preferably with a whisk.
Kitchen grade matcha tea is one of the most economical brands, and is made with less delicate leaves than the other grades. It has a strong astringent flavor that makes it perfect for large-scale brewing and for mixing into other foods. Not quite as fine as the other types of matcha powder, kitchen grade matcha tea is a bit darker in color and usually sold in larger quantities. This matcha is ideal for experimenting with new recipes and getting creative in the kitchen.
Classic grade matcha tea is an enjoyable blend with an excellent economic value. It’s one of the higher grades but usually costs less than the other grades. Classic grade matcha has a strong and distinct flavor, which lends itself to many uses – and it is more widely available compared to other matcha teas.
import bayessb
from pysb.examples.robertson import model
import pysb.integrate
import numpy
import matplotlib.pyplot as plt
import matplotlib.gridspec as mgridspec
import matplotlib.ticker as mticker
import functools
import sys
def likelihood(mcmc, position, data, scale_factor, sigma):
yout = mcmc.simulate(position)
yout_norm = yout / scale_factor
# fit to first two species
return numpy.sum((data[:,0:2] - yout_norm[:,0:2]) ** 2 / (2 * sigma ** 2))
def prior(mcmc, position):
est = [1e-2, 1e7, 1e4]
mean = numpy.log10(est)
var = 10
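    # negative log of independent Gaussian priors on the log10-scale
    # parameters (up to an additive constant)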
return numpy.sum((position - mean) ** 2 / ( 2 * var))
def step(mcmc):
if mcmc.iter % 20 == 0:
print 'iter=%-5d sigma=%-.3f T=%-.3f acc=%-.3f, lkl=%g prior=%g post=%g' % \
(mcmc.iter, mcmc.sig_value, mcmc.T, float(mcmc.acceptance)/(mcmc.iter+1), mcmc.accept_likelihood,
mcmc.accept_prior, mcmc.accept_posterior)
def scatter(mcmc, mask=True, example_pos_r=None, example_pos_g=None,
show_model=False):
"""
Display a grid of scatter plots for each 2-D projection of an MCMC walk.
Parameters
----------
mcmc : bayessb.MCMC
The MCMC object to display.
mask : bool/int, optional
If True (default) the annealing phase of the walk will be discarded
before plotting. If False, nothing will be discarded and all points will
be plotted. If an integer, specifies the number of steps to be discarded
from the beginning of the walk.
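    example_pos_r : array-like, optional
        If given, a position to mark with red vertical lines on the diagonal
        histograms (e.g. a single interesting step of the walk).
    example_pos_g : array-like, optional
        If given, a position to mark with green vertical lines (e.g. the
        marginal mean over the mixed phase of the walk).
    show_model : bool, optional
        Accepted for API compatibility; currently unused.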
"""
# number of dimensions in position vector
ndims = mcmc.num_estimate
# vector of booleans indicating accepted MCMC moves
accepts = mcmc.accepts.copy()
# mask off the annealing (burn-in) phase, or up to a user-specified step
if mask is True:
mask = mcmc.options.anneal_length
if mask is False:
mask = 0
accepts[0:mask] = 0
# grab position vectors and posterior values from accepted moves
positions = mcmc.positions[accepts]
posteriors = mcmc.posteriors[accepts]
# calculate actual range of values on each dimension
maxes = positions.max(0)
mins = positions.min(0)
ranges = abs(maxes - mins)
# use 2% of the maximum range as a margin for all scatter plots
margin = max(ranges) * 0.02
# calculate upper and lower plot limits based on min/max plus the margin
lims_top = maxes + margin
lims_bottom = mins - margin
# calculate new ranges based on limits
lim_ranges = abs(lims_top - lims_bottom)
plt.figure()
# build a GridSpec which allocates space based on these ranges
gs = mgridspec.GridSpec(ndims, ndims, width_ratios=lim_ranges,
height_ratios=lim_ranges[-1::-1])
# build an axis locator for each dimension
locators = []
for i, r in enumerate(lim_ranges):
# place ticks on the integers, unless there is no integer within the
# given dimension's calculated range
        nbins = int(numpy.ceil(r) * 5 + 1)
locators.append(mticker.MaxNLocator(nbins=nbins, steps=[2, 10]))
fignum = 0
# reverse the param list along the y axis so we end up with the "origin"
# (i.e. the first param) at the bottom left instead of the top left. note
# that y==0 will be along the bottom now, but the figure numbers in the
# gridspec still begin counting at the top.
for y, py in reversed(list(enumerate(mcmc.options.estimate_params))):
for x, px in enumerate(mcmc.options.estimate_params):
ax = plt.subplot(gs[fignum])
ax.tick_params(left=False, right=True, top=True, bottom=False,
labelleft=False, labelright=False, labeltop=False,
labelbottom=False, direction='in')
ax.yaxis.set_major_locator(locators[y])
ax.xaxis.set_major_locator(locators[x])
if x == y:
# 1-D histograms along the diagonal
#
# distribute 200 total bins across all histograms,
# proportionally by their width, such that the bin density looks
# consistent across the different histograms
                bins = int(200 * lim_ranges[x] / numpy.sum(lim_ranges))
ax.hist(positions[:,x], bins=bins, histtype='stepfilled',
color='salmon', ec='tomato')
if example_pos_r is not None:
ax.vlines(example_pos_r[x], *ax.get_ylim(),
color='red', linewidth=2)
if example_pos_g is not None:
ax.vlines(example_pos_g[x], *ax.get_ylim(),
color='green', linewidth=2)
arrow_scale = ax.get_ylim()[1] / lim_ranges[x]
arrow_len = arrow_scale * 0.1
arrow_head_l = arrow_len * 0.4
arrow_head_w = min(lim_ranges) * .1
ax.arrow(numpy.log10(px.value), arrow_len, 0, -arrow_len,
head_length=arrow_head_l, head_width=arrow_head_w,
ec='k', fc='k', length_includes_head=True)
ax.set_xlim(lims_bottom[x], lims_top[x])
#ax.yaxis.set_major_locator(mticker.NullLocator())
ax.yaxis.set_major_locator(mticker.LinearLocator())
else:
# 2-D scatter plots off the diagonal
ax.plot(positions[:, x], positions[:, y], color='darkblue',
alpha=0.2)
ax.scatter(positions[:, x], positions[:, y], s=1, color='darkblue',
alpha=0.2)
ax.set_xlim(lims_bottom[x], lims_top[x])
ax.set_ylim(lims_bottom[y], lims_top[y])
# parameter name labels along left and bottom edge of the grid
if x == 0:
ax.set_ylabel(py.name, weight='black', size='large',
labelpad=10, rotation='horizontal',
horizontalalignment='right')
if y == 0:
ax.set_xlabel(px.name, weight='black', size='large',
labelpad=10,)
# tick labels along the right and top edge of the grid
if True:#x == ndims - 1: # XXX
ax.tick_params('y', labelright=True)
if y == ndims - 1:
ax.tick_params('x', labeltop=True)
# move to next figure in the gridspec
fignum += 1
# TODO: would axis('scaled') force the aspect ratio we want?
def prediction(mcmc, n, species_idx, scale_factor, data_std, plot_samples=False):
plt.figure()
positions = mcmc.positions[-n:]
accepts = mcmc.accepts[-n:]
accept_positions = positions[accepts]
tspan = mcmc.options.tspan
ysamples = numpy.empty((len(accept_positions), len(tspan)))
for i, pos in enumerate(accept_positions):
ysim = mcmc.simulate(pos)
ysamples[i] = ysim[:, species_idx] / scale_factor
ymean = numpy.mean(ysamples, 0)
ystd = numpy.std(ysamples, 0)
if plot_samples:
for y in ysamples:
plt.plot(tspan, y, c='gray', alpha=.01)
plt.plot(tspan, ymean, 'b:', linewidth=2)
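    # +/-0.842 and +/-1.645 standard deviations bound the central 60% and
    # 90% intervals, respectively, under a normal approximation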
std_interval = ystd[:, None] * [+1, -1]
plt.plot(tspan, ymean[:, None] + std_interval * 0.842, 'g-.', linewidth=2)
plt.plot(tspan, ymean[:, None] + std_interval * 1.645, 'k-.', linewidth=2)
plt.errorbar(tspan, ymean, yerr=data_std, fmt=None, ecolor='red')
plt.xlim(tspan[0] - 1, tspan[-1] + 1)
def data(mcmc, data_norm, scale_factor, data_species_idxs):
plt.figure()
colors = ('r', 'g', 'b')
labels = ('A', 'B', 'C')
tspan = mcmc.options.tspan
true_pos = numpy.log10([p.value for p in mcmc.options.estimate_params])
true_norm = mcmc.simulate(true_pos) / scale_factor
for i, (rl, dl, c, l) in enumerate(zip(true_norm.T, data_norm.T,
colors, labels)):
plt.plot(tspan, rl, color=c, label=l)
if i in data_species_idxs:
plt.plot(tspan, dl, linestyle=':', marker='o', color=c, ms=4, mew=0)
def main():
seed = 2
random = numpy.random.RandomState(seed)
    sigma = 0.1
    ntimes = 20
    tspan = numpy.linspace(0, 40, ntimes)
solver = pysb.integrate.Solver(model, tspan)
solver.run()
    ydata = solver.y * (random.randn(*solver.y.shape) * sigma + 1)
ysim_max = solver.y.max(0)
ydata_norm = ydata / ysim_max
opts = bayessb.MCMCOpts()
opts.model = model
opts.tspan = tspan
# estimate rates only (not initial conditions) from wild guesses
opts.estimate_params = [p for p in model.parameters if p.name.startswith('k') ]
opts.initial_values = [1e-4, 1e3, 1e6]
opts.nsteps = 10000
opts.likelihood_fn = functools.partial(likelihood, data=ydata_norm,
scale_factor=ysim_max, sigma=sigma)
opts.prior_fn = prior
opts.step_fn = step
opts.use_hessian = True
opts.hessian_period = opts.nsteps / 10
opts.seed = seed
mcmc = bayessb.MCMC(opts)
mcmc.run()
mixed_nsteps = opts.nsteps / 2
mixed_positions = mcmc.positions[-mixed_nsteps:]
mixed_accepts = mcmc.accepts[-mixed_nsteps:]
mixed_accept_positions = mixed_positions[mixed_accepts]
marginal_mean_pos = numpy.mean(mixed_accept_positions, 0)
# position is far from marginal mean, but posterior is good (determined by
# trial and error and some interactive plotting)
interesting_step = 8830
print "\nGenerating figures..."
# show scatter plot
scatter(mcmc, opts.nsteps / 2, mcmc.positions[interesting_step],
marginal_mean_pos)
# show prediction for C trajectory, which was not fit to
prediction(mcmc, opts.nsteps / 2, 2, ysim_max[2], sigma, plot_samples=True)
plt.title("Prediction for C")
# show "true" trajectories and noisy data
data(mcmc, ydata_norm, ysim_max, [0, 1])
plt.title("True trajectories and noisy data")
# show all plots at once
plt.show()
if __name__ == '__main__':
main()
Together with Ms Katrien Beelen of Beelen Advocaten (EUAEL Alliance Partner), EUAEL successfully represented a London buyer in defending the arrest (seizure) of horses and bank assets belonging to a horse dealer in Belgium.
The buyer annulled the purchase agreement for the show jumper because the horse showed many defects, and had horses and bank assets belonging to the dealer arrested. The dealer's lawyer pleaded before the Court of Charleroi to have all these arrests lifted, but the Court rejected the request.
"""
Script to calculate per-residue RSCCs for a model versus an EM map with an
arbitrary origin.
"""
from __future__ import division
from mmtbx import real_space_correlation
import iotbx.phil
from cctbx import crystal
from cctbx import maptbx
from scitbx.array_family import flex
import sys
master_phil_str = """
model = None
.type = path
map = None
.type = path
d_min = 3.0
.type = float
.help = Optional cutoff resolution for computing F(calc). This will not \
affect the dimensions of the ultimate FC map.
atom_radius = 1.5
.type = float
"""
def run (args, out=sys.stdout) :
cmdline = iotbx.phil.process_command_line_with_files(
args=args,
master_phil_string=master_phil_str,
pdb_file_def="model",
map_file_def="map",
usage_string="""\
em_rscc.py model.pdb map.ccp4
%s""" % __doc__)
params = cmdline.work.extract()
assert (not None in [params.model, params.map])
pdb_in = cmdline.get_file(params.model).file_object
m = cmdline.get_file(params.map).file_object
print >> out, "Input electron density map:"
print >> out, "m.all() :", m.data.all()
print >> out, "m.focus() :", m.data.focus()
print >> out, "m.origin():", m.data.origin()
print >> out, "m.nd() :", m.data.nd()
print >> out, "m.size() :", m.data.size()
print >> out, "m.focus_size_1d():", m.data.focus_size_1d()
print >> out, "m.is_0_based() :", m.data.is_0_based()
print >> out, "map: min/max/mean:", flex.min(m.data), flex.max(m.data), flex.mean(m.data)
print >> out, "unit cell:", m.unit_cell_parameters
symm = crystal.symmetry(
space_group_symbol="P1",
unit_cell=m.unit_cell_parameters)
xrs = pdb_in.input.xray_structure_simple(crystal_symmetry=symm)
print >> out, "Setting up electron scattering table (d_min=%g)" % params.d_min
xrs.scattering_type_registry(
d_min=params.d_min,
table="electron")
fc = xrs.structure_factors(d_min=params.d_min).f_calc()
cg = maptbx.crystal_gridding(
unit_cell=symm.unit_cell(),
space_group_info=symm.space_group_info(),
pre_determined_n_real=m.data.all())
fc_map = fc.fft_map(
crystal_gridding=cg).apply_sigma_scaling().real_map_unpadded()
assert (fc_map.all() == fc_map.focus() == m.data.all())
em_data = m.data.as_double()
unit_cell_for_interpolation = m.grid_unit_cell()
frac_matrix = unit_cell_for_interpolation.fractionalization_matrix()
sites_cart = xrs.sites_cart()
sites_frac = xrs.sites_frac()
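  # for each residue, sample both maps at the atom centers and compute the
  # linear correlation between the EM and Fcalc densities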
print >> out, "PER-RESIDUE CORRELATION:"
for chain in pdb_in.hierarchy.only_model().chains() :
for residue_group in chain.residue_groups() :
i_seqs = residue_group.atoms().extract_i_seq()
values_em = flex.double()
values_fc = flex.double()
for i_seq in i_seqs :
rho_em = maptbx.non_crystallographic_eight_point_interpolation(
map=em_data,
gridding_matrix=frac_matrix,
site_cart=sites_cart[i_seq])
rho_fc = fc_map.eight_point_interpolation(sites_frac[i_seq])
values_em.append(rho_em)
values_fc.append(rho_fc)
cc = flex.linear_correlation(x=values_em, y=values_fc).coefficient()
print >> out, residue_group.id_str(), cc
def exercise () :
import mmtbx.regression
from iotbx import file_reader
from cStringIO import StringIO
pdb_file = "tmp_em_rscc.pdb"
map_file = "tmp_em_rscc.map"
f = open(pdb_file, "w")
for line in mmtbx.regression.model_1yjp.splitlines() :
if line.startswith("ATOM") :
f.write(line + "\n")
f.close()
pdb_in = file_reader.any_file(pdb_file).file_object
symm = crystal.symmetry(
space_group_symbol="P1",
unit_cell=(30, 30, 30, 90, 90, 90))
xrs = pdb_in.input.xray_structure_simple(crystal_symmetry=symm)
xrs.scattering_type_registry(
d_min=3.0,
table="electron")
fc = xrs.structure_factors(d_min=3.0).f_calc()
fft_map = fc.fft_map(resolution_factor=1/3).apply_sigma_scaling()
assert (fft_map.n_real() == (32,32,32))
fft_map.as_ccp4_map(
file_name=map_file,
gridding_first=(-16,-16,-16),
gridding_last=(15,15,15))
out = StringIO()
run(args=[pdb_file, map_file], out=out)
assert ("""\
PER-RESIDUE CORRELATION:
A 1 1.0
A 2 1.0
A 3 1.0
A 4 1.0
A 5 1.0
A 6 1.0
A 7 1.0
""" in out.getvalue()), out.getvalue()
if (__name__ == "__main__") :
if ("--test" in sys.argv) :
exercise()
print "OK"
else :
run(sys.argv[1:])
Welcome by Dr. J.L.Raina, Chairman ISOL Foundation 5.00p.m. -5.05p.m.
Presentation of the Ethos of Chicago Conference and ISOL Initiative by Prof. Sunita Singh Sengupta, Founder of ISOL Foundation and Conference Convener 5.05p.m.-5.10p.m.
Lighting the lamp of Carrying the Vision Forward 5.10p.m.-5.15p.m.
Keynote Address by Dr. Karan Singh – Universal Values for Global Society 5.15p.m.-5.30p.m.
Professor Kapil Kapoor, Current Chancellor, Mahatma Gandhi International Hindi University, Wardha, and Former Pro-Vice-Chancellor, JNU 5.30p.m.-5.45p.m.
Dr. K.K. Chakravarty, Chairman, Lalit Kala Akademi Ministry of Culture, Govt of India and Chancellor, NUEPA, Ministry of Human Resource Development, Govt. of India. 5.45p.m.-6.00p.m.
Prof. B.Bhattacharya, Former Dean -IIFT & Sr. Advisor to the UN 6.00p.m.-6.15p.m.
Dr. Anirban Ganguly, Director, Shyama Prasad Mookherjee Research Foundation 6.15p.m.-6.30p.m.
Dr. A.K. Merchant, National Trustee, Lotus Temple & Baha’i Community of India; General Secretary, Temple of Understanding – India 6.30p.m.-7.00p.m.
Rev. Fr. (Dr.) Babu Joseph, SVD 7.00p.m.-7.15p.m.
Prof. Ashok Ogra, Director of Apeejay Institute of Mass Communication, Delhi. 7.15p.m.-7.30p.m.
Shri N.K.Singh, Journalist and General Secretary, Broadcast Editors’ Association 7.30p.m.-7.45p.m.
Shri Ashok Pandey, Principal, Ahlcon International School, Mayur Vihar 7.45p.m.-8.00p.m.
Prof. D.P. Tripathi, General Secretary, Nationalist Congress Party 8.00p.m.-8.15p.m.
Shri Shyam Jaju, National Secretary of the Bharatiya Janata Party 8.15p.m.-8.30p.m.
Open invitation to speak on Swami Vivekananda 8.30p.m.-8.40p.m.
Vote of thanks by Shri Rajesh Jain, Member ISOL Board of Governors 8.40p.m.-8.45p.m.
Join us for Dinner 8.45p.m.-9.30p.m.
"""
Lift Curve Widget
-----------------
"""
from collections import namedtuple
import numpy
import sklearn.metrics as skl_metrics
from PyQt4 import QtGui
from PyQt4.QtGui import QColor, QPen
from PyQt4.QtCore import Qt
import pyqtgraph as pg
import Orange.data
import Orange.evaluation.testing
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils import colorpalette, colorbrewer
from .owrocanalysis import convex_hull
CurvePoints = namedtuple(
"CurvePoints",
["cases", "tpr", "thresholds"]
)
CurvePoints.is_valid = property(lambda self: self.cases.size > 0)
LiftCurve = namedtuple(
"LiftCurve",
["points", "hull"]
)
LiftCurve.is_valid = property(lambda self: self.points.is_valid)
def LiftCurve_from_results(results, clf_index, target):
x, y, thresholds = lift_curve_from_results(results, target, clf_index)
points = CurvePoints(x, y, thresholds)
hull = CurvePoints(*convex_hull([(x, y, thresholds)]))
return LiftCurve(points, hull)
PlotCurve = namedtuple(
"PlotCurve",
["curve",
"curve_item",
"hull_item"]
)
class OWLiftCurve(widget.OWWidget):
name = "Lift Curve"
description = ""
icon = "icons/LiftCurve.svg"
priority = 1020
inputs = [
{"name": "Evaluation Results",
"type": Orange.evaluation.testing.Results,
"handler": "set_results"}
]
target_index = settings.Setting(0)
selected_classifiers = settings.Setting([])
display_convex_hull = settings.Setting(False)
display_cost_func = settings.Setting(True)
fp_cost = settings.Setting(500)
fn_cost = settings.Setting(500)
target_prior = settings.Setting(50.0)
def __init__(self, parent=None):
super().__init__(parent)
self.results = None
self.classifier_names = []
self.colors = []
self._curve_data = {}
box = gui.widgetBox(self.controlArea, "Plot")
tbox = gui.widgetBox(box, "Target Class")
tbox.setFlat(True)
self.target_cb = gui.comboBox(
tbox, self, "target_index", callback=self._on_target_changed)
cbox = gui.widgetBox(box, "Classifiers")
cbox.setFlat(True)
self.classifiers_list_box = gui.listBox(
cbox, self, "selected_classifiers", "classifier_names",
selectionMode=QtGui.QListView.MultiSelection,
callback=self._on_classifiers_changed)
gui.checkBox(box, self, "display_convex_hull",
"Show lift convex hull", callback=self._replot)
self.plotview = pg.GraphicsView(background="w")
self.plotview.setFrameStyle(QtGui.QFrame.StyledPanel)
self.plot = pg.PlotItem()
self.plot.getViewBox().setMenuEnabled(False)
pen = QPen(self.palette().color(QtGui.QPalette.Text))
tickfont = QtGui.QFont(self.font())
tickfont.setPixelSize(max(int(tickfont.pixelSize() * 2 // 3), 11))
axis = self.plot.getAxis("bottom")
axis.setTickFont(tickfont)
axis.setPen(pen)
axis.setLabel("P Rate")
axis = self.plot.getAxis("left")
axis.setTickFont(tickfont)
axis.setPen(pen)
axis.setLabel("TP Rate")
self.plot.showGrid(True, True, alpha=0.1)
self.plot.setRange(xRange=(0.0, 1.0), yRange=(0.0, 1.0))
self.plotview.setCentralItem(self.plot)
self.mainArea.layout().addWidget(self.plotview)
def set_results(self, results):
"""Set the input evaluation results."""
self.clear()
self.error(0)
if results is not None:
if results.data is None:
self.error(0, "Give me data!!")
results = None
elif not isinstance(results.data.domain.class_var,
Orange.data.DiscreteVariable):
self.error(0, "Need discrete class variable")
results = None
self.results = results
if results is not None:
self._initialize(results)
self._setup_plot()
def clear(self):
"""Clear the widget state."""
self.plot.clear()
self.results = None
self.target_cb.clear()
self.target_index = 0
self.classifier_names = []
self.colors = []
self._curve_data = {}
def _initialize(self, results):
N = len(results.predicted)
names = getattr(results, "learner_names", None)
if names is None:
names = ["#{}".format(i + 1) for i in range(N)]
self.colors = colorpalette.ColorPaletteGenerator(
N, colorbrewer.colorSchemes["qualitative"]["Dark2"])
self.classifier_names = names
self.selected_classifiers = list(range(N))
for i in range(N):
item = self.classifiers_list_box.item(i)
item.setIcon(colorpalette.ColorPixmap(self.colors[i]))
self.target_cb.addItems(results.data.domain.class_var.values)
def plot_curves(self, target, clf_idx):
if (target, clf_idx) not in self._curve_data:
curve = LiftCurve_from_results(self.results, clf_idx, target)
color = self.colors[clf_idx]
pen = QPen(color, 1)
pen.setCosmetic(True)
shadow_pen = QPen(pen.color().lighter(160), 2.5)
shadow_pen.setCosmetic(True)
item = pg.PlotDataItem(
curve.points[0], curve.points[1],
pen=pen, shadowPen=shadow_pen,
symbol="+", symbolSize=3, symbolPen=shadow_pen,
antialias=True
)
hull_item = pg.PlotDataItem(
curve.hull[0], curve.hull[1],
pen=pen, antialias=True
)
self._curve_data[target, clf_idx] = \
PlotCurve(curve, item, hull_item)
return self._curve_data[target, clf_idx]
def _setup_plot(self):
target = self.target_index
selected = self.selected_classifiers
curves = [self.plot_curves(target, clf_idx) for clf_idx in selected]
for curve in curves:
self.plot.addItem(curve.curve_item)
if self.display_convex_hull:
hull = convex_hull([c.curve.hull for c in curves])
self.plot.plot(hull[0], hull[1], pen="y", antialias=True)
pen = QPen(QColor(100, 100, 100, 100), 1, Qt.DashLine)
pen.setCosmetic(True)
self.plot.plot([0, 1], [0, 1], pen=pen, antialias=True)
def _replot(self):
self.plot.clear()
if self.results is not None:
self._setup_plot()
def _on_target_changed(self):
self._replot()
def _on_classifiers_changed(self):
self._replot()
def lift_curve_from_results(results, target, clf_idx, subset=slice(0, -1)):
actual = results.actual[subset]
scores = results.probabilities[clf_idx][subset][:, target]
yrate, tpr, thresholds = lift_curve(actual, scores, target)
return yrate, tpr, thresholds
def lift_curve(ytrue, ypred, target=1):
P = numpy.sum(ytrue == target)
N = ytrue.size - P
fpr, tpr, thresholds = skl_metrics.roc_curve(ytrue, ypred, target)
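    # rate of positive predictions: the fraction of all instances predicted
    # positive at each threshold (the x axis of the lift curve)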
rpp = fpr * (N / (P + N)) + tpr * (P / (P + N))
return rpp, tpr, thresholds
def main():
import sip
from PyQt4.QtGui import QApplication
from Orange.classification import logistic_regression, svm
from Orange.evaluation import testing
app = QApplication([])
w = OWLiftCurve()
w.show()
w.raise_()
data = Orange.data.Table("ionosphere")
results = testing.CrossValidation(
data,
[logistic_regression.LogisticRegressionLearner(penalty="l2"),
logistic_regression.LogisticRegressionLearner(penalty="l1"),
svm.SVMLearner(probability=True),
svm.NuSVMLearner(probability=True)
],
store_data=True
)
results.learner_names = ["LR l2", "LR l1", "SVM", "Nu SVM"]
w.set_results(results)
rval = app.exec_()
sip.delete(w)
del w
app.processEvents()
del app
return rval
if __name__ == "__main__":
main()
We are pleased to announce that our firm is one of the co-organizers of the International Conference titled “100 years of Industrial Property Protection in Poland” organized by the Polish Patent Office as part of this year’s World Intellectual Property Day celebrations.
President of Poland Andrzej Duda is the Honorary Patron of the conference, which will take place in the Warsaw Philharmonic on 26 April 2018.
Find out more about the event on the Polish Patent Office’s website. |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp.osv import fields, osv
from openerp import _
class create_contract(osv.osv_memory):
_name = 'create_contract'
_description = 'Wizard to create contract'
_rec_name = 'start_date'
_columns = {
'start_date': fields.date(string='Contract Date', required=True),
'start_code': fields.char(string='Contract Number', required=True),
'expiration_date': fields.date(string='Expiration Date'),
}
_defaults = {
}
    def create_contract(self, cr, uid, ids, context=None):
        if context is None:
            context = {}
        wizard = self.browse(cr, uid, ids)[0]
        active_id = context.get('active_id', False)
        contract_obj = self.pool.get('nautical.contract')
        craft_obj = self.pool.get('nautical.craft')
        if active_id:
            # browse the craft only once we know there is an active record
            craft = craft_obj.browse(cr, uid, [active_id])[0]
            new_contract_vals = {
                'start_date': wizard.start_date,
                'start_code': wizard.start_code,
                'expiration_date': wizard.expiration_date,
                'owner_id': craft.owner_id.id,
                'craft_id': craft.id,
                'state': 'contracted',
            }
            contract_obj.create(
                cr, uid, new_contract_vals, context=context)
            craft_obj.write(
                cr, uid, craft.id, {'state': 'contracted'}, context=context)
        return True
Boston Mexican Petroleum Stock certificate for 28 Ordinary Shares dated May 13, 1921. Shareholder: L. Sherman Adams. Green/Black.
Engraved vignette of the "Loading Rack - Panuco River" at top center. Uncancelled and fine.
# coding: utf-8
# Copyright (C) zhongjie luo <l.zhjie@qq.com>
import datetime, random, os, sys, copy, json
if sys.version_info.major >= 3:
from .tools.StopWatch import StopWatch
from .tools.ProgressBar import ProgressBar, MultiBar
from .tools.ColorPrint import ColorPrint
from .tools.MultiProcess import MultiProcess
from .tools.Options import Options as toolsOptions, Option, string2bool
else:
from tools.StopWatch import StopWatch
from tools.ProgressBar import ProgressBar, MultiBar
from tools.ColorPrint import ColorPrint
from tools.MultiProcess import MultiProcess
from tools.Options import Options as toolsOptions, Option, string2bool
from multiprocessing import Lock, Queue, Semaphore
class Options(toolsOptions):
options = (
Option("host", "h", "127.0.0.1"),
Option("port", "p", 0),
Option("processor_num", "n", 1),
Option("record_num", "r", 1000),
Option("processor_num_max", "n_max", 50),
Option("record_num_max", "r_max", 10000000),
Option("out_dir", "d", "result"),
Option("tag", "t", "tag",
help=u"添加到输出文件名中,可用于区分同类型测试\r\n" \
u"例如用时间来命名每次测试结果的输出文件\r\n"),
Option("table", "T", "__benchmark"),
Option("key_start", "k", 10000),
Option("w", "w", True, help="warm up, use --w enable"),
Option("quiet", "q", False, string2bool))
def __init__(self, options=None, args=None):
if options is None:
options = Options.options
super(Options, self).__init__(options, args)
def parse_option(self, raise_when_fail=False):
if super(Options, self).parse_option(raise_when_fail) is False:
print(self.usage() + self.help())
return False
return True
class DbConnection(object):
"""type(record)->((k, v), index, last_index)"""
def __init__(self, options):
self.name = options.get("_name")
self.host = options.get("host")
self.port = options.get("port")
self.table = options.get("table")
self.id = options.get("_id")
self.quiet = options.get("quiet")
self.record_num = options.get("_count_per_processor")
self.options = options
self._benchmark_funcs = {}
default_funcs = ("insert", "search", "update", "delete")
for func_name in default_funcs:
func = getattr(self, func_name, None)
func_self = getattr(DbConnection, func_name, None)
if getattr(func, "__code__") != getattr(func_self, "__code__"):
self._benchmark_funcs[func.__name__] = func
for func in self.__class__.__dict__.values():
if getattr(func, "benchmark", None) is True:
self._benchmark_funcs[func.__name__] = getattr(self, func.__name__)
    def connect(self):
        """must override"""
        raise NotImplementedError
    def disconnect(self):
        """must override"""
        raise NotImplementedError
    def insert(self, record):
        raise NotImplementedError
    def search(self, record):
        raise NotImplementedError
    def update(self, record):
        raise NotImplementedError
    def delete(self, record):
        raise NotImplementedError
    def set_up(self):
        """invoke before benchmark"""
        raise NotImplementedError
    def tear_down(self):
        """invoke after benchmark"""
        raise NotImplementedError
@staticmethod
def benchmark(label=None):
""":param label, for echarts label"""
def _benchmark(func):
func.benchmark = True
func.label = label if label else func.__name__
return func
return _benchmark
def benchmark_funcs(self):
"""benchmark_funcs()->{func_name: func}"""
return self._benchmark_funcs
def _warm_up(self, record):
(k, v), index, last_index = record
return True
def __str__(self):
return "%d %s[%s] %s:%s" % \
(self.id, self.name, self.table, self.host, self.port)
class Data(object):
def __init__(self, size, range_l=10000, options=None):
self.__size = int(size)
self.size = int(size)
self.range_l = int(range_l)
self.options = options
self.__cursor = int(0)
self.reset()
def hook_reset(self):
pass
def hook_get_key_and_value(self, index):
return (None, None)
def reset(self):
self.__cursor = 0
self.hook_reset()
def next(self):
if self.__cursor >= self.__size:
raise StopIteration()
item = self.hook_get_key_and_value(self.__cursor)
self.__cursor += 1
return item
def __next__(self):
return self.next()
def __len__(self):
return self.__size
def __iter__(self):
return self
class DataRecord(Data):
def __init__(self, size, range_l=10000, options=None):
super(DataRecord, self).__init__(size, range_l, options)
def hook_get_key_and_value(self, index):
key = str(index + self.range_l)
return (key, key)
class DataRandom(DataRecord):
def __init__(self, size, range_l=10000, options=None):
self.__seed = range_l + size
self.__range_l = range_l
self.__range_r = range_l + size * 10
self.__value = str(datetime.datetime.now()) + " "
super(DataRandom, self).__init__(size, range_l, options)
def hook_get_key_and_value(self, index):
return (str(random.randint(self.__range_l, self.__range_r)),
self.__value + str(index))
def hook_reset(self):
random.seed(self.__seed)
class DataFile(DataRecord):
def __init__(self, size, range_l=10000, options=None):
super(DataFile, self).__init__(size, range_l, options)
file_name = options.get("file", None)
if file_name is None:
raise Exception("require option file")
with open(file_name, "r") as fp:
self.lines = fp.readlines()
self.size = len(self.lines)
self.key = str(datetime.datetime.now()) + " " + str(range_l) + " "
def hook_get_key_and_value(self, index):
return (self.key + str(index), self.lines[index % self.size])
def benchmark(theme, data, watch, func, func_hook, context):
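    """Run func over every record in data, timing the loop with watch.

    func_hook, if given, is invoked roughly every 10% of progress with
    (theme, record, context). Returns the number of calls that failed.
    """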
failed_counter = 0
data.reset()
size = len(data)
last_index = size - 1
step = size / 10
next_level = 0
__func_get_kv = data.hook_get_key_and_value
__func_hook = func_hook
__context = context
watch.reset()
if __func_hook is not None:
for index in range(size):
kv = __func_get_kv(index)
record = (kv, index, last_index)
if not func(record):
failed_counter += 1
if index >= next_level:
__func_hook(theme, record, __context)
next_level += step
if next_level > last_index:
next_level = last_index
else:
for index in range(size):
kv = __func_get_kv(index)
if not func((kv, index, last_index)):
failed_counter += 1
watch.stop()
return failed_counter
class DbBench:
def __init__(self, connection, data, hook_func=None, context=None):
if not issubclass(type(connection), DbConnection):
raise TypeError("param 1 must be a instance of DbConnection's subclass ")
if not issubclass(type(data), Data):
raise TypeError("param 2 must be a instance of Data's subclass ")
self.__connected = False
self.conn = connection
self.conn.connect()
self.__connected = True
self.data = data
self.__hook_func = hook_func
self.__result = {}
self.__context = context
self.__warm_up = False
if connection.options.get("w", False) is False:
self.__warm_up = True
def __del__(self):
if self.__connected:
self.conn.disconnect()
def get_result(self):
return self.__result
def __test_func(self, func, theme):
watch = StopWatch()
__benchmark = benchmark
m = sys.modules.get('db_bench.DbBench', None)
if m and m.__file__.endswith(".so") and DataRecord == self.data.__class__:
import importlib
temp = importlib.import_module("db_bench.DbBenchCython")
__benchmark = temp.benchmark_cython
# warm up
if self.__warm_up is False:
self.__warm_up = True
__benchmark("warmup", self.data, watch, self.conn._warm_up, self.__hook_func, self.__context)
failed_counter = __benchmark(theme, self.data, watch, func, self.__hook_func, self.__context)
cost = max(float("%.3f" % watch.seconds_float()), 0.001)
self.__result[theme] = {}
stat = self.__result[theme]
size = len(self.data)
stat["label"] = getattr(func, "label", theme)
stat["sum"] = size
stat["cost"] = cost
stat["qps"] = float("%.3f" % (size / cost))
stat["fail"] = failed_counter
def benchmark(self):
funcs = DbConnection.benchmark_funcs(self.conn)
for name, func in funcs.items():
self.__test_func(func, name)
def process_func(msg, context):
id = int(msg)
multi_bar = context["bar"]
options = context["options"]
options.set("_id", id)
def progress_bar(theme, record, context):
bar, bar_index = context
cur_index, last_index = record[1:]
if bar.check(bar_index, cur_index + 1):
bar.print_bar(bar_index, cur_index + 1, "%d %s" % (bar_index + 1, theme))
if cur_index == last_index:
bar.reset(bar_index)
data_count = context["data_count"]
key_start = options.get("key_start")
data = context["data_class"](data_count, key_start + id * data_count, options)
bar_index = id - 1
semaphore = context["semaphore"]
queue_startline = context["queue_startline"]
conn_c = context["connection_class"]
    connection = conn_c(options)
    db_bench = None
    try:
if options.get("quiet") is True:
db_bench = DbBench(connection, data)
else:
db_bench = DbBench(connection, data,
hook_func=progress_bar, context=(multi_bar, bar_index))
multi_bar.reset(id)
queue_startline.put(id)
semaphore.acquire()
db_bench.benchmark()
context["queue"].put(db_bench.get_result(), True)
finally:
if db_bench:
del db_bench
del data
del connection
def multi_process_bench(options, connection_class, data_class=DataRecord):
if not isinstance(options, Options):
raise TypeError("param options must be a instance of Options")
if not issubclass(connection_class, DbConnection):
raise TypeError("param connection_class must be DbConnection's subclass ")
if not issubclass(data_class, Data):
raise TypeError("param data_class must be Data's subclass ")
processor_num = options.get("processor_num")
processor_num_max = options.get("processor_num_max")
record_num = options.get("record_num")
record_num_max = options.get("record_num_max")
    if processor_num > processor_num_max:
        processor_num = processor_num_max
        print("change processor_num to %d" % processor_num)
    if record_num > record_num_max:
        record_num = record_num_max
        print("change record_num to %d" % record_num)
count_per_processor = int(record_num / processor_num)
if count_per_processor <= 0:
print("count_per_processor is 0")
return
options.set("_id", 0)
def clear(func):
hook = connection_class.__dict__.get(func, None)
if hook is not None:
print("%s..." % func)
conn = connection_class(options)
conn.connect()
hook(conn)
conn.disconnect()
clear("set_up")
quiet = options.get("quiet")
if quiet:
bar = None
else:
bar = MultiBar(color=ColorPrint(36))
for i in range(processor_num):
bar.append_bar(ProgressBar(count_per_processor, "processor " + str(i)))
queue = Queue()
semaphore = Semaphore(processor_num)
options.set("_name", connection_class.__dict__.get("name", connection_class.__name__))
options.set("_count_per_processor", count_per_processor)
queue_startline = Queue()
context = {
"data_class": data_class,
"connection_class": connection_class,
"data_count": count_per_processor,
"bar": bar,
"lock": Lock(),
"queue": queue,
"queue_startline": queue_startline,
"semaphore": semaphore,
"options": copy.deepcopy(options)
}
pool = MultiProcess(processor_num, process_func, context, True)
# barrier lock
for i in range(processor_num):
semaphore.acquire()
for i in range(processor_num):
pool.process_msg(i + 1)
for i in range(processor_num):
queue_startline.get()
for i in range(processor_num):
semaphore.release()
pool.join()
clear("tear_down")
result = {
"stat": {},
"detail": [],
"dbinfo": {"type": options.get("_name"),
"host": options.get("host"),
"port": options.get("port"),
"table": options.get("table")}}
stat = result["stat"]
detail = result["detail"]
try:
for i in range(processor_num):
msg = queue.get(True, 1)
detail.append(copy.deepcopy(msg))
if len(stat) == 0:
result["stat"] = msg
stat = result["stat"]
continue
for k, v in msg.items():
target = stat[k]
target["fail"] += v["fail"]
target["sum"] += v["sum"]
target["cost"] = max(target["cost"], v["cost"])
except:
raise RuntimeError("benchmark lost, name: " + options.get("_name"))
if stat is not None:
for k, v in stat.items():
v["qps"] = int(v["sum"] / v["cost"])
print("%s %s" % (str(k), str(v)))
out_dir = options.get("out_dir")
if os.path.exists(out_dir) is False:
os.mkdir(out_dir)
with open("%s/benchmark_%s_%d_%d_%s.json" % (out_dir,
options.get("_name").replace("_", " "),
record_num,
processor_num,
options.get("tag", "tag")), "w") as fp:
fp.write(json.dumps(result, indent=2))
return result
class ConnectionExample(DbConnection):
def __init__(self, options):
super(ConnectionExample, self).__init__(options)
self.__client = None
def connect(self):
self.__client = {}
def disconnect(self):
self.__client = None
    @DbConnection.benchmark(u"test")
def null(self, record):
return True
def insert(self, record):
k, v = record[0]
self.__client[k] = v
return True
def search(self, record):
k, v = record[0]
self.__client[k] = v
return self.__client.get(k) == v
def update(self, record):
return self.search(record)
def delete(self, record):
k, v = record[0]
return self.__client.pop(k, None) is not None
def clear(self):
self.__client = {}
def example():
option = Options()
option.set("record_num", 100000)
option.set("processor_num", 2)
if option.parse_option() is False:
return
# option.set("quiet", True)
print(option)
result = multi_process_bench(option, ConnectionExample)
print(result)
if __name__ == "__main__":
example()
Last April, I went to Canada, and still haven’t posted any pictures. However, during the summer I went on a three-week trip across Europe, visiting some of the main cities in the western countries. I took so many pictures that it took me a long time to sort them out. Here are some of my favourite ones, related to architecture.
# Copyright (C) 2009-2011 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Marc Cluet <marc.cluet@canonical.com>
# Based on code by Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
"""
Mcollective
-----------
**Summary:** install, configure and start mcollective
This module installs, configures and starts mcollective. If the ``mcollective``
key is present in config, then mcollective will be installed and started.
Configuration for ``mcollective`` can be specified in the ``conf`` key under
``mcollective``. Each config value consists of a key value pair and will be
written to ``/etc/mcollective/server.cfg``. The ``public-cert`` and
``private-cert`` keys, if present in conf may be used to specify the public and
private certificates for mcollective. Their values will be written to
``/etc/mcollective/ssl/server-public.pem`` and
``/etc/mcollective/ssl/server-private.pem``.
.. note::
The ec2 metadata service is readable by non-root users.
If security is a concern, use include-once and ssl urls.
**Internal name:** ``cc_mcollective``
**Module frequency:** per instance
**Supported distros:** all
**Config keys**::
mcollective:
conf:
<key>: <value>
public-cert: |
-------BEGIN CERTIFICATE--------
<cert data>
-------END CERTIFICATE--------
private-cert: |
-------BEGIN CERTIFICATE--------
<cert data>
-------END CERTIFICATE--------
"""
import errno
import six
from six import BytesIO
# Used since this can maintain comments
# and doesn't need a top level section
from configobj import ConfigObj
from cloudinit import log as logging
from cloudinit import util
PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
SERVER_CFG = '/etc/mcollective/server.cfg'
LOG = logging.getLogger(__name__)
def configure(config, server_cfg=SERVER_CFG,
pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE):
# Read server.cfg (if it exists) values from the
# original file in order to be able to mix the rest up.
try:
old_contents = util.load_file(server_cfg, quiet=False, decode=False)
mcollective_config = ConfigObj(BytesIO(old_contents))
except IOError as e:
if e.errno != errno.ENOENT:
raise
else:
LOG.debug("Did not find file %s (starting with an empty"
" config)", server_cfg)
mcollective_config = ConfigObj()
for (cfg_name, cfg) in config.items():
if cfg_name == 'public-cert':
util.write_file(pubcert_file, cfg, mode=0o644)
mcollective_config[
'plugin.ssl_server_public'] = pubcert_file
mcollective_config['securityprovider'] = 'ssl'
elif cfg_name == 'private-cert':
util.write_file(pricert_file, cfg, mode=0o600)
mcollective_config[
'plugin.ssl_server_private'] = pricert_file
mcollective_config['securityprovider'] = 'ssl'
else:
if isinstance(cfg, six.string_types):
# Just set it in the 'main' section
mcollective_config[cfg_name] = cfg
elif isinstance(cfg, (dict)):
# Iterate through the config items, create a section if
# it is needed and then add/or create items as needed
if cfg_name not in mcollective_config.sections:
mcollective_config[cfg_name] = {}
for (o, v) in cfg.items():
mcollective_config[cfg_name][o] = v
else:
# Otherwise just try to convert it to a string
mcollective_config[cfg_name] = str(cfg)
try:
# We got all our config as wanted we'll copy
# the previous server.cfg and overwrite the old with our new one
util.copy(server_cfg, "%s.old" % (server_cfg))
except IOError as e:
if e.errno == errno.ENOENT:
# Doesn't exist to copy...
pass
else:
raise
# Now we got the whole (new) file, write to disk...
contents = BytesIO()
mcollective_config.write(contents)
util.write_file(server_cfg, contents.getvalue(), mode=0o644)
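# Example (illustrative): string values land in the top-level config, while
# dict values become their own section, e.g.
#   configure({'loglevel': 'debug', 'registration': {'interval': '300'}})
# writes ``loglevel = debug`` plus a ``[registration]`` section containing
# ``interval = 300`` to /etc/mcollective/server.cfg.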
def handle(name, cfg, cloud, log, _args):
# If there isn't a mcollective key in the configuration don't do anything
if 'mcollective' not in cfg:
log.debug(("Skipping module named %s, "
"no 'mcollective' key in configuration"), name)
return
mcollective_cfg = cfg['mcollective']
# Start by installing the mcollective package ...
cloud.distro.install_packages(("mcollective",))
# ... and then update the mcollective configuration
if 'conf' in mcollective_cfg:
configure(config=mcollective_cfg['conf'])
# restart mcollective to handle updated config
util.subp(['service', 'mcollective', 'restart'], capture=False)
# vi: ts=4 expandtab
Georgian and face-fix astragal style windows, for the traditional cottage-style effect.
At Vevo we manufacture our Georgian style double glazed sealed units with an internal bar to give the effect (at a distance) of separate sealed units. This popular feature is always in demand as it combines stylish traditional looks, with an easy to clean, low maintenance finish.
Designed to replicate traditional timber windows, the Vevo Georgian astragal bar design goes one step further than our Georgian window. As well as incorporating spacer bars between the glass, bars are also raised from the glass to give the window an authentic cottage style look, making the astragal bar extremely popular, particularly with owners of older-style properties.
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import unittest
from pathlib import Path
import nbformat
import pandas as pd
import pytest
from nbconvert.preprocessors import CellExecutionError, ExecutePreprocessor
from msticpy.analysis.timeseries import timeseries_anomalies_stl
_NB_FOLDER = "docs/notebooks"
_NB_NAME = "TimeSeriesAnomaliesVisualization.ipynb"
_test_data_folders = [
d for d, _, _ in os.walk(os.getcwd()) if d.endswith("/docs/notebooks/data")
]
if len(_test_data_folders) == 1:
_TEST_DATA = _test_data_folders[0]
else:
_TEST_DATA = "./docs/notebooks/data"
class TestTimeSeries(unittest.TestCase):
"""Unit test class."""
def setUp(self):
input_file = os.path.join(_TEST_DATA, "TimeSeriesDemo.csv")
self.input_df = pd.read_csv(
input_file,
index_col=["TimeGenerated"],
usecols=["TimeGenerated", "TotalBytesSent"],
)
def test_timeseries_anomalies_stl(self):
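        # STL decomposes the input series into seasonal, trend and residual
        # components; rows whose residual-based score exceeds the threshold
        # are flagged in the 'anomalies' column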
out_df = timeseries_anomalies_stl(data=self.input_df)
self.assertIn("residual", out_df.columns)
self.assertIn("trend", out_df.columns)
self.assertIn("seasonal", out_df.columns)
self.assertIn("weights", out_df.columns)
self.assertIn("baseline", out_df.columns)
self.assertIn("score", out_df.columns)
self.assertIn("anomalies", out_df.columns)
self.assertGreater(len(out_df[out_df["anomalies"] == 1]), 0)
@pytest.mark.skipif(
not os.environ.get("MSTICPY_TEST_NOSKIP"), reason="Skipped for local tests."
)
def test_timeseries_controls(self):
nb_path = Path(_NB_FOLDER).joinpath(_NB_NAME)
abs_path = Path(_NB_FOLDER).absolute()
with open(nb_path) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
try:
ep.preprocess(nb, {"metadata": {"path": abs_path}})
except CellExecutionError:
nb_err = str(nb_path).replace(".ipynb", "-err.ipynb")
msg = f"Error executing the notebook '{nb_path}'.\n"
msg += f"See notebook '{nb_err}' for the traceback."
print(msg)
with open(nb_err, mode="w", encoding="utf-8") as f:
nbformat.write(nb, f)
raise
Murdoch University experts specialise in Extractive Metallurgy, the technology that underpins Australia's minerals industry. They work on existing and developing technologies and on how to operate the industrial plants that treat mineral ores to produce metals and mineral products.
Associate Professor Gamini Senanayake is a senior lecturer in Mineral Science at Murdoch University and an expert on the application of ionic activity data and speciation analysis to hydrometallurgical processes and the rationalisation of the thermodynamics and kinetics of the leaching of minerals and recovery of metals.
Associate Professor Senanayake’s projects include: catalysts for reactions of hydrometallurgical importance; high-temperature hydrometallurgical thermodynamics; beneficiation and utilisation of red mud residues from alumina processing; mechanisms of gold leaching; ion solvation thermodynamics and applications; and atmospheric leaching and recovery of metals from nickel laterite ores.
Dr Aleks Nikoloski is an expert in the electrochemistry of leaching and the reduction processes used in the hydrometallurgical treatment of metals and minerals.
He has significant experience in metallurgical process development using pilot-plant-scale testwork, and investigates the kinetics and thermodynamics of metallurgical processes for the treatment of non-ferrous metals, in particular nickel, cobalt, copper, gold and the platinum group metals.
Dr Nikoloski teaches extractive metallurgy and supervises postgraduate research students at Murdoch University.
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for removing health checks from target pools."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.http_health_checks import (
flags as http_health_check_flags)
from googlecloudsdk.command_lib.compute.target_pools import flags
class RemoveHealthChecks(base_classes.NoOutputAsyncMutator):
"""Remove an HTTP health check from a target pool.
*{command}* is used to remove an HTTP health check
from a target pool. Health checks are used to determine
the health status of instances in the target pool. For more
information on health checks and load balancing, see
[](https://cloud.google.com/compute/docs/load-balancing-and-autoscaling/)
"""
HEALTH_CHECK_ARG = None
TARGET_POOL_ARG = None
@classmethod
def Args(cls, parser):
cls.HEALTH_CHECK_ARG = (
http_health_check_flags.HttpHealthCheckArgumentForTargetPool(
'remove from'))
cls.HEALTH_CHECK_ARG.AddArgument(parser)
cls.TARGET_POOL_ARG = flags.TargetPoolArgument(
help_suffix=' from which to remove the health check.')
cls.TARGET_POOL_ARG.AddArgument(
parser, operation_type='remove health checks from')
@property
def service(self):
return self.compute.targetPools
@property
def method(self):
return 'RemoveHealthCheck'
@property
def resource_type(self):
return 'targetPools'
def CreateRequests(self, args):
http_health_check_ref = self.HEALTH_CHECK_ARG.ResolveAsResource(
args, self.resources)
target_pool_ref = self.TARGET_POOL_ARG.ResolveAsResource(
args,
self.resources,
scope_lister=compute_flags.GetDefaultScopeLister(self.compute_client,
self.project))
request = self.messages.ComputeTargetPoolsRemoveHealthCheckRequest(
region=target_pool_ref.region,
project=self.project,
targetPool=target_pool_ref.Name(),
targetPoolsRemoveHealthCheckRequest=(
self.messages.TargetPoolsRemoveHealthCheckRequest(
healthChecks=[self.messages.HealthCheckReference(
healthCheck=http_health_check_ref.SelfLink())])))
return [request]
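# Example invocation of the resulting gcloud command (the resource names
# below are hypothetical):
#   gcloud compute target-pools remove-health-checks my-pool \
#       --http-health-check my-check --region us-central1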
|
Hachiman finds himself coerced by his well-meaning student advisor into joining the one-member Service Club. There he encounters club founder Yukino Yukinoshita, a smart, attractive, walking superiority complex who looks down on the entire student body. These two negative personalities are quick to attract Yui Yuigahama, who's cute, bright, cheerful, and needs the Service Club's help to… bake cookies? Is this a recipe for romance or the precursor for a nuclear meltdown? Will there be cookies, nookie, or a reason for everyone to play hooky? |
from subprocess import call
import shutil
import simplejson
from collections import OrderedDict
import os
import sys
# this is almost done but there is something wrong with the
# updating of the package.json file
## Helpers
# performs git clone into a new directory
def _clone_mkdir(git_url, new_name):
    whole_path = git_url + ' ' + new_name
    call('git clone ' + whole_path, shell=True)
def _remove_git(new_name):
git_path = new_name + '/.git'
shutil.rmtree(git_path)
def _prep_json(path):
# overkill but I was having issues. The following steps load and clean up
# the package.json string before loading it into simplejson
json_file = open(path, 'r+')
f = json_file.read()
g = f.split('\n')
    for i, item in enumerate(g):
        g[i] = item.strip()
together = ''.join(g)
# load json into as an OrderedDict to retain original order
return simplejson.loads(together, object_pairs_hook=OrderedDict)
# object to collect appropriate data and to then use it
class Boil(object):
    @staticmethod
    def _keywords(pack_keys):
        # split a keyword string on commas (stripping whitespace), or on
        # whitespace if no commas are present
        if ',' in pack_keys:
            return [k.strip() for k in pack_keys.split(',')]
        else:
            return pack_keys.split()
@classmethod
def git_clone(cls, git_url, new_name):
_clone_mkdir(git_url, new_name)
_remove_git(new_name)
@classmethod
def cleanup_packagejson(cls, new_name, author, description, version,
license, pack_keys):
# package.json path
pack_path = new_name + '/package.json'
data = _prep_json(pack_path)
        # update fields, including keywords
data["name"] = new_name
data["author"] = author
data["description"] = description
data["version"] = version
data["license"] = license
data["keywords"] = self._keywords(pack_keys)
# convert OrderedDict into a json string
outfile = simplejson.dumps(data, indent=4)
# remove old package.json and create/write a new one
os.remove(pack_path)
new_pack = open(pack_path, 'w')
new_pack.write(outfile)
new_pack.close()
@classmethod
def remove_licence(cls, new_name):
license_path = new_name + '/LICENCE'
try:
os.remove(license_path)
        except OSError:
            print('Something went wrong when removing the license!')
            sys.exit(1)
@classmethod
def clean_readme(cls, new_name):
readme_path = new_name + '/README.md'
# readme_path = 'new-JS' + '/README.md'
try:
os.remove(readme_path)
readme = open(readme_path,'w')
readme.close()
        except OSError:
            print('Something went wrong when updating the readme!')
            sys.exit(1)
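# A minimal usage sketch (the repository URL and package metadata below are
# hypothetical):
# Boil.git_clone('https://github.com/user/boilerplate.git', 'my-project')
# Boil.cleanup_packagejson('my-project', 'Jane Doe', 'A demo package',
#                          '0.1.0', 'MIT', 'demo, boilerplate')
# Boil.remove_licence('my-project')
# Boil.clean_readme('my-project')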
|
Thank you for getting the word out there. I wrote a book about my experiences with scammers, “Scambaiting for the Insane.” It’s available for the Amazon Kindle for about a dollar, which is the least they’ll let you charge for a book. I would give it away if I could.
I am a scam victim. I lost my money to a person who claimed he was from Scotland, UK. He sent me a package and I paid all the customs fees, worth Rs 500,000 INR, but it still has not been delivered. Can anyone help me? How can I get back the money I paid?
from __future__ import division
from django.contrib.gis.geos import Point, fromstr
from mobiletrans.mtlocation import models
from mobiletrans.mtdistmap.cta_conn import load_transitnetwork
from mobiletrans.mtdistmap.transit_network import Path
def distance_to_time(distance, unit="m", units_per_min=60):
return_value = getattr(distance, unit) * (1 / units_per_min )
return return_value
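# For example, with the defaults above, distance_to_time(d, "m", 60) converts
# a GeoDjango Distance object d into walking minutes, assuming a pace of
# 60 metres per minute (about 3.6 km/h).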
class RoutePlanner(object):
def __init__(self, tn, unit="m", units_per_min=60, max_distance=1500, num_routes=2):
self.tn = tn
self.unit = unit
self.units_per_min = units_per_min
self.max_distance = max_distance
self.num_routes = num_routes
def get_distance_dict(self):
return {self.unit:self.max_distance}
def fastest_route_from_point(self, point, station_id):
distance_dict=self.get_distance_dict()
stations = models.TransitStop.objects.filter(location_type=1).get_closest_x(point, distance_dict, number=self.num_routes )
paths = []
for station in stations:
path = self.tn.shortest_path(str(station.stop_id), station_id)
if path:
walking_distance = station.distance
                walking_time = distance_to_time(walking_distance, self.unit, self.units_per_min)
new_path = Path(self.tn,
["walk_%s" % (walking_time)] + path.stops,
path.total_time + walking_time )
paths.append(new_path)
        paths = sorted(paths, key=lambda x: x.total_time)
return paths
"""
from mobiletrans.mtdistmap.cta_conn import load_transitnetwork
from mobiletrans.mtdistmap.route_planner import RoutePlanner
from django.contrib.gis.geos import Point, fromstr
from_point = fromstr('POINT(%s %s)' % ("-87.66638826", "41.96182144"))
#from_point = GPlace.objects.get(name__icontains='Precious Blood Church')
tn = load_transitnetwork()
t = RoutePlanner(tn)
p = t.fastest_route_from_point(from_point, '41320')
"""
|
Happy to announce our new project, “Paratus Rock View”, with the best facilities and a layout connecting to Airport Road (Devanahalli is just 35 km away). It is located around Bangalore on the Narasapura to Vemagal Road, near the Narasapura Industrial Area, Kolar taluk.
Situated near the much-awaited Narasapura Industrial Hub, in an exclusive locality and a great plan for investment, “Paratus Rock View” has already become one of the most preferred residential destinations for the upwardly mobile. Planned as an oasis of peace and quiet, “Paratus Rock View” assures relief from the stress of modern-day living. Stepping back into the comfort and healing arms of Mother Nature, these aesthetically designed bungalows fulfill all the demands of form and function.
Very close to the Japanese bullet train project.
Just 38kms to Bangalore city. |
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
This module defines Unicode character categories and blocks.
"""
from sys import maxunicode
from collections.abc import Iterable, MutableSet
from .unicode_categories import RAW_UNICODE_CATEGORIES
from .codepoints import code_point_order, code_point_repr, iter_code_points, get_code_point_range
class RegexError(Exception):
"""
Error in a regular expression or in a character class specification.
This exception is derived from `Exception` base class and is raised
only by the regex subpackage.
"""
def iterparse_character_subset(s, expand_ranges=False):
"""
Parses a regex character subset, generating a sequence of code points
    and code point ranges. An unescaped hyphen (-) that is not at the
    start or at the end is interpreted as a range specifier.
:param s: a string representing the character subset.
:param expand_ranges: if set to `True` then expands character ranges.
:return: yields integers or couples of integers.
"""
escaped = False
on_range = False
char = None
length = len(s)
subset_index_iterator = iter(range(len(s)))
for k in subset_index_iterator:
if k == 0:
char = s[0]
if char == '\\':
escaped = True
elif char in r'[]' and length > 1:
raise RegexError("bad character %r at position 0" % char)
elif expand_ranges:
yield ord(char)
elif length <= 2 or s[1] != '-':
yield ord(char)
elif s[k] == '-':
if escaped or (k == length - 1):
char = s[k]
yield ord(char)
escaped = False
elif on_range:
char = s[k]
yield ord(char)
on_range = False
else:
# Parse character range
on_range = True
k = next(subset_index_iterator)
end_char = s[k]
if end_char == '\\' and (k < length - 1):
if s[k + 1] in r'-|.^?*+{}()[]':
k = next(subset_index_iterator)
end_char = s[k]
elif s[k + 1] in r'sSdDiIcCwWpP':
msg = "bad character range '%s-\\%s' at position %d: %r"
raise RegexError(msg % (char, s[k + 1], k - 2, s))
if ord(char) > ord(end_char):
msg = "bad character range '%s-%s' at position %d: %r"
raise RegexError(msg % (char, end_char, k - 2, s))
elif expand_ranges:
yield from range(ord(char) + 1, ord(end_char) + 1)
else:
yield ord(char), ord(end_char) + 1
elif s[k] in r'|.^?*+{}()':
if escaped:
escaped = False
on_range = False
char = s[k]
yield ord(char)
elif s[k] in r'[]':
if not escaped and length > 1:
raise RegexError("bad character %r at position %d" % (s[k], k))
escaped = on_range = False
char = s[k]
if k >= length - 2 or s[k + 1] != '-':
yield ord(char)
elif s[k] == '\\':
if escaped:
escaped = on_range = False
char = '\\'
yield ord(char)
else:
escaped = True
else:
if escaped:
escaped = False
yield ord('\\')
on_range = False
char = s[k]
if k >= length - 2 or s[k + 1] != '-':
yield ord(char)
if escaped:
yield ord('\\')
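# For example (a sketch of the generator's output):
#   list(iterparse_character_subset('a-dz'))                    ->  [(97, 101), 122]
#   list(iterparse_character_subset('a-d', expand_ranges=True)) ->  [97, 98, 99, 100]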
class UnicodeSubset(MutableSet):
"""
Represents a subset of Unicode code points, implemented with an ordered list of
integer values and ranges. Codepoints can be added or discarded using sequences
of integer values and ranges or with strings equivalent to regex character set.
    :param codepoints: a sequence of integer values and ranges, another UnicodeSubset \
    instance or a string equivalent of a regex character set.
"""
__slots__ = '_codepoints',
def __init__(self, codepoints=None):
if not codepoints:
self._codepoints = list()
elif isinstance(codepoints, list):
self._codepoints = sorted(codepoints, key=code_point_order)
elif isinstance(codepoints, UnicodeSubset):
self._codepoints = codepoints.codepoints.copy()
else:
self._codepoints = list()
self.update(codepoints)
@property
def codepoints(self):
return self._codepoints
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, str(self))
def __str__(self):
return ''.join(code_point_repr(cp) for cp in self._codepoints)
def copy(self):
return self.__copy__()
def __copy__(self):
return UnicodeSubset(self._codepoints)
def __reversed__(self):
for item in reversed(self._codepoints):
if isinstance(item, int):
yield item
else:
yield from reversed(range(item[0], item[1]))
def complement(self):
last_cp = 0
for cp in self._codepoints:
if isinstance(cp, int):
cp = cp, cp + 1
diff = cp[0] - last_cp
if diff > 2:
yield last_cp, cp[0]
elif diff == 2:
yield last_cp
yield last_cp + 1
elif diff == 1:
yield last_cp
elif diff:
raise ValueError("unordered code points found in {!r}".format(self))
last_cp = cp[1]
if last_cp < maxunicode:
yield last_cp, maxunicode + 1
elif last_cp == maxunicode:
yield maxunicode
def iter_characters(self):
return map(chr, self.__iter__())
#
# MutableSet's abstract methods implementation
def __contains__(self, value):
if not isinstance(value, int):
try:
value = ord(value)
except TypeError:
return False
for cp in self._codepoints:
if not isinstance(cp, int):
if cp[0] > value:
return False
elif cp[1] <= value:
continue
else:
return True
elif cp > value:
return False
elif cp == value:
return True
return False
def __iter__(self):
for cp in self._codepoints:
if isinstance(cp, int):
yield cp
else:
yield from range(*cp)
def __len__(self):
k = 0
for _ in self:
k += 1
return k
def update(self, *others):
for value in others:
if isinstance(value, str):
for cp in iter_code_points(iterparse_character_subset(value), reverse=True):
self.add(cp)
else:
for cp in iter_code_points(value, reverse=True):
self.add(cp)
def add(self, value):
try:
start_value, end_value = get_code_point_range(value)
except TypeError:
raise ValueError("{!r} is not a Unicode code point value/range".format(value))
code_points = self._codepoints
last_index = len(code_points) - 1
for k, cp in enumerate(code_points):
if isinstance(cp, int):
cp = cp, cp + 1
if end_value < cp[0]:
code_points.insert(k, value if isinstance(value, int) else tuple(value))
elif start_value > cp[1]:
continue
elif end_value > cp[1]:
if k == last_index:
code_points[k] = min(cp[0], start_value), end_value
else:
next_cp = code_points[k + 1]
higher_bound = next_cp if isinstance(next_cp, int) else next_cp[0]
if end_value <= higher_bound:
code_points[k] = min(cp[0], start_value), end_value
else:
code_points[k] = min(cp[0], start_value), higher_bound
start_value = higher_bound
continue
elif start_value < cp[0]:
code_points[k] = start_value, cp[1]
break
else:
self._codepoints.append(tuple(value) if isinstance(value, list) else value)
def difference_update(self, *others):
for value in others:
if isinstance(value, str):
for cp in iter_code_points(iterparse_character_subset(value), reverse=True):
self.discard(cp)
else:
for cp in iter_code_points(value, reverse=True):
self.discard(cp)
def discard(self, value):
try:
start_cp, end_cp = get_code_point_range(value)
except TypeError:
raise ValueError("{!r} is not a Unicode code point value/range".format(value))
code_points = self._codepoints
for k in reversed(range(len(code_points))):
cp = code_points[k]
if isinstance(cp, int):
cp = cp, cp + 1
if start_cp >= cp[1]:
break
elif end_cp >= cp[1]:
if start_cp <= cp[0]:
del code_points[k]
elif start_cp - cp[0] > 1:
code_points[k] = cp[0], start_cp
else:
code_points[k] = cp[0]
elif end_cp > cp[0]:
if start_cp <= cp[0]:
if cp[1] - end_cp > 1:
code_points[k] = end_cp, cp[1]
else:
code_points[k] = cp[1] - 1
else:
if cp[1] - end_cp > 1:
code_points.insert(k + 1, (end_cp, cp[1]))
else:
code_points.insert(k + 1, cp[1] - 1)
if start_cp - cp[0] > 1:
code_points[k] = cp[0], start_cp
else:
code_points[k] = cp[0]
#
# MutableSet's mixin methods override
def clear(self):
del self._codepoints[:]
def __eq__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
elif isinstance(other, UnicodeSubset):
return self._codepoints == other._codepoints
else:
return self._codepoints == other
def __ior__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
elif isinstance(other, UnicodeSubset):
other = reversed(other._codepoints)
elif isinstance(other, str):
other = reversed(UnicodeSubset(other)._codepoints)
else:
other = iter_code_points(other, reverse=True)
for cp in other:
self.add(cp)
return self
def __isub__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
elif isinstance(other, UnicodeSubset):
other = reversed(other._codepoints)
elif isinstance(other, str):
other = reversed(UnicodeSubset(other)._codepoints)
else:
other = iter_code_points(other, reverse=True)
for cp in other:
self.discard(cp)
return self
def __sub__(self, other):
obj = self.copy()
return obj.__isub__(other)
__rsub__ = __sub__
def __iand__(self, other):
for value in (self - other):
self.discard(value)
return self
def __ixor__(self, other):
if other is self:
self.clear()
return self
elif not isinstance(other, Iterable):
return NotImplemented
elif not isinstance(other, UnicodeSubset):
other = UnicodeSubset(other)
for value in other:
if value in self:
self.discard(value)
else:
self.add(value)
return self
UNICODE_CATEGORIES = {k: UnicodeSubset(v) for k, v in RAW_UNICODE_CATEGORIES.items()}
# See http://www.unicode.org/Public/UNIDATA/Blocks.txt
UNICODE_BLOCKS = {
'IsBasicLatin': UnicodeSubset('\u0000-\u007F'),
'IsLatin-1Supplement': UnicodeSubset('\u0080-\u00FF'),
'IsLatinExtended-A': UnicodeSubset('\u0100-\u017F'),
'IsLatinExtended-B': UnicodeSubset('\u0180-\u024F'),
'IsIPAExtensions': UnicodeSubset('\u0250-\u02AF'),
'IsSpacingModifierLetters': UnicodeSubset('\u02B0-\u02FF'),
'IsCombiningDiacriticalMarks': UnicodeSubset('\u0300-\u036F'),
'IsGreek': UnicodeSubset('\u0370-\u03FF'),
'IsCyrillic': UnicodeSubset('\u0400-\u04FF'),
'IsArmenian': UnicodeSubset('\u0530-\u058F'),
'IsHebrew': UnicodeSubset('\u0590-\u05FF'),
'IsArabic': UnicodeSubset('\u0600-\u06FF'),
'IsSyriac': UnicodeSubset('\u0700-\u074F'),
'IsThaana': UnicodeSubset('\u0780-\u07BF'),
'IsDevanagari': UnicodeSubset('\u0900-\u097F'),
'IsBengali': UnicodeSubset('\u0980-\u09FF'),
'IsGurmukhi': UnicodeSubset('\u0A00-\u0A7F'),
'IsGujarati': UnicodeSubset('\u0A80-\u0AFF'),
'IsOriya': UnicodeSubset('\u0B00-\u0B7F'),
'IsTamil': UnicodeSubset('\u0B80-\u0BFF'),
'IsTelugu': UnicodeSubset('\u0C00-\u0C7F'),
'IsKannada': UnicodeSubset('\u0C80-\u0CFF'),
'IsMalayalam': UnicodeSubset('\u0D00-\u0D7F'),
'IsSinhala': UnicodeSubset('\u0D80-\u0DFF'),
'IsThai': UnicodeSubset('\u0E00-\u0E7F'),
'IsLao': UnicodeSubset('\u0E80-\u0EFF'),
'IsTibetan': UnicodeSubset('\u0F00-\u0FFF'),
'IsMyanmar': UnicodeSubset('\u1000-\u109F'),
'IsGeorgian': UnicodeSubset('\u10A0-\u10FF'),
'IsHangulJamo': UnicodeSubset('\u1100-\u11FF'),
'IsEthiopic': UnicodeSubset('\u1200-\u137F'),
'IsCherokee': UnicodeSubset('\u13A0-\u13FF'),
'IsUnifiedCanadianAboriginalSyllabics': UnicodeSubset('\u1400-\u167F'),
'IsOgham': UnicodeSubset('\u1680-\u169F'),
'IsRunic': UnicodeSubset('\u16A0-\u16FF'),
'IsKhmer': UnicodeSubset('\u1780-\u17FF'),
'IsMongolian': UnicodeSubset('\u1800-\u18AF'),
'IsLatinExtendedAdditional': UnicodeSubset('\u1E00-\u1EFF'),
'IsGreekExtended': UnicodeSubset('\u1F00-\u1FFF'),
'IsGeneralPunctuation': UnicodeSubset('\u2000-\u206F'),
'IsSuperscriptsandSubscripts': UnicodeSubset('\u2070-\u209F'),
'IsCurrencySymbols': UnicodeSubset('\u20A0-\u20CF'),
'IsCombiningMarksforSymbols': UnicodeSubset('\u20D0-\u20FF'),
'IsLetterlikeSymbols': UnicodeSubset('\u2100-\u214F'),
'IsNumberForms': UnicodeSubset('\u2150-\u218F'),
'IsArrows': UnicodeSubset('\u2190-\u21FF'),
'IsMathematicalOperators': UnicodeSubset('\u2200-\u22FF'),
'IsMiscellaneousTechnical': UnicodeSubset('\u2300-\u23FF'),
'IsControlPictures': UnicodeSubset('\u2400-\u243F'),
'IsOpticalCharacterRecognition': UnicodeSubset('\u2440-\u245F'),
'IsEnclosedAlphanumerics': UnicodeSubset('\u2460-\u24FF'),
'IsBoxDrawing': UnicodeSubset('\u2500-\u257F'),
'IsBlockElements': UnicodeSubset('\u2580-\u259F'),
'IsGeometricShapes': UnicodeSubset('\u25A0-\u25FF'),
'IsMiscellaneousSymbols': UnicodeSubset('\u2600-\u26FF'),
'IsDingbats': UnicodeSubset('\u2700-\u27BF'),
'IsBraillePatterns': UnicodeSubset('\u2800-\u28FF'),
'IsCJKRadicalsSupplement': UnicodeSubset('\u2E80-\u2EFF'),
'IsKangxiRadicals': UnicodeSubset('\u2F00-\u2FDF'),
'IsIdeographicDescriptionCharacters': UnicodeSubset('\u2FF0-\u2FFF'),
'IsCJKSymbolsandPunctuation': UnicodeSubset('\u3000-\u303F'),
'IsHiragana': UnicodeSubset('\u3040-\u309F'),
'IsKatakana': UnicodeSubset('\u30A0-\u30FF'),
'IsBopomofo': UnicodeSubset('\u3100-\u312F'),
'IsHangulCompatibilityJamo': UnicodeSubset('\u3130-\u318F'),
'IsKanbun': UnicodeSubset('\u3190-\u319F'),
'IsBopomofoExtended': UnicodeSubset('\u31A0-\u31BF'),
'IsEnclosedCJKLettersandMonths': UnicodeSubset('\u3200-\u32FF'),
'IsCJKCompatibility': UnicodeSubset('\u3300-\u33FF'),
'IsCJKUnifiedIdeographsExtensionA': UnicodeSubset('\u3400-\u4DB5'),
'IsCJKUnifiedIdeographs': UnicodeSubset('\u4E00-\u9FFF'),
'IsYiSyllables': UnicodeSubset('\uA000-\uA48F'),
'IsYiRadicals': UnicodeSubset('\uA490-\uA4CF'),
'IsHangulSyllables': UnicodeSubset('\uAC00-\uD7A3'),
'IsHighSurrogates': UnicodeSubset('\uD800-\uDB7F'),
'IsHighPrivateUseSurrogates': UnicodeSubset('\uDB80-\uDBFF'),
'IsLowSurrogates': UnicodeSubset('\uDC00-\uDFFF'),
'IsPrivateUse': UnicodeSubset('\uE000-\uF8FF\U000F0000-\U000FFFFF\U00100000-\U0010FFFF'),
'IsCJKCompatibilityIdeographs': UnicodeSubset('\uF900-\uFAFF'),
'IsAlphabeticPresentationForms': UnicodeSubset('\uFB00-\uFB4F'),
'IsArabicPresentationForms-A': UnicodeSubset('\uFB50-\uFDFF'),
'IsCombiningHalfMarks': UnicodeSubset('\uFE20-\uFE2F'),
'IsCJKCompatibilityForms': UnicodeSubset('\uFE30-\uFE4F'),
'IsSmallFormVariants': UnicodeSubset('\uFE50-\uFE6F'),
'IsArabicPresentationForms-B': UnicodeSubset('\uFE70-\uFEFE'),
'IsSpecials': UnicodeSubset('\uFEFF\uFFF0-\uFFFD'),
'IsHalfwidthandFullwidthForms': UnicodeSubset('\uFF00-\uFFEF'),
'IsOldItalic': UnicodeSubset('\U00010300-\U0001032F'),
'IsGothic': UnicodeSubset('\U00010330-\U0001034F'),
'IsDeseret': UnicodeSubset('\U00010400-\U0001044F'),
'IsByzantineMusicalSymbols': UnicodeSubset('\U0001D000-\U0001D0FF'),
'IsMusicalSymbols': UnicodeSubset('\U0001D100-\U0001D1FF'),
'IsMathematicalAlphanumericSymbols': UnicodeSubset('\U0001D400-\U0001D7FF'),
'IsCJKUnifiedIdeographsExtensionB': UnicodeSubset('\U00020000-\U0002A6D6'),
'IsCJKCompatibilityIdeographsSupplement': UnicodeSubset('\U0002F800-\U0002FA1F'),
'IsTags': UnicodeSubset('\U000E0000-\U000E007F'),
}
UNICODE_BLOCKS['IsPrivateUse'].update('\U000F0000-\U0010FFFD')
def unicode_subset(name):
if name.startswith('Is'):
try:
return UNICODE_BLOCKS[name]
except KeyError:
raise RegexError("%r doesn't match to any Unicode block." % name)
else:
try:
return UNICODE_CATEGORIES[name]
except KeyError:
raise RegexError("%r doesn't match to any Unicode category." % name)
|
A walk down Azalea Way at the University of Washington’s arboretum continues to be spectacular. Several late blooming cherry varieties are now in full bloom. One had such delicate blooms and another had very fluffy blossoms. The Magnolias are open now and make for quite the showy tree. Rhododendrons are just beginning to open. I found one with spectacular flowers in large clumps on the hybrid trail. It is definitely worth a visit.
I went up to Mt Vernon in Skagit County to see if the tulips were beginning to bloom. Tulips are a major agricultural product for this part of Washington; the area grows three-quarters of the tulip bulbs produced in the United States. The rich organic soil of the Skagit plain is good for their growth. I found fields stretching as far as the eye could see with wonderful colors: reds, purples and pinks in long rows. It was lightly raining while I was there, which created perfect light for photography, and I found some wonderful specimens to capture. You can see some more at http://thomasbancroft.photoshelter.com/gallery/Tulips-in-Skagit-County/G0000zx9niJPQpv0/C0000.fXuY8bBxag. If you have a chance, drive up to see these fields while they are in full bloom; it is quite the sight.
I had some fun, making abstract art with some of the designs formed by the mixed colors.
I was excited to spot a Trillium under a tree on the hill to the east side of Azalea Way in the UW Arboretum. This sighting flooded me with fond memories of searching for Trilliums in Pennsylvania with my mother and sisters. Finding this showy 3-petaled flower confirms that spring is here and we can rejoice that more flowers are on their way. Everything about these plants comes in threes: petals and leaves. My family used to take long walks through the hollow on our farm to look for Trilliums and see what other flowers might be coming soon. Seeing this flower in Seattle was wonderful for me. The Pacific Northwest’s Trillium is larger than the Trilliums of western Pennsylvania. The plant I saw was 18-20 inches tall and the flower was at least 2 inches across. This one was starting to show a little pink in the middle, indicating that it had been open for a while; they gradually develop some pink as they age. One flower blooms each year on an individual plant, and it is really a beauty. White-tailed Deer in the east have really decimated trilliums. Deer repeatedly eat them to the ground and eventually the plants die. It looks like trilliums are doing well at the arboretum.
The Pacific Northwest Trillium is common in woods throughout western Washington and Oregon. I saw a number of others as I strolled through the Arboretum. Look for them in the next few weeks if you are out enjoying wildlands.
On Wednesday, I took a late afternoon walk through Magnuson Park. The trees were beginning to bloom. Some of the willows had beautiful flowers opening along their branches. They looked so delicate in the late afternoon sun. New leaves were beginning to emerge from the branches; soon the area will be green with fresh leaves. Several cherry trees had flowers and other species were beginning to flower too.
I spotted a small flock of bushtits feeding in the bushes along the trail. These birds are incredible acrobats, hanging upside down to feed and working all sides of branches to find small insects, spiders and other tasty morsels. They would flip rapidly from one branch to another, jumping along branches to see what was present. Some individuals were very brown while others had more yellow on them. One individual seemed to be scratching the side of its head on a branch, first on one side and then the other. These birds travel in flocks of up to 40 or more individuals; there were at least a dozen in this group. In some parts of their range, a breeding pair will have helpers at the nest. These may be young from previous years or non-breeding individuals. They build a hanging nest that is completely enclosed. The nest and a safe nesting site are a valuable commodity for this species, and scientists think that having more than a pair at a nest may improve reproductive success. Nesting should start soon in Washington.
I saw a Mourning Cloak butterfly crisscrossing the field too. Mourning Cloaks are different from most butterflies in that the adults overwinter in Washington in cavities or bark crevices. This gives them an advantage in spring: they can emerge, mate, and immediately begin laying eggs. Willows are one of the favorite food plants of the larvae, and this strategy allows the first brood to feed on the newly emerging, tender leaves.
I will definitely return to Magnuson Park to see what happens later this spring.
The cherry trees in the UW Arboretum are in full bloom right now. It was lightly raining while I walked along Azalea Way and the soft light made the cherry blossoms really glow. Light rain and cloud cover creates a beautiful light for viewing flowers and really appreciating the intensity of colors. The arboretum has a number of varieties of cherries and each is slightly different in their flowers and tree shape. The twisted trunks of some trees created an intricate design with their knobs and blanket of moss. A dusting of blossoms floated to the ground under a few of the trees. Up close, the blossoms were just exquisite to study and the perfume of some trees was strong and sweet. I stood for several minutes under many to just enjoy the ambiance of the moment. A walk now through the arboretum is well worth your time. Soon the azaleas and rhododendrons will be blooming.
The chickadees and juncos were singing away as I strolled along the path. Several robins were also telling the world they had staked out territories for the coming breeding season. I watched several crows work the lawn for grubs and worms. The place was alive with activity and showing signs that spring is here.
Take a walk in the woods to see what spring offers. |
# -*- coding: utf-8 -*-
__author__ = 'nickl-'
__all__ = ('Npm', )
from string import strip
from re import match, sub
from aero.__version__ import __version__,enc
from .base import BaseAdapter
class Npm(BaseAdapter):
"""
Node package manager adapter.
"""
def search(self, query):
response = self.command('search -q', query)[0].decode(*enc)
lst = list(
self.__parse_search(line) for line in response.splitlines()
if 'npm http' not in line and not bool(match(
                r'^NAME\s+DESCRIPTION\s+AUTHOR\s+DATE\s+KEYWORDS', line
))
)
if lst:
return dict([(k, v) for k, v in lst if k != 0])
return {}
def __parse_search(self, result):
r = match(
            r'^([A-Za-z0-9\-]*)\s+(\w.*)=(.+)\s+(\d\d\d\d[\d\-: ]*)\s*?(\w?.*?)$',
result
)
if r and len(r.groups()) == 5:
r = map(strip, list(r.groups()))
pkg = self.package_name(r.pop(0))
return pkg, r[2] + '\n' + r[0]
return 0, 0
def install(self, query):
return self.shell('install', query, ['--global'])
def info(self, query):
response = self.command('view', query)[0].decode(*enc)
try:
import json
            r = json.loads(sub("'", '"', sub(r'\s(\w+):', r' "\1":', response.strip())))
response = []
for k in sorted(r):
if isinstance(r[k], dict):
r[k] = '\n'.join([': '.join(list(l)) for l in r[k].items()])
elif isinstance(r[k], list):
r[k] = ', '.join(r[k])
if r[k]:
response.append((k, str(r[k])))
return response
except ValueError:
return ['Aborted: No info available']
|
The Original Myron Cope Terrible Towel with the Holiday print. Get yours today for the next game or tailgate.
Measures 25 x 15 inches. |
"""
this file contains the definitions of the functions used to answer the questions of
exercise 2.1 of PLASMA DIAGNOSTICS
to execute these functions, one first needs to
import a few libraries as listed at the beginning of the file run_ex.py
as well as this file.
the functions can then be called as explained in the file run_ex.py
the functions defined here can be modified.
in that case, it is necessary, before using the modified version,
to do "reload(ex1_1)" from the terminal.
sometimes this does not work well; it is then recommended to quit
python (ctrl D) and enter again (ipython --pylab)
"""
import numpy as np
import matplotlib.pyplot as plt
import pyneb as pn
from pyneb.utils.misc import parseAtom
#pn.atomicData.setDataFile('cl_iii_atom_M83-KS86.fits')
def p1(ion):
# split ion into elem and spec, e.g 'O3' into 'O' and 3
elem, spec = parseAtom(ion)
    # instantiate the corresponding Atom object
atom = pn.Atom(elem, spec)
# print information including transition probabilities
#atom.printIonic(printA = True)
# prepare a new figure
plt.figure()
# plot energy levels
atom.plotGrotrian()
def p2(diag):
# get the ion, and diagnostic description from the dictionary:
ion, diag_eval, err = pn.diags_dict[diag]
# split ion into elem and spec, e.g 'O3' into 'O' and 3
elem, spec = parseAtom(ion)
# prepare a new figure
plt.figure()
# create a grid of emissivities
#NB: one can use a pypic file containing all the emissivities, if already made
# in that case set restore_file to the name of the pypic file.
grid = pn.EmisGrid(elem, spec, restore_file=None, OmegaInterp='Linear')
# plot the contours
grid.plotContours(to_eval=diag_eval, low_level=None, high_level=None, n_levels=20,
linestyles='-', clabels=True, log_levels=True,
title='{0} {1}'.format(ion, diag_eval))
# save the plot into pdf files
plt.savefig('{0}_{1}.pdf'.format(ion, diag_eval.replace('/', '_')))
# the following is to plot all the possible diagnostic ratios available in pyneb
def plot_all(save=False):
pn.log_.level=1
AA = pn.getAtomDict(OmegaInterp='Linear')
for diag in pn.diags_dict:
atom, diag_eval, err = pn.diags_dict[diag]
if atom in AA:
plt.figure()
grid = pn.EmisGrid(atomObj=AA[atom])
grid.plotContours(to_eval=diag_eval)
if save:
plt.savefig('{0}_{1}.pdf'.format(atom, diag_eval.replace('/', '_')))
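# example calls, as sketched in run_ex.py (the diagnostic label assumes the
# standard keys of pn.diags_dict):
# p1('O3')                 # Grotrian diagram for [O III]
# p2('[OIII] 4363/5007')   # contour plot of the [O III] temperature diagnostic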
|
Members of the Brighouse Ludenscheid Society took a trip to see their neighbouring town and take part in the Stadtfest festival.
Nine residents organised a stall in front of the town hall and took the pig race game with them which is popular at the Brighouse Charity Gala.
The link between Brighouse and Lüdenscheid was founded as long ago as 1950 by the Brighouse Children’s Theatre which exchanged visits with a similar group in Lüdenscheid. The link was strengthened by the civic twinning charter which was signed in 1960. |
# -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from goose.images.image import Image
import re
class ImageExtractor(object):
def __init__(self, config, article):
self.article = article
self.config = config
self.parser = self.config.get_parser()
def get_images(self, top_node):
return self.get_opengraph_tags() + self.get_content_images(top_node)
def get_opengraph_tags(self):
node = self.article.raw_doc
meta = self.parser.getElementsByTag(node, tag='meta', attr='property', value='og:image')
images = []
for item in meta:
if self.parser.getAttribute(item, attr='property') == 'og:image':
src = self.parser.getAttribute(item, attr='content')
if src:
images.append(self.from_image_node_to_image(item, src))
return images
def get_content_images(self, top_node):
images = []
image_nodes = self.parser.getElementsByTag(top_node, tag='img')
for image_node in image_nodes:
image = self.from_image_node_to_image(image_node)
images.append(image)
return images
def from_image_node_to_image(self, image_node, src=None):
image = Image()
if src:
image.src = src
else:
image.src = self.parser.getAttribute(image_node, 'src')
image.width = self.size_to_int(image_node, 'width')
image.height = self.size_to_int(image_node, 'height')
return image
def size_to_int(self, image_node, attribute_name):
size = self.parser.getAttribute(image_node, attribute_name)
if size is None:
return None
        digits_only = re.sub(r"\D", "", size)
        if len(digits_only) == 0:
return None
return int(digits_only)
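# A minimal usage sketch (the `config` and `article` objects come from the
# surrounding Goose pipeline; this is illustrative only):
# extractor = ImageExtractor(config, article)
# images = extractor.get_images(article.top_node)
# srcs = [image.src for image in images]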
|
How to start Pixel Streaming on UE4?
Why is Mesh Streaming Distance not working correctly with scaled meshes?
What does "LogParticles: Warning: InitializeSystem called on an unregistered component." mean ? |
#!/opt/rh/python27/root/usr/bin/python
# -*- coding: utf-8 -*-
r"""
Program name: parse_new_ids.py
Author: Bernardo Gomez/Alex Cooper
Date: june, 2016
Purpose:
"""
import os
import sys
import re
import requests
import xml.etree.ElementTree as elementTree
def get_item_info(result_node,id_list):
outcome=1
try:
rows=result_node.findall("Row")
except:
sys.stderr.write("couldn't find Rows."+"\n")
return id_list,outcome
mms_id=""
item_creation=""
item_modification=""
item_status=""
timestamp=""
process_type=""
receiving_date=""
barcode=""
holding_id=""
item_id=""
for this_row in rows:
item_row=""
try:
this_node=this_row.find("Column1")
mms_id=str(this_node.text)
except:
sys.stderr.write("couldn't find Column1."+"\n")
return id_list,outcome
try:
this_node=this_row.find("Column2")
active_date=str(this_node.text)
except:
sys.stderr.write("couldn't find Column2."+"\n")
return id_list,outcome
item_row=str(mms_id)
id_list.append(item_row)
return id_list,0
def get_record_ids(result_node,id_list):
outcome=1
try:
rows=result_node.findall("Row")
except:
sys.stderr.write("couldn't find Rows."+"\n")
return id_list,outcome
for this_row in rows:
try:
id_node=this_row.find("Column3")
id_list.append(str(id_node.text))
except:
sys.stderr.write("couldn't find Column3."+"\n")
return id_list,outcome
return id_list,0
def main():
if len(sys.argv) < 2:
sys.stderr.write("system failure. configuration file is missing."+"\n")
return 1
try:
configuration=open(sys.argv[1], 'Ur')
except:
sys.stderr.write("couldn't open configuration file "+sys.argv[1]+"\n")
return 1
pat=re.compile("(.*?)=(.*)")
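    # the configuration file is expected to contain key=value lines, e.g.
    # (these values are hypothetical):
    # url=https://example.edu/almaws/v1/analytics/reports
    # path=/shared/reports/new_ids
    # apikey=XXXX
    # limit=1000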
for line in configuration:
line=line.rstrip("\n")
m=pat.match(line)
if m:
if m.group(1) == "url":
url=m.group(2)
if m.group(1) == "path":
path=m.group(2)
if m.group(1) == "apikey":
apikey=m.group(2)
if m.group(1) == "limit":
limit=m.group(2)
configuration.close()
in_string=""
outcome=1
payload={'apikey':apikey,'path':path,'limit':limit}
try:
r=requests.get(url,params=payload)
except:
sys.stderr.write("api request failed."+"\n")
        return outcome
return_code=r.status_code
if return_code == 200:
response=r.content
else:
sys.stderr.write("FAILED(1)\n")
response=r.content
sys.stderr.write(str(response)+"\n")
return 1
in_string=response
in_string=in_string.replace("\n","")
in_string=in_string.replace(" xmlns=\"urn:schemas-microsoft-com:xml-analysis:rowset\"","")
try:
tree=elementTree.fromstring(in_string)
except:
sys.stderr.write("parse failed(1)."+"\n")
return outcome
try:
finished=tree.find("QueryResult/IsFinished")
except:
sys.stderr.write("parse failed(2)."+"\n")
return outcome
id_list=[]
if finished.text == "false":
try:
token=tree.find("QueryResult/ResumptionToken")
except:
sys.stderr.write("parse failed(3)."+"\n")
return outcome
this_token=str(token.text)
id_list=[]
sys.stderr.write(str(url)+" "+str(apikey)+" "+this_token+" "+str(id_list)+" "+limit+"\n")
try:
result_node=tree.find("QueryResult/ResultXml/rowset")
except:
sys.stderr.write("couldn't find rowset."+"\n")
return outcome
id_list,outcome=get_item_info(result_node,id_list)
work_to_do=True
outcome=1
while work_to_do:
payload={'apikey':apikey,'token':this_token,'limit':limit}
try:
r=requests.get(url,params=payload)
except:
sys.stderr.write("api request failed."+"\n")
return outcome
return_code=r.status_code
if return_code == 200:
response=r.content
else:
sys.stderr.write("FAILED(2)\n")
response=r.content
sys.stderr.write(str(response)+"\n")
return outcome
in_string=response
in_string=in_string.replace("\n","")
in_string=in_string.replace(" xmlns=\"urn:schemas-microsoft-com:xml-analysis:rowset\"","")
try:
tree=elementTree.fromstring(in_string)
except:
sys.stderr.write("parse failed(1)."+"\n")
return outcome
try:
finished=tree.find("QueryResult/IsFinished")
except:
sys.stderr.write("parse failed(2)."+"\n")
return outcome
if finished.text == "true":
work_to_do=False
try:
result_node=tree.find("QueryResult/ResultXml/rowset")
# print result_node
except:
sys.stderr.write("couldn't find rowset."+"\n")
return outcome
id_list,outcome=get_item_info(result_node,id_list)
else:
try:
result_node=tree.find("QueryResult/ResultXml/rowset")
except:
sys.stderr.write("couldn't find rowset."+"\n")
return outcome
id_list,outcome=get_item_info(result_node,id_list)
    for item_id in id_list:
        print str(item_id)
return 0
if __name__=="__main__":
sys.exit(main())
|
Dr. Andrew Weaver, one of Canada’s most respected climate scientists, left the ivory tower to run for the legislature with the BC Green Party. Mark Leiren-Young met with BC’s first Green MLA as he launched his bid to become Canada’s first Green Premier.
The Kinder Morgan Trans Mountain pipeline expansion could wipe out the Southern Resident Orcas, but Justin Trudeau’s government approved the plans late last year. Mark Leiren-Young asks Ecojustice’s Dyna Tuytel – the orcas’ unofficial lawyer – about challenging that decision in court.
Dr. David Suzuki is Canada’s most iconic environmentalist and at 81 he’s more passionate than ever about saving the world for his grandchildren — and yours. Suzuki talked with Mark Leiren-Young about alternate facts, toxic whales, taking the heat off the planet and putting it on politicians and more. Listen to the podcast.
Join us as we celebrate Surrey’s 13th annual Arbor Day – a day to celebrate trees!
Explore and learn about nature in Surrey with this series of over 100 free programs and events.
A 10-year research study suggests local displacement by human impacts may have more consequences than previously thought for humpback whales.
New report offers the first independent assessment of BC’s Climate Leadership Plan after the federal government’s recently announced carbon price schedule.
In the wake of the tragic diesel spill in Heiltsuk Territory, Coastal First Nations finally get a ban on crude oil transport along BC’s North Coast. |
#!/usr/bin/env python
from __future__ import print_function
from builtins import input
import sys
import pmagpy.pmag as pmag
def main():
"""
NAME
customize_criteria.py
DESCRIPTION
Allows user to specify acceptance criteria, saves them in pmag_criteria.txt
SYNTAX
customize_criteria.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f IFILE, reads in existing criteria
-F OFILE, writes to pmag_criteria format file
DEFAULTS
IFILE: pmag_criteria.txt
OFILE: pmag_criteria.txt
OUTPUT
creates a pmag_criteria.txt formatted output file
"""
infile,critout="","pmag_criteria.txt"
# parse command line options
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
infile=sys.argv[ind+1]
crit_data,file_type=pmag.magic_read(infile)
if file_type!='pmag_criteria':
print('bad input file')
print(main.__doc__)
sys.exit()
print("Acceptance criteria read in from ", infile)
if '-F' in sys.argv:
ind=sys.argv.index('-F')
critout=sys.argv[ind+1]
Dcrit,Icrit,nocrit=0,0,0
custom='1'
crit=input(" [0] Use no acceptance criteria?\n [1] Use default criteria\n [2] customize criteria \n ")
if crit=='0':
print('Very very loose criteria saved in ',critout)
crit_data=pmag.default_criteria(1)
pmag.magic_write(critout,crit_data,'pmag_criteria')
sys.exit()
crit_data=pmag.default_criteria(0)
if crit=='1':
print('Default criteria saved in ',critout)
pmag.magic_write(critout,crit_data,'pmag_criteria')
sys.exit()
CritRec=crit_data[0]
crit_keys=list(CritRec.keys())
crit_keys.sort()
print("Enter new threshold value.\n Return to keep default.\n Leave blank to not use as a criterion\n ")
for key in crit_keys:
if key!='pmag_criteria_code' and key!='er_citation_names' and key!='criteria_definition' and CritRec[key]!="":
print(key, CritRec[key])
new=input('new value: ')
if new != "": CritRec[key]=(new)
pmag.magic_write(critout,[CritRec],'pmag_criteria')
print("Criteria saved in pmag_criteria.txt")
if __name__ == "__main__":
main()
|
JT Septic is a family-owned company serving the growing communities of Yavapai County since 1994. We service: Dewey/Humboldt, Prescott, Prescott Valley, Chino Valley, Paulden, Spring Valley, Cordes Lakes, and Mayer. We also provide service to the Verde Valley communities. To read more about this history of JT's family business, read the January 2018 edition of Pumper Magazine.
We have over 30 years experience in the waste water industry, and bring our experience to work for you. Our office staff is friendly and helpful and can answer your questions. Our system inspectors are NAWT certified and provide prompt, courteous and professional service.
Of all the companies I have worked with for numerous different services - through 29 years of selling real estate in the Prescott Area - JT Septic is the best company. You are responsive, trustworthy, beyond helpful and NICE! I love you guys! Thanks for everything you do!
Last week Kyle came to my home in response to my request for septic service. He showed up on time and ready to assist me. Who could ask for more? I explained that I had slow-running drainage from my toilets and suspected that the septic tank could be full after almost five years since the last pumping. Kyle opened the tank and explained that it wasn’t yet full and might have another year or two before needing to be emptied. Good news, but it didn’t resolve the problem of why the drainage was slow. I said to go ahead and pump the tank since he was here anyway.
After pumping out the tank Kyle went out of his way to help me assess what the problem was. He photographed the inlet pipe and said that it was apparently sagging between the house and the tank and that could cause the slowdown. That said, they closed up the tank and went on their way.
I was out of town for a few days but when I returned I contacted my landscaper and had a couple of his guys come over and dig up the drain line to the tank. On first impression when the pipe was exposed there was nothing wrong with it. I called your office and Kyle came over first thing the next morning. He and I did some measurements and did find that there was a sag in the pipe, not as much as expected but enough to cause a slowdown. Kyle assisted me in coming up with a solution of raising the pipe and supporting it with some brickwork to straighten it out. Remeasuring the pipe showed no dip in the line and that we had resolved the problem. I felt that Kyle had gone out of his way to provide assistance to me and that he is a great representative of your company.
May I just say what a pleasure it is to work with your company? Bids are quickly provided, appointments are kept as scheduled, paperwork is forwarded promptly. Dependability can be a rare commodity in today’s business world, and I so appreciate yours. Thank you for making my job so much easier. |
import os
import collections
import numpy
import operator
import math
import dateutil.parser
from PyQt4 import QtGui, QtCore
from nupic_studio import getInstantiatedClass
from nupic_studio.ui import Global
from nupic_studio.htm import maxPreviousSteps, maxFutureSteps, maxPreviousStepsWithInference
from nupic_studio.htm.node import Node, NodeType
from nupic_studio.htm.bit import Bit
from nupic_studio.htm.encoding import FieldDataType
from nupic.encoders import MultiEncoder
from nupic.data.file_record_stream import FileRecordStream
class DataSourceType:
"""
Types of data sources which a sensor gets inputs.
"""
file = 1
database = 2
class PredictionsMethod:
"""
Methods used to get predicted values and their probabilities
"""
reconstruction = "Reconstruction"
classification = "Classification"
class Sensor(Node):
"""
A super class only to group properties related to sensors.
"""
#region Constructor
def __init__(self, name):
"""
Initializes a new instance of this class.
"""
Node.__init__(self, name, NodeType.sensor)
#region Instance fields
self.bits = []
"""An array of the bit objects that compose the current output of this node."""
self.dataSource = None
"""Data source which provides records to fed into a region."""
self.dataSourceType = DataSourceType.file
"""Type of the data source (File or Database)"""
self.fileName = ''
"""The input file name to be handled. Returns the input file name only if it is in the project directory, full path otherwise."""
self.databaseConnectionString = ""
"""Connection string of the database."""
self.databaseTable = ''
"""Target table of the database."""
self.encoder = None
"""Multi-encoder which concatenate sub-encodings to convert raw data to htm input and vice-versa."""
self.encodings = []
"""List of sub-encodings that handles the input from database"""
self.predictionsMethod = PredictionsMethod.reconstruction
"""Method used to get predicted values and their probabilities."""
self.enableClassificationLearning = True
"""Switch for classification learning"""
self.enableClassificationInference = True
"""Switch for classification inference"""
#endregion
#region Statistics properties
self.statsPrecisionRate = 0.
#endregion
#endregion
#region Methods
def getBit(self, x, y):
"""
Return the bit located at given position
"""
bit = self.bits[(y * self.width) + x]
return bit
def initialize(self):
"""
Initialize this node.
"""
Node.initialize(self)
# Initialize input bits
self.bits = []
for x in range(self.width):
for y in range(self.height):
bit = Bit()
bit.x = x
bit.y = y
self.bits.append(bit)
if self.dataSourceType == DataSourceType.file:
"""
Initialize this node opening the file and place cursor on the first record.
"""
# If file name provided is a relative path, use project file path
if self.fileName != '' and os.path.dirname(self.fileName) == '':
fullFileName = os.path.dirname(Global.project.fileName) + '/' + self.fileName
else:
fullFileName = self.fileName
# Check if file really exists
if not os.path.isfile(fullFileName):
QtGui.QMessageBox.warning(None, "Warning", "Input stream file '" + fullFileName + "' was not found or specified.", QtGui.QMessageBox.Ok)
return
# Create a data source for read the file
self.dataSource = FileRecordStream(fullFileName)
elif self.dataSourceType == DataSourceType.database:
pass
self.encoder = MultiEncoder()
for encoding in self.encodings:
encoding.initialize()
# Create an instance class for an encoder given its module, class and constructor params
encoding.encoder = getInstantiatedClass(encoding.encoderModule, encoding.encoderClass, encoding.encoderParams)
# Take the first part of encoder field name as encoder name
# Ex: timestamp_weekend.weekend => timestamp_weekend
encoding.encoder.name = encoding.encoderFieldName.split('.')[0]
# Add sub-encoder to multi-encoder list
self.encoder.addEncoder(encoding.dataSourceFieldName, encoding.encoder)
# If encoder size is not the same to sensor size then throws exception
encoderSize = self.encoder.getWidth()
sensorSize = self.width * self.height
if encoderSize > sensorSize:
QtGui.QMessageBox.warning(None, "Warning", "'" + self.name + "': Encoder size (" + str(encoderSize) + ") is different from sensor size (" + str(self.width) + " x " + str(self.height) + " = " + str(sensorSize) + ").", QtGui.QMessageBox.Ok)
return
return True
def nextStep(self):
"""
Performs actions related to time step progression.
"""
        # Update the state machines by removing the first element and adding a new element at the end
for encoding in self.encodings:
encoding.currentValue.rotate()
if encoding.enableInference:
encoding.predictedValues.rotate()
encoding.bestPredictedValue.rotate()
Node.nextStep(self)
for bit in self.bits:
bit.nextStep()
# Get record value from data source
# If the last record was reached just rewind it
data = self.dataSource.getNextRecordDict()
if not data:
self.dataSource.rewind()
data = self.dataSource.getNextRecordDict()
# Pass raw values to encoder and get a concatenated array
outputArray = numpy.zeros(self.encoder.getWidth())
self.encoder.encodeIntoArray(data, outputArray)
# Get values obtained from the data source.
outputValues = self.encoder.getScalars(data)
# Get raw values and respective encoded bit array for each field
prevOffset = 0
for i in range(len(self.encodings)):
encoding = self.encodings[i]
# Convert the value to its respective data type
currValue = outputValues[i]
if encoding.encoderFieldDataType == FieldDataType.boolean:
currValue = bool(currValue)
elif encoding.encoderFieldDataType == FieldDataType.integer:
currValue = int(currValue)
elif encoding.encoderFieldDataType == FieldDataType.decimal:
currValue = float(currValue)
elif encoding.encoderFieldDataType == FieldDataType.dateTime:
currValue = dateutil.parser.parse(str(currValue))
elif encoding.encoderFieldDataType == FieldDataType.string:
currValue = str(currValue)
encoding.currentValue.setForCurrStep(currValue)
# Update sensor bits
for i in range(len(outputArray)):
if outputArray[i] > 0.:
self.bits[i].isActive.setForCurrStep(True)
else:
self.bits[i].isActive.setForCurrStep(False)
# Mark falsely predicted bits
for bit in self.bits:
if bit.isPredicted.atPreviousStep() and not bit.isActive.atCurrStep():
bit.isFalselyPredicted.setForCurrStep(True)
self._output = outputArray
def getPredictions(self):
"""
Get the predictions after an iteration.
"""
if self.predictionsMethod == PredictionsMethod.reconstruction:
# Prepare list with predictions to be classified
# This list contains the indexes of all bits that are predicted
output = []
for i in range(len(self.bits)):
if self.bits[i].isPredicted.atCurrStep():
output.append(1)
else:
output.append(0)
output = numpy.array(output)
# Decode output and create predictions list
fieldsDict, fieldsOrder = self.encoder.decode(output)
for encoding in self.encodings:
if encoding.enableInference:
predictions = []
encoding.predictedValues.setForCurrStep(dict())
                    # If the encoder field name was returned by decode(), assign the predictions to it
if encoding.encoderFieldName in fieldsOrder:
predictedLabels = fieldsDict[encoding.encoderFieldName][1].split(', ')
predictedValues = fieldsDict[encoding.encoderFieldName][0]
for i in range(len(predictedLabels)):
predictions.append([predictedValues[i], predictedLabels[i]])
encoding.predictedValues.atCurrStep()[1] = predictions
# Get the predicted value with the biggest probability to happen
if len(predictions) > 0:
bestPredictionRange = predictions[0][0]
min = bestPredictionRange[0]
max = bestPredictionRange[1]
bestPredictedValue = (min + max) / 2.0
encoding.bestPredictedValue.setForCurrStep(bestPredictedValue)
elif self.predictionsMethod == PredictionsMethod.classification:
            # A classification involves estimating which values are likely to occur in the next time step.
offset = 0
for encoding in self.encodings:
encoderWidth = encoding.encoder.getWidth()
if encoding.enableInference:
# Prepare list with predictions to be classified
# This list contains the indexes of all bits that are predicted
patternNZ = []
                    for i in range(offset, offset + encoderWidth):
if self.bits[i].isActive.atCurrStep():
patternNZ.append(i)
# Get the bucket index of the current value at the encoder
actualValue = encoding.currentValue.atCurrStep()
bucketIdx = encoding.encoder.getBucketIndices(actualValue)[0]
# Perform classification
clasResults = encoding.classifier.compute(recordNum=Global.currStep, patternNZ=patternNZ, classification={'bucketIdx': bucketIdx, 'actValue': actualValue}, learn=self.enableClassificationLearning, infer=self.enableClassificationInference)
encoding.predictedValues.setForCurrStep(dict())
for step in encoding.steps:
# Calculate probability for each predicted value
predictions = dict()
for (actValue, prob) in zip(clasResults['actualValues'], clasResults[step]):
if actValue in predictions:
predictions[actValue] += prob
else:
predictions[actValue] = prob
# Remove predictions with low probabilities
maxVal = (None, None)
for (actValue, prob) in predictions.items():
if len(predictions) <= 1:
break
if maxVal[0] is None or prob >= maxVal[1]:
if maxVal[0] is not None and maxVal[1] < encoding.minProbabilityThreshold:
del predictions[maxVal[0]]
maxVal = (actValue, prob)
elif prob < encoding.minProbabilityThreshold:
del predictions[actValue]
# Sort the list of values from more probable to less probable values
                        # and decrease the list length to the max predictions per step limit
predictions = sorted(predictions.iteritems(), key=operator.itemgetter(1), reverse=True)
predictions = predictions[:maxFutureSteps]
encoding.predictedValues.atCurrStep()[step] = predictions
# Get the predicted value with the biggest probability to happen
bestPredictedValue = encoding.predictedValues.atCurrStep()[1][0][0]
encoding.bestPredictedValue.setForCurrStep(bestPredictedValue)
offset += encoderWidth
def calculateStatistics(self):
"""
Calculate statistics after an iteration.
"""
if Global.currStep > 0:
precision = 0.
# Calculate the prediction precision comparing if the current value is in the range of any prediction.
for encoding in self.encodings:
if encoding.enableInference:
predictions = encoding.predictedValues.atPreviousStep()[1]
for predictedValue in predictions:
min = None
max = None
value = predictedValue[0]
if self.predictionsMethod == PredictionsMethod.reconstruction:
min = value[0]
max = value[1]
elif self.predictionsMethod == PredictionsMethod.classification:
min = value
max = value
if isinstance(min, (int, long, float, complex)) and isinstance(max, (int, long, float, complex)):
min = math.floor(min)
max = math.ceil(max)
if min <= encoding.currentValue.atCurrStep() <= max:
precision = 100.
break
# The precision rate is the average of the precision calculated in every step
self.statsPrecisionRate = (self.statsPrecisionRate + precision) / 2
else:
self.statsPrecisionRate = 0.
for bit in self.bits:
bit.calculateStatistics()
#endregion
|
The North Star Knights were overjoyed when they won the District 6C East volleyball championship last Friday night in Chester. And the performance they got from Paij Peterson was certainly a big help. Peterson did a little bit of everything for the Knights in their three matches, as she averaged 21 assists, nine digs and seven kills per match. Peterson also had a team-high in blocks in the championship match against CJI, as well as nine service aces over the tourney. Peterson is also a standout basketball player in the winter for the Knights. |
# Copyright (c) 2017 Trail of Bits, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binaryninja as binja
from binaryninja.enums import (
Endianness, LowLevelILOperation, SectionSemantics
)
import inspect
import logging
import magic
import re
import struct
from collections import defaultdict
LOGNAME = 'binja.cfg'
log = logging.getLogger(LOGNAME)
class StackFormatter(logging.Formatter):
  def __init__(self, fmt=None, datefmt=None):
    logging.Formatter.__init__(self, fmt, datefmt)
    self.stack_base = len(inspect.stack()) + 7

  def format(self, record):
    record.indent = ' ' * (len(inspect.stack()) - self.stack_base)
    res = logging.Formatter.format(self, record)
    del record.indent
    return res


def init_logger(log_file):
  formatter = StackFormatter('[%(levelname)s] %(indent)s%(message)s')
  handler = logging.FileHandler(log_file)
  handler.setFormatter(formatter)
  log.addHandler(handler)
  log.setLevel(logging.DEBUG)
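
# Hypothetical usage sketch (the path is illustrative): after
#   init_logger('/tmp/binja_cfg.log')
# messages logged via `log` are indented in proportion to the Python call
# depth at the time of the call, which makes nested recovery passes easier
# to follow in the log file.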
ENDIAN_TO_STRUCT = {
  Endianness.LittleEndian: '<',
  Endianness.BigEndian: '>'
}
def read_dword(bv, addr):
  # type: (binja.BinaryView, int) -> int
  # Pad the data if fewer than 4 bytes are read
  endianness = ENDIAN_TO_STRUCT[bv.endianness]
  data = bv.read(addr, 4)
  padded_data = '{{:\x00{}4s}}'.format(endianness).format(data)
  fmt = '{}L'.format(endianness)
  return struct.unpack(fmt, padded_data)[0]


def read_qword(bv, addr):
  # type: (binja.BinaryView, int) -> int
  # Pad the data if fewer than 8 bytes are read
  endianness = ENDIAN_TO_STRUCT[bv.endianness]
  data = bv.read(addr, 8)
  padded_data = '{{:\x00{}8s}}'.format(endianness).format(data)
  fmt = '{}Q'.format(endianness)
  return struct.unpack(fmt, padded_data)[0]
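
# Example of the padding trick above (illustrative bytes): on a little-endian
# view, '{:\x00<4s}'.format('\x78\x56') left-aligns the short read and fills
# with NUL bytes to give '\x78\x56\x00\x00', which unpacks as 0x5678; a full
# 4-byte read of '\x78\x56\x34\x12' unpacks as 0x12345678.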
def load_binary(path):
  magic_type = magic.from_file(path)
  if 'ELF' in magic_type:
    bv_type = binja.BinaryViewType['ELF']
  elif 'PE32' in magic_type:
    bv_type = binja.BinaryViewType['PE']
  elif 'Mach-O' in magic_type:
    bv_type = binja.BinaryViewType['Mach-O']
  else:
    bv_type = binja.BinaryViewType['Raw']

    # Can't do anything with Raw type
    log.fatal('Unknown binary type: "{}", exiting'.format(magic_type))
    exit(1)

  log.debug('Loading binary in binja...')
  bv = bv_type.open(path)
  bv.update_analysis_and_wait()

  # NOTE: at the moment binja will not load a binary
  # that doesn't have an entry point
  if len(bv) == 0:
    log.error('Binary could not be loaded in binja, is it linked?')
    exit(1)

  return bv
def find_symbol_name(bv, addr):
  """Attempt to find a symbol for a given address

  Args:
    bv (binja.BinaryView)
    addr (int): Address the symbol should point to

  Returns:
    (str): Symbol name if found, empty string otherwise
  """
  sym = bv.get_symbol_at(addr)
  if sym is not None:
    return sym.name
  return ''
def get_func_containing(bv, addr):
  """ Finds the function, if any, containing the given address

  Args:
    bv (binja.BinaryView)
    addr (int)

  Returns:
    binja.Function
  """
  funcs = bv.get_functions_containing(addr)
  # Guard against both None and an empty list
  return funcs[0] if funcs else None
def get_section_at(bv, addr):
  """Returns the section in the binary that contains the given address"""
  if not is_valid_addr(bv, addr):
    return None
  for sec in bv.sections.values():
    if sec.start <= addr < sec.end:
      return sec
  return None


def is_external_ref(bv, addr):
  sym = bv.get_symbol_at(addr)
  return sym is not None and 'Import' in sym.type.name


def is_valid_addr(bv, addr):
  return bv.get_segment_at(addr) is not None


def is_code(bv, addr):
  """Returns `True` if the given address lies in a code section"""
  # This is a bit more specific than checking if a segment is executable:
  # Binja may classify one section as ReadOnlyCode and another as ReadOnlyData
  # even though both live in the same executable segment
  sec = get_section_at(bv, addr)
  return sec is not None and sec.semantics == SectionSemantics.ReadOnlyCodeSectionSemantics


def is_executable(bv, addr):
  """Returns `True` if the given address lies in an executable segment"""
  seg = bv.get_segment_at(addr)
  return seg is not None and seg.executable
def is_readable(bv, addr):
  """Returns `True` if the given address lies in a readable segment"""
  seg = bv.get_segment_at(addr)
  return seg is not None and seg.readable


def is_writeable(bv, addr):
  """Returns `True` if the given address lies in a writable segment"""
  seg = bv.get_segment_at(addr)
  return seg is not None and seg.writable
def is_ELF(bv):
  return bv.view_type == 'ELF'


def is_PE(bv):
  return bv.view_type == 'PE'


def clamp(val, vmin, vmax):
  return min(vmax, max(vmin, val))


# Caching results of is_section_external
_EXT_SECTIONS = set()
_INT_SECTIONS = set()


def is_section_external(bv, sect):
  """Returns `True` if the given section contains only external references

  Args:
    bv (binja.BinaryView)
    sect (binja.binaryview.Section)
  """
  if sect.start in _EXT_SECTIONS:
    return True
  if sect.start in _INT_SECTIONS:
    return False

  if is_ELF(bv):
    if re.search(r'\.(got|plt)', sect.name):
      _EXT_SECTIONS.add(sect.start)
      return True

  if is_PE(bv):
    if '.idata' in sect.name:
      _EXT_SECTIONS.add(sect.start)
      return True

  _INT_SECTIONS.add(sect.start)
  return False


def is_tls_section(bv, addr):
  sect_names = (sect.name for sect in bv.get_sections_at(addr))
  return any(name in ['.tbss', '.tdata', '.tls'] for name in sect_names)
def _search_phrase_op(il, target_op):
  """ Helper for finding parts of a phrase[+displacement] il """
  op = il.operation

  # Handle starting points
  if op == LowLevelILOperation.LLIL_SET_REG:
    return _search_phrase_op(il.src, target_op)

  if op == LowLevelILOperation.LLIL_STORE:
    return _search_phrase_op(il.dest, target_op)

  # The phrase il may be inside a LLIL_LOAD
  if op == LowLevelILOperation.LLIL_LOAD:
    return _search_phrase_op(il.src, target_op)

  # Continue left/right at an ADD
  if op == LowLevelILOperation.LLIL_ADD:
    return (_search_phrase_op(il.left, target_op) or
            _search_phrase_op(il.right, target_op))

  # Continue left/right at a SUB
  if op == LowLevelILOperation.LLIL_SUB:
    return (_search_phrase_op(il.left, target_op) or
            _search_phrase_op(il.right, target_op))

  # Continue left/right at a compare-equal
  if op == LowLevelILOperation.LLIL_CMP_E:
    return (_search_phrase_op(il.left, target_op) or
            _search_phrase_op(il.right, target_op))

  # Terminate when the target operation is found
  if op == target_op:
    return il
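
# Illustrative traversal (hypothetical IL shapes): for an instruction like
# `mov eax, dword [ebp + 0x8]`, the lifted IL is roughly
# LLIL_SET_REG(eax, LLIL_LOAD(LLIL_ADD(LLIL_REG(ebp), LLIL_CONST(8)))), so
# searching for LLIL_REG recurses SET_REG -> LOAD -> ADD and returns the
# `ebp` il, while searching for LLIL_CONST returns the `8` il instead.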
def search_phrase_reg(il):
  """ Searches for the register used in a phrase
  ex: dword [ebp + 0x8] -> ebp

  Args:
    il (binja.LowLevelILInstruction): Instruction to parse

  Returns:
    str: register name
  """
  res = _search_phrase_op(il, LowLevelILOperation.LLIL_REG)
  if res is not None:
    return res.src.name


def search_displ_base(il):
  """ Searches for the base address used in a phrase[+displacement]
  ex: dword [eax * 4 + 0x08040000] -> 0x08040000
      dword [ebp + 0x8] -> 0x8

  Args:
    il (binja.LowLevelILInstruction): Instruction to parse

  Returns:
    int: base address
  """
  res = _search_phrase_op(il, LowLevelILOperation.LLIL_CONST)
  if res is not None:
    # Interpret the string representation to avoid sign issues
    return int(res.tokens[0].text, 16)
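
# A hypothetical example of the sign issue above: a displacement token
# rendered as '0xfffffff8' parses to 4294967288 via int(..., 16), whereas the
# IL's `constant` attribute may report the same bits as the signed value -8.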
def is_jump_tail_call(bv, il):
  """ Returns `True` if the given il is a jump to another function """
  return il.operation == LowLevelILOperation.LLIL_JUMP and \
         il.dest.operation == LowLevelILOperation.LLIL_CONST_PTR and \
         get_jump_tail_call_target(bv, il) is not None


def get_jump_tail_call_target(bv, il):
  """ Get the target function of a tail-call.

  Returns:
    binja.Function
  """
  try:
    return bv.get_function_at(il.dest.constant)
  except Exception:
    return None
def collect_il_groups(il_func):
  """ Gather all il instructions grouped by address

  Some instructions (cmov, set, etc.) get expanded into multiple il
  instructions when lifted, but `Function.get_lifted_il_at` will only return
  the first of all the il instructions at an address. This will group all the
  il instructions into a map of address to expanded instructions as follows:

  {
    addr1 => [single il instruction],
    addr2 => [expanded il 1, expanded il 2, ...],
    ...
  }

  Args:
    il_func: IL function to gather all il groups from

  Returns:
    dict: Map from address to all IL instructions at that address
  """
  il_map = defaultdict(list)
  for blk in il_func:
    for il in blk:
      il_map[il.address].append(il)
  return il_map
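
# A minimal usage sketch (assuming an already-loaded BinaryView `bv`): collect
# the lifted IL groups for every function and pick out the addresses that
# expanded into more than one il instruction.
#
#   for func in bv.functions:
#     groups = collect_il_groups(func.lifted_il)
#     expanded = {a: ils for a, ils in groups.items() if len(ils) > 1}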
|
Restaurant
Raw bar, lobster, oysters, waterfront views – it’s all at one of the best seafood restaurants for family dining on Cape Cod.
The Bookstore and Restaurant overlooks beautiful Wellfleet Bay, which feeds into the larger Cape Cod Bay. Diners can look out at the fishing boats steaming out around the hook of Billingsgate, where the original whaling town was founded, now submerged under the sea. The Bookstore and Restaurant offers seafood treasures from our Atlantic coast, especially cod, littlenecks, oysters, scallops, mussels, and lobster. Carnivores will also be happy here, with dishes of sirloin and lamb, prepared simply, allowing the meat’s natural flavors to take center stage. Pair any appetizer or meal with our exclusive selection of wines, beers on tap and microbrews, or divine cocktails, and you will be in heaven by the sea. |