#
# Copyright 2017 CNIT - Consorzio Nazionale Interuniversitario per le Telecomunicazioni
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import string
import copy
def explicit_element_decl_with_conf(i, words, element, name_subgraph, group, type_element):
comma=[]
config=[]
word=words[i+1]
index=string.find(word, '(')
for w in word.split(','):
if string.find(w,'(')!=-1 and string.find(w,')')==-1:
config.append(w[string.find(w,'(')+1:len(w)])
elif string.find(w,'(')!=-1 and string.find(w,')')!=-1:
config.append(w[string.find(w,'(')+1:len(w)-1])
elif string.find(w,')')!=-1:
config.append(w[0:len(w)-1])
else:
config.append(w)
if name_subgraph != '' and name_subgraph[len(name_subgraph)-1] != '.':
name_subgraph = name_subgraph+'.'
if group[len(group)-1] == '.':
group = group[0:len(group)-1]
if words[0] == '[':
index = string.find(words, ']')
words = words[index+1:]
element[len(element)]=({'element':word[0:index], 'name':name_subgraph+words[i-1], 'config':config,'group':[group], 'node_type': type_element})
def explicit_element_decl_without_conf(i, words, element, name_subgraph, group, type_element):
word = ''
if name_subgraph != '' and name_subgraph[len(name_subgraph)-1] != '.':
name_subgraph = name_subgraph+'.'
if group[len(group)-1] == '.':
group = group[0:len(group)-1]
if words[i-1][0] == '[':
index = string.find(words[i-1], ']')
word = words[i-1][index+1:]
else:
word = words[i-1]
element[len(element)]=({'element':words[i+1], 'name':name_subgraph+word, 'config':[],'group':[group], 'node_type': type_element})
def implicit_element_decl_with_conf(i, words,element, name_subgraph, group, words2):
config=[]
word=words[i]
index=string.find(word, '(')
for w in word.split(','):
if string.find(w,'(')!=-1 and string.find(w,')')==-1:
config.append(w[string.find(w,'(')+1:len(w)])
elif string.find(w,'(')!=-1 and string.find(w,')')!=-1:
config.append(w[string.find(w,'(')+1:len(w)-1])
elif string.find(w,')')!=-1:
config.append(w[0:len(w)-1])
else:
config.append(w)
name=nameGenerator(element, word[0:index])
if name_subgraph != '' and name_subgraph[len(name_subgraph)-1] != '.':
name_subgraph = name_subgraph+'.'
element[len(element)]=({'element':word[0:index], 'name':name_subgraph+name, 'config':config,'group':[group], 'node_type':'element'})
words2[i] = name_subgraph+name
def implicit_element_decl_without_conf(i,words,element, name_subgraph, group, words2):
name=nameGenerator(element, words[i])
if name_subgraph != '' and name_subgraph[len(name_subgraph)-1] != '.':
name_subgraph = name_subgraph+'.'
element[len(element)]=({'element':words[i], 'name':name_subgraph+name, 'config':[],'group':[group], 'node_type': 'element'})
words2[i] = name_subgraph+name
def subgraph_element_name(line, compound_element, element, group):
name=nameGenerator(element, 'subgraph')
element[len(element)]=({'element':'Compound_Element', 'name':name, 'config':[],'group':[group], 'node_type': 'compound_element'})
compound_element[len(compound_element)] = ({'name':name, 'compound':line})
return name
def rename_class_element(words, words1,words3, name_ele, name):
for i in range (0,len(words1)): #Renames the explicit elements of the line
if i >= len(words1):
continue
if words1[i] != '::' and words1[i] != '->' and string.find(words[i],'@') == -1 and string.find(words1[i], 'input') == -1 and string.find(words1[i], 'output') == -1:
if string.find(words1[i], '[') != -1:
start = string.find(words1[i], '[')
stop = string.find(words1[i], ']')
if start == 0:
name_element = words1[i][stop:]
else:
name_element = words1[i][0:start]
words1[i] = name_ele+'.'+name_element
else:
words1[i] = name_ele+'.'+words[i]
try:
index = words1.index('::')
del words1[index+1]
counter = len(name_ele)
if name_ele[counter-1] == '.':
words1[index-1] = name_ele + words1[index-1]
else:
words1[index-1] = name_ele + '.' + words1[index-1]
del words1[index]
except ValueError:
break
def rename_compound_element(words3, compound, element_renamed):
for i in range(0,len(words3)): # renames the compound elements contained in words3
try:
index = words3.index('::')
del words3[index+1]
words3[index-1] = compound[1]['name']+'.'+ words3[index-1]
del words3[index]
except ValueError:
break
compound[1]['compound']=words3
for i in range(0,len(words3)): # renames previously declared elements that still
for e in element_renamed.items(): # have their original name
if words3[i] == e[1]['origin_name']:
words3[i] = e[1]['new_name']
elif string.find(words3[i], '[')!=-1:
start = string.find(words3[i], '[')
stop = string.find(words3[i], ']')
if start == 0:
name = words3[i][stop+1:]
elif stop == len(words3[i])-1:
name = words3[i][0:start]
if name == e[1]['origin_name']:
words3[i] = e[1]['new_name']
def nameGenerator(element, type_element): #default name: class@num
implicit_name = False
for e in element.items():
if string.find(e[1]['name'],'@')!=-1 and string.find(e[1]['name'],'.')==-1:
index = string.find(e[1]['name'],'@')
num = int(e[1]['name'][index+1:])
implicit_name = True
if implicit_name :
name = type_element+'@'+str(num+1)
else:
name = type_element+'@0'
return name
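# Illustrative note (not in the original source): if the element table already
# contains an implicitly named entry such as 'Queue@2' (a name with '@' and no dot),
# nameGenerator(element, 'Counter') returns 'Counter@3' (last matching number + 1);
# with no such entries it returns 'Counter@0'.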
def load_list(line, words):
conf=False
port=False
word2=''
word3=''
line_old=' ['
line_new='['
line=line.replace(line_old,line_new)
line_old=['::','->',' ;']
line_new=[' :: ',' -> ',';']
for i in range(0,len(line_old)): #handles explicit element declarations
line=line.replace(line_old[i],line_new[i]) #e.g.: name::element or name :: element
for word in line.split():
if conf:
if word[len(word)-1]==')' or word[len(word)-2]==')':
word=word2+' '+word
conf=False
else:
word2=word2+' '+word
continue
if string.find(word,'(')!=-1 and string.find(word,')')==-1: #joins the config pieces of a single element
conf=True
word2=word
continue
elif word[len(word)-1]==']' and word[0]=='[' and words[len(words)-1] == '->': #used to handle the input-port declaration form
word3=word #e.g.: [num]port or [num] port
port=True
continue
elif port:
word=word3+''+word
port=False
if word[len(word)-1]==';':
word=word[0:len(word)-1]
words.append(word)
words_new=[]
return words
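# Illustrative note (not in the original source): for a Click-style line such as
# 'src :: FromDevice(eth0) -> Queue -> td :: ToDevice(eth0);', load_list is
# expected to return ['src', '::', 'FromDevice(eth0)', '->', 'Queue', '->',
# 'td', '::', 'ToDevice(eth0)'] (the trailing ';' is stripped).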
def handle_edgeslevel(connection):
index = 0
for c in connection.items():
target_level = '0'
source_level = '0'
for w in range(0,len(c[1]['target'])):
if c[1]['target'][w] == '.':
index = w
target_level = c[1]['target'][0:index]
for w in range(0,len(c[1]['source'])):
if c[1]['source'][w] == '.':
index = w
source_level = c[1]['source'][0:index]
if source_level == target_level and source_level != '0' and target_level != '0':
c[1]['group'].append(source_level)
elif source_level == '0' and target_level == '0':
c[1]['group'].append('click')
else:
c[1]['group'].append('Null')
connection2 = connection.copy()
for c in connection.items():
if c[1]['group'] != 'click':
for c1 in connection2.items():
if c1[1]['target'] == c[1]['group']:
c[1]['depth'] = c1[1]['depth']+1
def check_element(check, element_name, words):
word = words
if string.find(words, '[') == 0:
index = string.find(words, ']')
word = words[index+1:]
elif string.find(words,']') == len(words)-1:
index = string.find(words,'[')
word = words[0:index]
start = 0
for i in range(0,len(element_name)):
if element_name[i]=='.':
start = i + 1
if word == element_name[start:]:
check = True
return check | python |
import socket
import ssl
from datetime import datetime
import pytz
def cert_validate_date(hostname, port=443) -> datetime:
"""
Validate the certificate expiration date
"""
with socket.create_connection((hostname, port)) as sock:
context = ssl.create_default_context()
with context.wrap_socket(sock, server_hostname=hostname) as ssock:
print("SSL version: " + ssock.version())
cert = ssock.getpeercert()
expire_date = cert["notAfter"]
print ("Expire time: " + expire_date)
gmt = pytz.timezone('GMT')
dt = datetime.strptime(expire_date, "%b %d %H:%M:%S %Y GMT")
tzdt = gmt.localize(dt)
print (tzdt)
print (tzdt.astimezone().strftime("%Y-%m-%d %H:%M:%S %z"))
return tzdt.astimezone()
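# A minimal usage sketch (not part of the original module); "example.com" is an
# illustrative hostname, any host serving TLS on port 443 should work.
if __name__ == "__main__":
    expiry = cert_validate_date("example.com")
    print("Certificate expires at:", expiry.isoformat())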
| python |
import asyncio
import dataset
import discord
DATABASE = dataset.connect('sqlite:///data/bot/higgsbot.db')
class Token:
def __init__(self):
self.table = DATABASE['balance']
async def start(self, bot):
for member in bot.get_all_members():
id = member.id
if self.table.find_one(user=id) is None:
self.table.insert(dict(user=id, coins=3))
def check_balance(self, usr):
id = usr.id
if self.table.find_one(user=id) is not None:
user = self.table.find_one(user=id)
return user['coins']
else:
self.table.insert(dict(user=id, coins=3))
return 3
def set_balance(self, usr, b):
if b >= 0:
id = usr.id
if self.table.find_one(user=id) is not None:
self.table.update(dict(user=id, coins=b), ['user'])
return
else:
self.table.insert(dict(user=id, coins=b))
return
else:
raise Exception("Balance cannot be less than 0")
def remove_balance(self, usr, c):
id = usr.id
if self.table.find_one(user=id) is not None:
user = self.table.find_one(user=id)
if (user['coins'] - c) >= 0:
new_coins = user['coins'] - c
self.table.update(dict(user=id, coins=new_coins), ['user'])
return
else:
raise Exception("Balance insufficient")
else:
self.table.insert(dict(user=id, coins=c))
user = self.table.find_one(user=id)
if (user['coins'] - c) >= 0:
new_coins = user['coins'] - c
self.table.update(dict(user=id, coins=new_coins), ['user'])
return
else:
raise Exception("Balance insufficient")
def join(self, usr): # When a user joins, add them to the table if they're not already there.
id = usr.id
if self.table.find_one(user=id) is None:
self.table.insert(dict(user=id, coins=3))
async def payment(self):
while True: # 10 minute loop to add CodeTokens.
await asyncio.sleep(600)
for user in self.table:
if user['coins'] < 10:
user['coins'] = user['coins'] + 1
self.table.update(dict(user=user['user'], coins=user['coins']), ['user']) | python |
# Generated by Django 3.1.6 on 2021-02-10 08:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0002_transaction_wallet'),
]
operations = [
migrations.AlterModelOptions(
name='transaction',
options={'ordering': ['-created_at']},
),
]
| python |
"""commands for register dummy events"""
import click
from autobahn.asyncio.wamp import ApplicationRunner
from playground.racelog.caller import CallEndpoint
@click.command("delete")
@click.argument("eventId", type=click.INT)
@click.pass_obj
def delete(obj,eventid):
"""delete event including data.
The event is referenced by its internal database id.
"""
obj['endpoint'] = "racelog.admin.event.delete"
obj['rpc_data'] = eventid
runner = ApplicationRunner(url=obj['url'], realm=obj['realm'], extra=obj)
runner.run(CallEndpoint)
| python |
from featuretools.primitives import AggregationPrimitive
from tsfresh.feature_extraction.feature_calculators import sum_of_reoccurring_values
from woodwork.column_schema import ColumnSchema
class SumOfReoccurringValues(AggregationPrimitive):
"""Returns the sum of all values, that are present in the time series more
than once.
Docstring source:
https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.feature_calculators.sum_of_reoccurring_values
"""
name = "sum_of_reoccurring_values"
input_types = [ColumnSchema(semantic_tags={"numeric"})]
return_type = ColumnSchema(semantic_tags={"numeric"})
stack_on_self = False
def get_function(self):
return sum_of_reoccurring_values
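# A minimal sketch of what the wrapped calculator computes (not part of the
# original file): values that occur more than once are summed, each counted once.
if __name__ == "__main__":
    import pandas as pd
    # 2 and 3 reoccur, 5 does not; the expected result is 5.0
    print(sum_of_reoccurring_values(pd.Series([2, 2, 3, 3, 5])))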
| python |
import asyncio
import datetime
import unittest
from unittest import mock
from aiohttp import hdrs
from aiohttp.multidict import CIMultiDict
from aiohttp.web import ContentCoding, Request, StreamResponse, Response
from aiohttp.protocol import HttpVersion, HttpVersion11, HttpVersion10
from aiohttp.protocol import RawRequestMessage
class TestStreamResponse(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
def make_request(self, method, path, headers=CIMultiDict(),
version=HttpVersion11):
message = RawRequestMessage(method, path, version, headers,
False, False)
return self.request_from_message(message)
def request_from_message(self, message):
self.app = mock.Mock()
self.payload = mock.Mock()
self.transport = mock.Mock()
self.reader = mock.Mock()
self.writer = mock.Mock()
req = Request(self.app, message, self.payload,
self.transport, self.reader, self.writer)
return req
def test_ctor(self):
resp = StreamResponse()
self.assertEqual(200, resp.status)
self.assertIsNone(resp.keep_alive)
def test_content_length(self):
resp = StreamResponse()
self.assertIsNone(resp.content_length)
def test_content_length_setter(self):
resp = StreamResponse()
resp.content_length = 234
self.assertEqual(234, resp.content_length)
def test_drop_content_length_header_on_setting_len_to_None(self):
resp = StreamResponse()
resp.content_length = 1
self.assertEqual("1", resp.headers['Content-Length'])
resp.content_length = None
self.assertNotIn('Content-Length', resp.headers)
def test_set_content_length_to_None_on_non_set(self):
resp = StreamResponse()
resp.content_length = None
self.assertNotIn('Content-Length', resp.headers)
resp.content_length = None
self.assertNotIn('Content-Length', resp.headers)
def test_setting_content_type(self):
resp = StreamResponse()
resp.content_type = 'text/html'
self.assertEqual('text/html', resp.headers['content-type'])
def test_setting_charset(self):
resp = StreamResponse()
resp.content_type = 'text/html'
resp.charset = 'koi8-r'
self.assertEqual('text/html; charset=koi8-r',
resp.headers['content-type'])
def test_default_charset(self):
resp = StreamResponse()
self.assertIsNone(resp.charset)
def test_reset_charset(self):
resp = StreamResponse()
resp.content_type = 'text/html'
resp.charset = None
self.assertIsNone(resp.charset)
def test_reset_charset_after_setting(self):
resp = StreamResponse()
resp.content_type = 'text/html'
resp.charset = 'koi8-r'
resp.charset = None
self.assertIsNone(resp.charset)
def test_charset_without_content_type(self):
resp = StreamResponse()
with self.assertRaises(RuntimeError):
resp.charset = 'koi8-r'
def test_last_modified_initial(self):
resp = StreamResponse()
self.assertIsNone(resp.last_modified)
def test_last_modified_string(self):
resp = StreamResponse()
dt = datetime.datetime(1990, 1, 2, 3, 4, 5, 0, datetime.timezone.utc)
resp.last_modified = 'Mon, 2 Jan 1990 03:04:05 GMT'
self.assertEqual(resp.last_modified, dt)
def test_last_modified_timestamp(self):
resp = StreamResponse()
dt = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, datetime.timezone.utc)
resp.last_modified = 0
self.assertEqual(resp.last_modified, dt)
resp.last_modified = 0.0
self.assertEqual(resp.last_modified, dt)
def test_last_modified_datetime(self):
resp = StreamResponse()
dt = datetime.datetime(2001, 2, 3, 4, 5, 6, 0, datetime.timezone.utc)
resp.last_modified = dt
self.assertEqual(resp.last_modified, dt)
def test_last_modified_reset(self):
resp = StreamResponse()
resp.last_modified = 0
resp.last_modified = None
self.assertEqual(resp.last_modified, None)
@mock.patch('aiohttp.web_reqrep.ResponseImpl')
def test_start(self, ResponseImpl):
req = self.make_request('GET', '/')
resp = StreamResponse()
self.assertIsNone(resp.keep_alive)
msg = self.loop.run_until_complete(resp.prepare(req))
self.assertTrue(msg.send_headers.called)
self.assertIs(msg, self.loop.run_until_complete(resp.prepare(req)))
self.assertTrue(resp.keep_alive)
req2 = self.make_request('GET', '/')
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(resp.prepare(req2))
@mock.patch('aiohttp.web_reqrep.ResponseImpl')
def test_chunked_encoding(self, ResponseImpl):
req = self.make_request('GET', '/')
resp = StreamResponse()
self.assertFalse(resp.chunked)
resp.enable_chunked_encoding()
self.assertTrue(resp.chunked)
msg = self.loop.run_until_complete(resp.prepare(req))
self.assertTrue(msg.chunked)
@mock.patch('aiohttp.web_reqrep.ResponseImpl')
def test_chunk_size(self, ResponseImpl):
req = self.make_request('GET', '/')
resp = StreamResponse()
self.assertFalse(resp.chunked)
resp.enable_chunked_encoding(chunk_size=8192)
self.assertTrue(resp.chunked)
msg = self.loop.run_until_complete(resp.prepare(req))
self.assertTrue(msg.chunked)
msg.add_chunking_filter.assert_called_with(8192)
self.assertIsNotNone(msg.filter)
def test_chunked_encoding_forbidden_for_http_10(self):
req = self.make_request('GET', '/', version=HttpVersion10)
resp = StreamResponse()
resp.enable_chunked_encoding()
with self.assertRaisesRegex(
RuntimeError,
"Using chunked encoding is forbidden for HTTP/1.0"):
self.loop.run_until_complete(resp.prepare(req))
@mock.patch('aiohttp.web_reqrep.ResponseImpl')
def test_compression_no_accept(self, ResponseImpl):
req = self.make_request('GET', '/')
resp = StreamResponse()
self.assertFalse(resp.chunked)
self.assertFalse(resp.compression)
resp.enable_compression()
self.assertTrue(resp.compression)
msg = self.loop.run_until_complete(resp.prepare(req))
self.assertFalse(msg.add_compression_filter.called)
@mock.patch('aiohttp.web_reqrep.ResponseImpl')
def test_force_compression_no_accept_backwards_compat(self, ResponseImpl):
req = self.make_request('GET', '/')
resp = StreamResponse()
self.assertFalse(resp.chunked)
self.assertFalse(resp.compression)
resp.enable_compression(force=True)
self.assertTrue(resp.compression)
msg = self.loop.run_until_complete(resp.prepare(req))
self.assertTrue(msg.add_compression_filter.called)
self.assertIsNotNone(msg.filter)
@mock.patch('aiohttp.web_reqrep.ResponseImpl')
def test_force_compression_false_backwards_compat(self, ResponseImpl):
req = self.make_request('GET', '/')
resp = StreamResponse()
self.assertFalse(resp.compression)
resp.enable_compression(force=False)
self.assertTrue(resp.compression)
msg = self.loop.run_until_complete(resp.prepare(req))
self.assertFalse(msg.add_compression_filter.called)
@mock.patch('aiohttp.web_reqrep.ResponseImpl')
def test_compression_default_coding(self, ResponseImpl):
req = self.make_request(
'GET', '/',
headers=CIMultiDict({hdrs.ACCEPT_ENCODING: 'gzip, deflate'}))
resp = StreamResponse()
self.assertFalse(resp.chunked)
self.assertFalse(resp.compression)
resp.enable_compression()
self.assertTrue(resp.compression)
msg = self.loop.run_until_complete(resp.prepare(req))
msg.add_compression_filter.assert_called_with('deflate')
self.assertEqual('deflate', resp.headers.get(hdrs.CONTENT_ENCODING))
self.assertIsNotNone(msg.filter)
@mock.patch('aiohttp.web_reqrep.ResponseImpl')
def test_force_compression_deflate(self, ResponseImpl):
req = self.make_request(
'GET', '/',
headers=CIMultiDict({hdrs.ACCEPT_ENCODING: 'gzip, deflate'}))
resp = StreamResponse()
resp.enable_compression(ContentCoding.deflate)
self.assertTrue(resp.compression)
msg = self.loop.run_until_complete(resp.prepare(req))
msg.add_compression_filter.assert_called_with('deflate')
self.assertEqual('deflate', resp.headers.get(hdrs.CONTENT_ENCODING))
@mock.patch('aiohttp.web_reqrep.ResponseImpl')
def test_force_compression_no_accept_deflate(self, ResponseImpl):
req = self.make_request('GET', '/')
resp = StreamResponse()
resp.enable_compression(ContentCoding.deflate)
self.assertTrue(resp.compression)
msg = self.loop.run_until_complete(resp.prepare(req))
msg.add_compression_filter.assert_called_with('deflate')
self.assertEqual('deflate', resp.headers.get(hdrs.CONTENT_ENCODING))
@mock.patch('aiohttp.web_reqrep.ResponseImpl')
def test_force_compression_gzip(self, ResponseImpl):
req = self.make_request(
'GET', '/',
headers=CIMultiDict({hdrs.ACCEPT_ENCODING: 'gzip, deflate'}))
resp = StreamResponse()
resp.enable_compression(ContentCoding.gzip)
self.assertTrue(resp.compression)
msg = self.loop.run_until_complete(resp.prepare(req))
msg.add_compression_filter.assert_called_with('gzip')
self.assertEqual('gzip', resp.headers.get(hdrs.CONTENT_ENCODING))
@mock.patch('aiohttp.web_reqrep.ResponseImpl')
def test_force_compression_no_accept_gzip(self, ResponseImpl):
req = self.make_request('GET', '/')
resp = StreamResponse()
resp.enable_compression(ContentCoding.gzip)
self.assertTrue(resp.compression)
msg = self.loop.run_until_complete(resp.prepare(req))
msg.add_compression_filter.assert_called_with('gzip')
self.assertEqual('gzip', resp.headers.get(hdrs.CONTENT_ENCODING))
@mock.patch('aiohttp.web_reqrep.ResponseImpl')
def test_delete_content_length_if_compression_enabled(self, ResponseImpl):
req = self.make_request('GET', '/')
resp = Response(body=b'answer')
self.assertEqual(6, resp.content_length)
resp.enable_compression(ContentCoding.gzip)
self.loop.run_until_complete(resp.prepare(req))
self.assertIsNone(resp.content_length)
def test_write_non_byteish(self):
resp = StreamResponse()
self.loop.run_until_complete(
resp.prepare(self.make_request('GET', '/')))
with self.assertRaises(AssertionError):
resp.write(123)
def test_write_before_start(self):
resp = StreamResponse()
with self.assertRaises(RuntimeError):
resp.write(b'data')
def test_cannot_write_after_eof(self):
resp = StreamResponse()
self.loop.run_until_complete(
resp.prepare(self.make_request('GET', '/')))
resp.write(b'data')
self.writer.drain.return_value = ()
self.loop.run_until_complete(resp.write_eof())
self.writer.write.reset_mock()
with self.assertRaises(RuntimeError):
resp.write(b'next data')
self.assertFalse(self.writer.write.called)
def test_cannot_write_eof_before_headers(self):
resp = StreamResponse()
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(resp.write_eof())
def test_cannot_write_eof_twice(self):
resp = StreamResponse()
self.loop.run_until_complete(
resp.prepare(self.make_request('GET', '/')))
resp.write(b'data')
self.writer.drain.return_value = ()
self.loop.run_until_complete(resp.write_eof())
self.assertTrue(self.writer.write.called)
self.writer.write.reset_mock()
self.loop.run_until_complete(resp.write_eof())
self.assertFalse(self.writer.write.called)
def test_write_returns_drain(self):
resp = StreamResponse()
self.loop.run_until_complete(
resp.prepare(self.make_request('GET', '/')))
self.assertEqual((), resp.write(b'data'))
def test_write_returns_empty_tuple_on_empty_data(self):
resp = StreamResponse()
self.loop.run_until_complete(
resp.prepare(self.make_request('GET', '/')))
self.assertEqual((), resp.write(b''))
def test_force_close(self):
resp = StreamResponse()
self.assertIsNone(resp.keep_alive)
resp.force_close()
self.assertFalse(resp.keep_alive)
def test_response_cookies(self):
resp = StreamResponse()
self.assertEqual(resp.cookies, {})
self.assertEqual(str(resp.cookies), '')
resp.set_cookie('name', 'value')
self.assertEqual(str(resp.cookies), 'Set-Cookie: name=value; Path=/')
resp.set_cookie('name', 'other_value')
self.assertEqual(str(resp.cookies),
'Set-Cookie: name=other_value; Path=/')
resp.cookies['name'] = 'another_other_value'
resp.cookies['name']['max-age'] = 10
self.assertEqual(
str(resp.cookies),
'Set-Cookie: name=another_other_value; Max-Age=10; Path=/')
resp.del_cookie('name')
expected = 'Set-Cookie: name=("")?; Max-Age=0; Path=/'
self.assertRegex(str(resp.cookies), expected)
resp.set_cookie('name', 'value', domain='local.host')
expected = 'Set-Cookie: name=value; Domain=local.host; Path=/'
self.assertEqual(str(resp.cookies), expected)
def test_response_cookie_path(self):
resp = StreamResponse()
self.assertEqual(resp.cookies, {})
resp.set_cookie('name', 'value', path='/some/path')
self.assertEqual(str(resp.cookies),
'Set-Cookie: name=value; Path=/some/path')
resp.set_cookie('name', 'value', expires='123')
self.assertEqual(str(resp.cookies),
'Set-Cookie: name=value; expires=123;'
' Path=/')
resp.set_cookie('name', 'value', domain='example.com',
path='/home', expires='123', max_age='10',
secure=True, httponly=True, version='2.0')
self.assertEqual(str(resp.cookies).lower(),
'set-cookie: name=value; '
'domain=example.com; '
'expires=123; '
'httponly; '
'max-age=10; '
'path=/home; '
'secure; '
'version=2.0')
def test_response_cookie__issue_del_cookie(self):
resp = StreamResponse()
self.assertEqual(resp.cookies, {})
self.assertEqual(str(resp.cookies), '')
resp.del_cookie('name')
expected = 'Set-Cookie: name=("")?; Max-Age=0; Path=/'
self.assertRegex(str(resp.cookies), expected)
def test_cookie_set_after_del(self):
resp = StreamResponse()
resp.del_cookie('name')
resp.set_cookie('name', 'val')
# check for Max-Age dropped
expected = 'Set-Cookie: name=val; Path=/'
self.assertEqual(str(resp.cookies), expected)
def test_set_status_with_reason(self):
resp = StreamResponse()
resp.set_status(200, "Everything is fine!")
self.assertEqual(200, resp.status)
self.assertEqual("Everything is fine!", resp.reason)
def test_start_force_close(self):
req = self.make_request('GET', '/')
resp = StreamResponse()
resp.force_close()
self.assertFalse(resp.keep_alive)
msg = self.loop.run_until_complete(resp.prepare(req))
self.assertFalse(resp.keep_alive)
self.assertTrue(msg.closing)
def test___repr__(self):
req = self.make_request('GET', '/path/to')
resp = StreamResponse(reason=301)
self.loop.run_until_complete(resp.prepare(req))
self.assertEqual("<StreamResponse 301 GET /path/to >", repr(resp))
def test___repr__not_started(self):
resp = StreamResponse(reason=301)
self.assertEqual("<StreamResponse 301 not started>", repr(resp))
def test_keep_alive_http10(self):
message = RawRequestMessage('GET', '/', HttpVersion10, CIMultiDict(),
True, False)
req = self.request_from_message(message)
resp = StreamResponse()
self.loop.run_until_complete(resp.prepare(req))
self.assertFalse(resp.keep_alive)
headers = CIMultiDict(Connection='keep-alive')
message = RawRequestMessage('GET', '/', HttpVersion10, headers,
False, False)
req = self.request_from_message(message)
resp = StreamResponse()
self.loop.run_until_complete(resp.prepare(req))
self.assertEqual(resp.keep_alive, True)
def test_keep_alive_http09(self):
headers = CIMultiDict(Connection='keep-alive')
message = RawRequestMessage('GET', '/', HttpVersion(0, 9), headers,
False, False)
req = self.request_from_message(message)
resp = StreamResponse()
self.loop.run_until_complete(resp.prepare(req))
self.assertFalse(resp.keep_alive)
@mock.patch('aiohttp.web_reqrep.ResponseImpl')
def test_start_twice(self, ResponseImpl):
req = self.make_request('GET', '/')
resp = StreamResponse()
with self.assertWarns(DeprecationWarning):
impl1 = resp.start(req)
impl2 = resp.start(req)
self.assertIs(impl1, impl2)
class TestResponse(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
def make_request(self, method, path, headers=CIMultiDict()):
self.app = mock.Mock()
message = RawRequestMessage(method, path, HttpVersion11, headers,
False, False)
self.payload = mock.Mock()
self.transport = mock.Mock()
self.reader = mock.Mock()
self.writer = mock.Mock()
req = Request(self.app, message, self.payload,
self.transport, self.reader, self.writer)
return req
def test_ctor(self):
resp = Response()
self.assertEqual(200, resp.status)
self.assertEqual('OK', resp.reason)
self.assertIsNone(resp.body)
self.assertEqual(0, resp.content_length)
self.assertEqual(CIMultiDict([('CONTENT-LENGTH', '0')]),
resp.headers)
def test_ctor_with_headers_and_status(self):
resp = Response(body=b'body', status=201, headers={'Age': '12'})
self.assertEqual(201, resp.status)
self.assertEqual(b'body', resp.body)
self.assertEqual(4, resp.content_length)
self.assertEqual(CIMultiDict(
[('AGE', '12'),
('CONTENT-LENGTH', '4')]), resp.headers)
def test_ctor_content_type(self):
resp = Response(content_type='application/json')
self.assertEqual(200, resp.status)
self.assertEqual('OK', resp.reason)
self.assertEqual(
CIMultiDict(
[('CONTENT-TYPE', 'application/json'),
('CONTENT-LENGTH', '0')]),
resp.headers)
def test_ctor_text_body_combined(self):
with self.assertRaises(ValueError):
Response(body=b'123', text='test text')
def test_ctor_text(self):
resp = Response(text='test text')
self.assertEqual(200, resp.status)
self.assertEqual('OK', resp.reason)
self.assertEqual(
CIMultiDict(
[('CONTENT-TYPE', 'text/plain; charset=utf-8'),
('CONTENT-LENGTH', '9')]),
resp.headers)
self.assertEqual(resp.body, b'test text')
self.assertEqual(resp.text, 'test text')
def test_assign_nonbyteish_body(self):
resp = Response(body=b'data')
with self.assertRaises(TypeError):
resp.body = 123
self.assertEqual(b'data', resp.body)
self.assertEqual(4, resp.content_length)
def test_assign_nonstr_text(self):
resp = Response(text='test')
with self.assertRaises(TypeError):
resp.text = b'123'
self.assertEqual(b'test', resp.body)
self.assertEqual(4, resp.content_length)
def test_send_headers_for_empty_body(self):
req = self.make_request('GET', '/')
resp = Response()
self.writer.drain.return_value = ()
buf = b''
def append(data):
nonlocal buf
buf += data
self.writer.write.side_effect = append
self.loop.run_until_complete(resp.prepare(req))
self.loop.run_until_complete(resp.write_eof())
txt = buf.decode('utf8')
self.assertRegex(txt, 'HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 0\r\n'
'CONNECTION: keep-alive\r\n'
'DATE: .+\r\nSERVER: .+\r\n\r\n')
def test_render_with_body(self):
req = self.make_request('GET', '/')
resp = Response(body=b'data')
self.writer.drain.return_value = ()
buf = b''
def append(data):
nonlocal buf
buf += data
self.writer.write.side_effect = append
self.loop.run_until_complete(resp.prepare(req))
self.loop.run_until_complete(resp.write_eof())
txt = buf.decode('utf8')
self.assertRegex(txt, 'HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 4\r\n'
'CONNECTION: keep-alive\r\n'
'DATE: .+\r\nSERVER: .+\r\n\r\ndata')
def test_send_set_cookie_header(self):
resp = Response()
resp.cookies['name'] = 'value'
req = self.make_request('GET', '/')
self.writer.drain.return_value = ()
buf = b''
def append(data):
nonlocal buf
buf += data
self.writer.write.side_effect = append
self.loop.run_until_complete(resp.prepare(req))
self.loop.run_until_complete(resp.write_eof())
txt = buf.decode('utf8')
self.assertRegex(txt, 'HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 0\r\n'
'SET-COOKIE: name=value\r\n'
'CONNECTION: keep-alive\r\n'
'DATE: .+\r\nSERVER: .+\r\n\r\n')
def test_set_text_with_content_type(self):
resp = Response()
resp.content_type = "text/html"
resp.text = "text"
self.assertEqual("text", resp.text)
self.assertEqual(b"text", resp.body)
self.assertEqual("text/html", resp.content_type)
def test_set_text_with_charset(self):
resp = Response()
resp.content_type = 'text/plain'
resp.charset = "KOI8-R"
resp.text = "текст"
self.assertEqual("текст", resp.text)
self.assertEqual("текст".encode('koi8-r'), resp.body)
self.assertEqual("koi8-r", resp.charset)
def test_started_when_not_started(self):
resp = StreamResponse()
self.assertFalse(resp.prepared)
def test_started_when_started(self):
resp = StreamResponse()
self.loop.run_until_complete(
resp.prepare(self.make_request('GET', '/')))
self.assertTrue(resp.prepared)
def test_drain_before_start(self):
@asyncio.coroutine
def go():
resp = StreamResponse()
with self.assertRaises(RuntimeError):
yield from resp.drain()
self.loop.run_until_complete(go())
def test_nonstr_text_in_ctor(self):
with self.assertRaises(TypeError):
Response(text=b'data')
def test_text_in_ctor_with_content_type(self):
resp = Response(text='data', content_type='text/html')
self.assertEqual('data', resp.text)
self.assertEqual('text/html', resp.content_type)
def test_text_in_ctor_with_content_type_header(self):
resp = Response(text='текст',
headers={'Content-Type': 'text/html; charset=koi8-r'})
self.assertEqual('текст'.encode('koi8-r'), resp.body)
self.assertEqual('text/html', resp.content_type)
self.assertEqual('koi8-r', resp.charset)
def test_text_with_empty_payload(self):
resp = Response(status=200)
self.assertEqual(resp.body, None)
self.assertEqual(resp.text, None)
| python |
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
Base.__table_args__ = {
"mysql_charset": "utf8",
"mysql_collate": "utf8_general_ci",
}
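# Illustrative example (not in the original file): a model declared against this
# Base inherits the MySQL charset/collation table arguments. The "users" table
# and its columns below are hypothetical.
from sqlalchemy import Column, Integer, String

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String(64))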
| python |
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import *
from tensorflow.keras.preprocessing import sequence
from tf2bert.text.tokenizers import Tokenizer
from tf2bert.text.labels import TaggingTokenizer
from tf2bert.text.labels import find_entities_chunking
import dataset
def batch_pad(X, maxlen=None, dtype="int32"):
if maxlen is None:
maxlen = max([len(i) for i in X])
X = sequence.pad_sequences(
X,
maxlen=maxlen,
dtype=dtype,
padding="post",
truncating="post",
value=0
)
return X
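# Illustrative note (not in the original file): with maxlen=None the batch is
# right-padded with zeros to the longest sequence, e.g.
# batch_pad([[1, 2], [3, 4, 5]]) -> [[1, 2, 0], [3, 4, 5]] as an int32 array.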
def convert(X, y):
"""Converts the data to the form [text, (start, end, label), (start, end, label), ...],
where text[start:end] is an entity of type label.
"""
data = []
for text, tags in zip(X, y):
sample = []
sample.append(text)
for label, start, end in find_entities_chunking(tags):
sample.append((start, end, label))
data.append(sample)
return data
def load_data(file="train"):
X, y = dataset.load_china_people_daily(file)
return convert(X, y)
train_data = load_data("train")
valid_data = load_data("dev")
test_data = load_data("test")
class DataGenerator(tf.keras.utils.Sequence):
def __init__(self, data, batch_size):
pass
class NamedEntityRecognizer:
def __init__(self, tagger, batch_size=32):
self.tagger = tagger
self.batch_size = batch_size # batch size
def predict(self, texts):
"""Runs batch prediction when the input contains more than one sample."""
if isinstance(texts, list):
return self._predict_batch(texts)
return self._predict_one(texts)
def preprocess(self, text):
tokens = tokenizer.tokenize(text, maxlen=512)
mapping = tokenizer.rematch(text, tokens)
token_ids = tokenizer.tokens_to_ids(tokens)
segment_ids = [0] * len(token_ids)
return mapping, token_ids, segment_ids
def convert(self, text):
pass
def decode_tags(self, mapping):
pass
def _predict_one(self, text):
mapping, token_ids, segment_ids = self.preprocess(text)
length = len(token_ids)
token_ids = batch_pad(token_ids)
segment_ids = batch_pad(segment_ids)
label_ids = model.predict([token_ids, segment_ids])[0]
labels = self.tagger.decode(label_ids)
entities = []
for label, start, end in find_entities_chunking(labels):
entities.append((start, end, label))
# TODO mapping
def _predict_batch(self, texts):
pass
class Evaluator(tf.keras.callbacks.Callback):
def __init__(self, ner, valid_data=None, test_data=None):
self.ner = ner # entity recognizer
self.valid_data = valid_data
self.test_data = test_data
self.best_valid_f1 = 0.0
self.best_test_f1 = 0.0
def evaluate(self, data):
texts = [sample[0] for sample in data]
y_true = [set([tuple(i) for i in sample[1:]]) for sample in data]
y_pred = [set(i) for i in self.ner.predict(texts)]
X = Y = Z = 1e-10
for R, T in zip(y_pred, y_true):
X += len(R & T)
Y += len(R)
Z += len(T)
precision = X / Y
recall = X / Z
f1 = 2 * X / (Y + Z)
return precision, recall, f1
def on_epoch_end(self, epoch, logs=None):
template = "precision:{:.5f}, recall:{:.5f}, f1:{:.5f}, best f1:{:.5f}"
if self.valid_data is not None:
precision, recall, f1 = self.evaluate(self.valid_data)
if f1 >= self.best_valid_f1:
self.best_valid_f1 = f1
self.model.save_weights("best_model.weights")
print("valid:", template.format(precision, recall, f1, self.best_valid_f1))
if self.test_data is not None:
precision, recall, f1 = self.evaluate(self.test_data)
if f1 >= self.best_test_f1:
self.best_test_f1 = f1
print("test:", template.format(precision, recall, f1, self.best_test_f1))
maxlen = 128
vocab_size = 0
hdims = 256
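# Note (not in the original file): vocab_size is a placeholder and num_classes is
# never defined in this file; both must be set from the tokenizer vocabulary and
# the tag set before the model is built. Likewise, CRF and CRFModel are not
# imported above (they are presumably provided by the tf2bert package), and the
# tokenizer instance used in preprocess() is never created here.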
inputs = Input(shape=(maxlen,))
x = Embedding(input_dim=vocab_size, output_dim=hdims, mask_zero=True)(inputs)
x = Dropout(0.1)(x)
x = LayerNormalization()(x)
x = Bidirectional(LSTM(hdims, return_sequences=True), merge_mode="concat")(x)
x = Dense(num_classes)(x)
crf = CRF(
lr_multiplier=1,
trans_initializer="glorot_normal",
trainable=True
)
outputs = crf(x)
base = Model(inputs=inputs, outputs=outputs)
model = CRFModel(base)
model.summary()
model.compile(optimizer="adam")
if __name__ == "__main__":
X, y = dataset.load_china_people_daily("train")
data = convert(X, y)
for i in data:
print(i)
input()
| python |
"""
Tester.
"""
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
from grizzled.misc import ReadOnly, ReadOnlyObjectError
import pytest
class Something(object):
def __init__(self, a=1, b=2):
self.a = a
self.b = b
@pytest.fixture
def readonly_something():
something = Something(10, 20)
assert something.a == 10
assert something.b == 20
something.a += 1
assert something.a == 11
return ReadOnly(something)
def test_class_attr(readonly_something):
assert readonly_something.__class__ is Something
def test_is_instance(readonly_something):
assert isinstance(readonly_something, Something)
def test_access_1(readonly_something):
with pytest.raises(ReadOnlyObjectError):
readonly_something.a += 1
def test_access_2(readonly_something):
with pytest.raises(ReadOnlyObjectError):
readonly_something.a = 200
| python |
import configparser
import os
from discord.ext import commands
import requests
COMMAND_PREFIX = '!track '
ACGN_LIST_HELP = 'Lists all tracked acgn data.'
ACGN_SEARCH_HELP = '''
Searches acgns in the database.
Lists acgns with title that (partially) matches <title>.
Args:
title: A string.
'''
ACGN_ADD_HELP = '''
Adds an acgn in the database.
Args:
title: A string.
final_episode: Number of final episode.
'''
ACGN_UPDATE_HELP = '''
Updates an acgn in the database.
Updates <final_episode> of <acgn_id>.
Args:
acgn_id: A MongoDB ObjectId.
final_episode: Number of final episode.
'''
PROGRESS_LIST_ALL_HELP = 'Lists all tracked progress data.'
PROGRESS_LIST_HELP = 'Lists tracked progress data for you.'
PROGRESS_ADD_HELP = '''
Adds a progress for you in the database.
Adds a progress of <acgn_id> for you.
You cannot add a progress for another user.
Args:
acgn_id: A MongoDB ObjectId.
episode: Number of the episode.
'''
PROGRESS_UPDATE_HELP = '''
Updates your progress in the database.
Updates <episode> of your progress for <acgn_id>.
Args:
acgn_id: A MongoDB ObjectId.
episode: Number of the episode.
'''
env = 'TEST'
# PROD or TEST
config = configparser.ConfigParser()
config.read(os.path.abspath(os.path.join(".ini")))
service_url = config[env]['SERVICE_URL']
bot = commands.Bot(command_prefix=COMMAND_PREFIX)
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, (commands.MissingRequiredArgument,
commands.TooManyArguments)):
await ctx.send('Bad arguments.')
await ctx.send_help(ctx.command)
@bot.group(name='acgn')
async def acgn_commands(ctx):
if ctx.invoked_subcommand is None:
if ctx.subcommand_passed is None:
await no_subcommand_provided(ctx)
else:
await command_not_found(ctx)
@acgn_commands.command(name='list',
ignore_extra=False,
help=ACGN_LIST_HELP)
async def acgn_list(ctx):
url = service_url + '/acgns'
response = requests.get(url=url)
if response.status_code == 400:
await bad_request(ctx, response)
return
if response.status_code != 200:
await backend_error(ctx, response)
return
data = response.json()
await send_acgns_message(ctx, data)
@acgn_commands.command(name='search',
ignore_extra=False,
help=ACGN_SEARCH_HELP)
async def acgn_search(ctx, title):
url = service_url + '/acgns'
params = {
'title': title
}
response = requests.get(url=url, params=params)
if response.status_code == 400:
await bad_request(ctx, response)
return
if response.status_code != 200:
await backend_error(ctx, response)
return
data = response.json()
await send_acgns_message(ctx, data)
@acgn_commands.command(name='add',
ignore_extra=False,
help=ACGN_ADD_HELP)
async def acgn_add(ctx, title, final_episode):
url = service_url + '/acgns'
data = {
'title': title,
'final_episode': str(final_episode)
}
response = requests.post(url=url, data=data)
if response.status_code == 400:
await bad_request(ctx, response)
return
if response.status_code != 200:
await backend_error(ctx, response)
return
await ctx.send('Add Success.')
@acgn_commands.command(name='update',
ignore_extra=False,
help=ACGN_UPDATE_HELP)
async def acgn_update(ctx, acgn_id, final_episode):
url = service_url + '/acgns/' + str(acgn_id)
data = {
'final_episode': str(final_episode)
}
response = requests.put(url=url, data=data)
if response.status_code == 400:
await bad_request(ctx, response)
return
if response.status_code != 200:
await backend_error(ctx, response)
return
await ctx.send('Update Success.')
async def user_search(ctx):
url = service_url + '/users'
params = {
'discord_id': ctx.author.id
}
response = requests.get(url=url, params=params)
if response.status_code == 400:
await bad_request(ctx, response)
return None, -1
if response.status_code != 200:
await backend_error(ctx, response)
return None, -1
return response.json(), 0
async def user_add(ctx):
data = {
'discord_id': ctx.author.id,
'discord_username': ctx.author.name
}
url = service_url + '/users'
response = requests.post(url=url, data=data)
if response.status_code == 400:
await bad_request(ctx, response)
return None, -1
if response.status_code != 200:
await backend_error(ctx, response)
return None, -1
return response.json(), 0
async def user_get_id(ctx):
# Find user_id for author
user, status = await user_search(ctx)
if status < 0:
return None, -1
if user is None:
# if user not in database, create entry for them
user, status = await user_add(ctx)
if status < 0:
return None, -1
return user.get('_id'), 0
@bot.group(name='progress')
async def progress_commands(ctx):
if ctx.invoked_subcommand is None:
if ctx.subcommand_passed is None:
await no_subcommand_provided(ctx)
else:
await command_not_found(ctx)
@progress_commands.command(name='list-all',
ignore_extra=False,
help=PROGRESS_LIST_ALL_HELP)
async def progress_list_all(ctx):
url = service_url + '/progresses'
response = requests.get(url=url)
if response.status_code == 400:
await bad_request(ctx, response)
return
if response.status_code != 200:
await backend_error(ctx, response)
return
data = response.json()
await send_progresses_message(ctx, data)
@progress_commands.command(name='list',
ignore_extra=False,
help=PROGRESS_LIST_HELP)
async def progress_list_by_user(ctx):
user_id, status = await user_get_id(ctx)
if status < 0:
return
# Find progresses for user_id
url = service_url + '/users/' + str(user_id) + '/progresses'
response = requests.get(url=url)
if response.status_code == 400:
await bad_request(ctx, response)
return
if response.status_code != 200:
await backend_error(ctx, response)
return
data = response.json()
await send_progresses_message(ctx, data)
@progress_commands.command(name='add',
ignore_extra=False,
help=PROGRESS_ADD_HELP)
async def progress_add(ctx, acgn_id, episode):
user_id, status = await user_get_id(ctx)
if status < 0:
return
url = service_url + '/progresses'
data = {
'user_id': user_id,
'acgn_id': acgn_id,
'episode': str(episode)
}
response = requests.post(url=url, data=data)
if response.status_code == 400:
await bad_request(ctx, response)
return
if response.status_code != 200:
await backend_error(ctx, response)
return
await ctx.send('Add Success.')
async def progress_find_id(ctx, acgn_id):
# Find progress_id using user_id and acgn_id
# Unlike user_get_id, doesn't automatically insert a record if not found
user_id, status = await user_get_id(ctx)
if status < 0:
return None, -1
url = service_url + '/users/' + str(user_id) + '/progresses'
params = {
'acgn_id': acgn_id
}
response = requests.get(url=url, params=params)
if response.status_code == 400:
await bad_request(ctx, response)
return None, -1
if response.status_code != 200:
await backend_error(ctx, response)
return None, -1
data = response.json()
# it should only contain one result
return data[0].get('_id'), 0
@progress_commands.command(name='update',
ignore_extra=False,
help=PROGRESS_UPDATE_HELP)
async def progress_update(ctx, acgn_id, episode):
progress_id, status = await progress_find_id(ctx, acgn_id)
if status < 0:
return
url = service_url + '/progresses/' + str(progress_id)
data = {
'episode': episode
}
response = requests.put(url=url, data=data)
if response.status_code == 400:
await bad_request(ctx, response)
return
if response.status_code != 200:
await backend_error(ctx, response)
return
await ctx.send('Update Success.')
def header_message(msg):
len_of_msg = len(msg)
return msg + '\n' + ('-' * len_of_msg)
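# Illustrative note (not in the original file):
# header_message('AcgnId: Title') returns the text followed by a dashed
# underline of the same length on the next line.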
async def send_block_message(ctx, msgs):
block_msg = '```\n'
if isinstance(msgs, list):
for msg in msgs:
block_msg += msg + '\n'
else:
block_msg += msgs + '\n'
block_msg += '```'
await ctx.send(block_msg)
async def send_acgns_message(ctx, data):
msgs = [f'There are {len(data)} results.\n']
if len(data) != 0:
header = header_message('AcgnId: Title (Final Episode)')
msgs.append(header)
for acgn in data:
msg = (f'{acgn.get("_id")}: {acgn.get("title")} '
f'({acgn.get("final_episode")})')
msgs.append(msg)
await send_block_message(ctx, msgs)
async def send_progresses_message(ctx, data):
msgs = [f'There are {len(data)} results.\n']
if len(data) != 0:
header = header_message('ProgressId: [UserId] AcgnId (Episode)')
msgs.append(header)
for progress in data:
msg = (f'{progress.get("_id")}: [{progress.get("user_id")}] '
f'{progress.get("acgn_id")} ({progress.get("episode")})')
msgs.append(msg)
await send_block_message(ctx, msgs)
async def backend_error(ctx, response):
await ctx.send('Internal Service Error')
message = response.json().get('message')
if message:
await ctx.send(message)
async def bad_request(ctx, response):
await ctx.send('Bad Request')
message = response.json().get('message')
if message:
await ctx.send(message)
async def no_subcommand_provided(ctx):
msg = (f'A subcommand is needed. You can type `{COMMAND_PREFIX}'
f'help {ctx.command}` for more info')
await ctx.send(msg)
async def command_not_found(ctx):
await ctx.send(f'No command called \"{ctx.subcommand_passed}\" found.')
| python |
# coding: utf-8
import magic
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.html import escape, format_html
from django.utils.translation import ugettext_lazy as _
from trojsten.submit import constants
from trojsten.submit.helpers import get_description_file_path, write_chunks_to_file
from trojsten.submit.models import Submit
class SourceSubmitForm(forms.Form):
LANGUAGE_CHOICES = (
(".", "Zisti podľa prípony"),
(".cc", "C++ (.cpp/.cc)"),
(".pas", "Pascal (.pas/.dpr)"),
(".c", "C (.c)"),
(".py", "Python 3.4 (.py/.py3)"),
(".hs", "Haskell (.hs)"),
(".cs", "C# (.cs)"),
(".java", "Java (.java)"),
)
submit_file = forms.FileField(
max_length=settings.UPLOADED_FILENAME_MAXLENGTH, allow_empty_file=True
)
language = forms.ChoiceField(label="Jazyk", choices=LANGUAGE_CHOICES)
class DescriptionSubmitForm(forms.Form):
submit_file = forms.FileField(
max_length=settings.UPLOADED_FILENAME_MAXLENGTH, allow_empty_file=True
)
def clean_submit_file(self):
sfile = self.cleaned_data["submit_file"]
mimetype = magic.from_buffer(self.cleaned_data["submit_file"].read(2048), mime=True)
if mimetype not in settings.SUBMIT_DESCRIPTION_ALLOWED_MIMETYPES:
raise forms.ValidationError(
format_html(
"Zaslaný súbor má nepodporovaný formát: {mimetype}<br />"
"Podporované sú súbory {allowed}",
mimetype=escape(mimetype),
allowed=escape(" ".join(settings.SUBMIT_DESCRIPTION_ALLOWED_EXTENSIONS)),
)
)
return sfile
class TestableZipSubmitForm(forms.Form):
submit_file = forms.FileField(
max_length=settings.UPLOADED_FILENAME_MAXLENGTH, allow_empty_file=True
)
def clean_submit_file(self):
sfile = self.cleaned_data["submit_file"]
if sfile:
if sfile.name.split(".")[-1].lower() != "zip":
raise forms.ValidationError("Zaslaný súbor nemá koncovku .zip")
else:
raise forms.ValidationError("Chýba súbor")
return sfile
class SubmitAdminForm(forms.ModelForm):
submit_file = forms.FileField(
max_length=settings.UPLOADED_FILENAME_MAXLENGTH,
allow_empty_file=True,
label=_("Submit file"),
help_text=_("Here you can upload a file with submit description"),
required=False,
)
def clean(self):
cleaned_data = super(SubmitAdminForm, self).clean()
if (
cleaned_data["submit_file"]
and cleaned_data["submit_type"] != constants.SUBMIT_TYPE_DESCRIPTION
):
raise ValidationError(
_("You can attach a submit file only to descriptions."), code="invalid"
)
return cleaned_data
def save(self, commit=True):
submit = super(SubmitAdminForm, self).save(commit)
file = self.cleaned_data.get("submit_file")
if file:
user = self.cleaned_data.get("user")
task = self.cleaned_data.get("task")
sfiletarget = get_description_file_path(file, user, task)
write_chunks_to_file(sfiletarget, file.chunks())
submit.filepath = sfiletarget
if commit:
submit.save()
return submit
class Meta:
model = Submit
fields = "__all__"
class TextSubmitForm(forms.Form):
submitted_text = forms.CharField(label="Riešenie:", max_length=512)
| python |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RAnnotationdbi(RPackage):
"""Manipulation of SQLite-based annotations in Bioconductor.
Implements a user-friendly interface for querying SQLite-based
annotation data packages."""
bioc = "AnnotationDbi"
version('1.56.2', commit='13fdc4a93852199ca6ec120a2fe1078f9f445f67')
version('1.52.0', commit='c4e0ca9bd65362ae9cad6a98d90f54267b0ae838')
version('1.46.1', commit='ff260913741d0fcf9487eeb1f44a6c6968ced5b9')
version('1.44.0', commit='ce191b08cfd612d014431325c26c91b11c5f13ac')
version('1.42.1', commit='71085b47ea2e1ef929bebe8b17eb8e8a573f98e3')
version('1.40.0', commit='e34dff07e10402eecbf95604a512bc1fc4edb127')
version('1.38.2', commit='67d46facba8c15fa5f0eb47c4e39b53dbdc67c36')
depends_on('r@2.7.0:', type=('build', 'run'))
depends_on('r-biocgenerics@0.15.10:', type=('build', 'run'))
depends_on('r-biocgenerics@0.23.1:', type=('build', 'run'), when='@1.40.0:')
depends_on('r-biocgenerics@0.29.2:', type=('build', 'run'), when='@1.46.1:')
depends_on('r-biobase@1.17.0:', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-dbi', type=('build', 'run'))
depends_on('r-rsqlite', type=('build', 'run'))
depends_on('r-s4vectors@0.9.25:', type=('build', 'run'))
depends_on('r-keggrest', type=('build', 'run'), when='@1.56.2:')
| python |
var = 5
a = f"Test: {var:d}" # cool formatting!
| python |
# -*- coding: utf-8 -*-
"""Dialog for selecting folders."""
from os import listdir
from pathlib import Path
import gi
gi.require_version(namespace='Gtk', version='3.0')
from gi.repository import Gtk
class MainWindow(Gtk.ApplicationWindow):
def __init__(self):
super().__init__()
self.set_title(title='Dialogo para selecionar pastas')
self.set_default_size(width=1366 / 2, height=768 / 2)
self.set_position(position=Gtk.WindowPosition.CENTER)
self.set_default_icon_from_file(filename='../assets/icons/icon.png')
self.set_border_width(border_width=10)
vbox = Gtk.Box.new(orientation=Gtk.Orientation.VERTICAL, spacing=10)
self.add(widget=vbox)
button_select_folder = Gtk.Button.new_with_label(label='Selecionar pasta')
button_select_folder.connect("clicked", self.open_select_folder)
vbox.add(widget=button_select_folder)
button_select_folders = Gtk.Button.new_with_label(label='Selecionar pastas')
button_select_folders.connect("clicked", self.open_select_folders)
vbox.add(widget=button_select_folders)
def open_select_folder(self, button):
dialog = Gtk.FileChooserDialog(
name='selecionar-pasta',
title='Selecionar Pasta',
parent=self,
modal=True,
action=Gtk.FileChooserAction.SELECT_FOLDER,
)
# Adding the buttons that will be displayed
dialog.add_buttons(
Gtk.STOCK_OPEN, Gtk.ResponseType.OK,
Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL
)
# Setting the default directory.
home = str(Path.home())
dialog.set_current_folder(filename=home)
# Running the dialog and getting the response.
response = dialog.run()
# Checking the received response.
if response == Gtk.ResponseType.OK:
print('Botão ABRIR pressionado')
print(f'Caminho até a pasta: {dialog.get_filename()}')
print(f'URI até a pasta: {dialog.get_uri()}')
folder = dialog.get_filename()
print(f'Conteudo da pasta {folder}:\n {listdir(folder)}')
# Closing the dialog.
dialog.destroy()
def open_select_folders(self, button):
dialog = Gtk.FileChooserDialog(
name='selecionar-pastas',
title='Selecionar Pastas',
parent=self,
modal=True,
action=Gtk.FileChooserAction.SELECT_FOLDER,
)
dialog.add_buttons(
Gtk.STOCK_OPEN, Gtk.ResponseType.OK,
Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL
)
# Enabling multiple selection.
dialog.set_select_multiple(select_multiple=True)
home = str(Path.home())
dialog.set_current_folder(filename=home)
response = dialog.run()
if response == Gtk.ResponseType.OK:
print('Botão ABRIR pressionado')
print(f'Caminho até a pasta: {dialog.get_filenames()}')
print(f'URI até a pasta: {dialog.get_uris()}')
folders = dialog.get_filenames()
for folder in folders:
print(f'Conteudo da pasta {folder}:\n {listdir(folder)}\n')
dialog.destroy()
if __name__ == '__main__':
win = MainWindow()
win.connect('destroy', Gtk.main_quit)
win.show_all()
Gtk.main()
| python |
# Taku Ito
# 2/22/2019
# General function modules for SRActFlow
# For group-level/cross-subject analyses
import numpy as np
import multiprocessing as mp
import scipy.stats as stats
import nibabel as nib
import statsmodels.api as sm
import sklearn
import h5py
import os
os.sys.path.append('glmScripts/')
import taskGLMPipeline_v2 as tgp
import sys
sys.path.append('utils/')
import loadExperimentalData as led
import tools
projectdir = '/home/ti61/f_mc1689_1/SRActFlow/'
glasserfile2 = projectdir + 'data/Q1-Q6_RelatedParcellation210.LR.CorticalAreas_dil_Colors.32k_fs_RL.dlabel.nii'
glasser2 = nib.load(glasserfile2).get_data()
glasser2 = np.squeeze(glasser2)
subjNums = ['013','014','016','017','018','021','023','024','026','027','028','030','031','032','033',
'034','035','037','038','039','040','041','042','043','045','046','047','048','049','050',
'053','055','056','057','058','062','063','066','067','068','069','070','072','074','075',
'076','077','081','085','086','087','088','090','092','093','094','095','097','098','099',
'101','102','103','104','105','106','108','109','110','111','112','114','115','117','119',
'120','121','122','123','124','125','126','127','128','129','130','131','132','134','135',
'136','137','138','139','140','141']
###############################################
# Begin script
#### Load original data
print('Load original motor response data')
nResponses = 2
data_task_rh = np.zeros((len(glasser2),nResponses,len(subjNums)))
data_task_lh = np.zeros((len(glasser2),nResponses,len(subjNums)))
scount = 0
for subj in subjNums:
data_task_rh[:,:,scount] = np.real(tools.loadMotorResponses(subj,hand='Right'))
data_task_lh[:,:,scount] = np.real(tools.loadMotorResponses(subj,hand='Left'))
scount += 1
####
# Isolate RH and LH vertices for motor response betas
tmp = np.squeeze(nib.load(projectdir + 'data/results/MAIN/MotorRegionsMasksPerSubj/sractflow_smn_outputRH_mask.dscalar.nii').get_data())
rh_ind = np.where(tmp==True)[0]
realdata_rh = data_task_rh[rh_ind,:,:].copy()
tmp = np.squeeze(nib.load(projectdir + 'data/results/MAIN/MotorRegionsMasksPerSubj/sractflow_smn_outputLH_mask.dscalar.nii').get_data())
lh_ind = np.where(tmp==True)[0]
realdata_lh = data_task_lh[lh_ind,:,:].copy()
h5f = h5py.File(projectdir + 'data/results/MAIN/MotorResponseBetas_OutputVertices.h5','a')
h5f.create_dataset('RH',data=realdata_rh)
h5f.create_dataset('LH',data=realdata_lh)
h5f.close()
| python |
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from finn.util.basic import launch_process_helper, which
def out_of_context_synth(
verilog_dir,
top_name,
fpga_part="xczu3eg-sbva484-1-e",
clk_name="ap_clk_0",
clk_period_ns=5.0,
):
"Run out-of-context Vivado synthesis, return resources and slack."
    # ensure that the OHMYXILINX envvar is set
if "OHMYXILINX" not in os.environ:
raise Exception("The environment variable OHMYXILINX is not defined.")
# ensure that vivado is in PATH: source $VIVADO_PATH/settings64.sh
if which("vivado") is None:
raise Exception("vivado is not in PATH, ensure settings64.sh is sourced.")
omx_path = os.environ["OHMYXILINX"]
script = "vivadocompile.sh"
# vivadocompile.sh <top-level-entity> <clock-name (optional)> <fpga-part (optional)>
call_omx = "zsh %s/%s %s %s %s %f" % (
omx_path,
script,
top_name,
clk_name,
fpga_part,
float(clk_period_ns),
)
call_omx = call_omx.split()
launch_process_helper(call_omx, proc_env=os.environ.copy(), cwd=verilog_dir)
vivado_proj_folder = "%s/results_%s" % (verilog_dir, top_name)
res_counts_path = vivado_proj_folder + "/res.txt"
with open(res_counts_path, "r") as myfile:
res_data = myfile.read().split("\n")
ret = {}
ret["vivado_proj_folder"] = vivado_proj_folder
for res_line in res_data:
res_fields = res_line.split("=")
print(res_fields)
try:
ret[res_fields[0]] = float(res_fields[1])
except ValueError:
ret[res_fields[0]] = 0
except IndexError:
ret[res_fields[0]] = 0
if ret["WNS"] == 0:
ret["fmax_mhz"] = 0
else:
ret["fmax_mhz"] = 1000.0 / (clk_period_ns - ret["WNS"])
return ret
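# Usage sketch (editor's addition; the directory and module name are hypothetical):
# assuming OHMYXILINX points at the oh-my-xilinx scripts and ./verilog holds a
# top-level module called "my_top", one could call:
#
#   res = out_of_context_synth("verilog", "my_top", clk_period_ns=4.0)
#   print(res["fmax_mhz"], res.get("LUT"))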
| python |
# Create an Excel file and save data from
# show version command using pandas
# (Implicitly uses xlsxwriter to create the Excel file)
import pandas as pd
from pandas import ExcelWriter
from netmiko import ConnectHandler
# Devices to SSH into
devices = [
{
"device_type": "cisco_ios",
"ip": "sandbox-iosxe-latest-1.cisco.com",
"username": "developer",
"password": "C1sco12345",
"port": 22,
"fast_cli": False,
},
{
"device_type": "cisco_ios",
"ip": "sandbox-iosxe-recomm-1.cisco.com",
"username": "developer",
"password": "C1sco12345",
"port": 22,
"fast_cli": False,
},
]
# Create an empty list to hold all dicts
output = []
# Name of exported excel file
excel_file = "Example4-3-Inventory-Details-pandas.xlsx"
with ExcelWriter(path=excel_file) as writer:
# Loop over all devices
for device in devices:
# Create a connection instance to each device
with ConnectHandler(**device) as net_connect:
facts = net_connect.send_command(
command_string="show version", use_textfsm=True
)
# Append the show command output to the `output` empty list
output.append(facts[0])
    # Create a data frame from the output list
df = (
pd.DataFrame(data=output)
.reindex( # to reorder the columns
columns=[
"hostname",
"serial",
"mac",
"hardware",
"rommon",
"version",
"running_image",
"reload_reason",
"uptime",
"restarted",
"config_register",
]
)
.rename( # Rename the columns header
columns={
"hostname": "Device Hostname",
"serial": "Serial Number",
"mac": "MAC Address",
"hardware": "Device Model",
"rommon": "SW Type",
"version": "SW Version",
"running_image": "Running Image",
"reload_reason": "Last Reload Reason",
"uptime": "Uptime",
"restarted": "Restarted at",
"config_register": "Config Register",
}
)
)
# Export data to an Excel file using to_excel from Pandas
    df.to_excel(
        excel_writer=writer,  # the ExcelWriter instance created above
        index=False,  # remove automatically generated first index column
        sheet_name="Device List using Pandas",
        verbose=True,  # show verbose output for errors (only supported by older pandas)
        freeze_panes=(1, 1),  # freeze top row & left-most column
        engine="xlsxwriter",  # the engine used to create the Excel file
    )
print("Done")
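# Quick verification (editor's sketch): read the exported sheet back with pandas.
#
#   print(pd.read_excel(excel_file, sheet_name="Device List using Pandas"))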
| python |
from pathlib import Path
from jina.peapods import Pod
import pytest
from fastapi import UploadFile
from jina.flow import Flow
from jina.enums import PodRoleType
from jina.peapods.pods import BasePod
from jina.parsers import set_pea_parser, set_pod_parser
from jinad.models import SinglePodModel
from jinad.store import InMemoryPeaStore, InMemoryPodStore, InMemoryFlowStore
cur_dir = Path(__file__).parent
def pod_list():
return [SinglePodModel(pod_role=PodRoleType.POD)]
def flow_file_str():
with open(str(cur_dir / 'yaml' / 'flow.yml'), 'r') as f:
config_str = f.read()
return config_str
@pytest.mark.parametrize('config', [flow_file_str(), pod_list()])
def test_flow_store(config):
store = InMemoryFlowStore()
with store._session():
flow_id, _, _ = store._create(config=config)
assert flow_id in store._store.keys()
assert isinstance(store._store[flow_id]['flow'], Flow)
store._delete(flow_id)
assert flow_id not in store._store.keys()
def test_flow_store_with_files(tmpdir):
config = flow_file_str()
file_yml = UploadFile(Path(tmpdir) / 'file1.yml')
file_py = UploadFile(Path(tmpdir) / 'file1.py')
files = [file_yml, file_py]
store = InMemoryFlowStore()
with store._session():
flow_id, _, _ = store._create(config=config, files=files)
assert Path(file_yml.filename).exists()
assert Path(file_py.filename).exists()
assert flow_id in store._store.keys()
assert isinstance(store._store[flow_id]['flow'], Flow)
store._delete(flow_id)
assert flow_id not in store._store.keys()
assert not Path(file_yml.filename).exists()
assert not Path(file_py.filename).exists()
def test_pod_store():
args = set_pod_parser().parse_args([])
store = InMemoryPodStore()
with store._session():
pod_id = store._create(pod_arguments=args)
assert pod_id in store._store.keys()
assert isinstance(store._store[pod_id]['pod'], BasePod)
store._delete(pod_id)
assert pod_id not in store._store.keys()
def test_pea_store():
args = set_pea_parser().parse_args([])
store = InMemoryPeaStore()
with store._session():
pea_id = store._create(pea_arguments=args)
assert pea_id in store._store.keys()
# assert isinstance(store._store[pea_id]['pea'], LocalRuntime)
store._delete(pea_id)
assert pea_id not in store._store.keys()
| python |
import sys
sys.path.append('../')
import TankModel as TM
import pylab as pl
import pandas as pd
pl.style.use('seaborn')
import numpy as np
def main():
data = pd.read_csv('../sample_data/tank_sample_data.csv')
rf = data['Pr'].values
et = data['ET'].values
obsQ = data['Q'].values
area = 2000
delTime = 24
    # calibrate & save the parameters
# param = TM.calibrate(rf,et,area,delTime,obsQ)
# np.savez('model_data.npz',param=param)
param = np.load('model_data.npz')['param']
simQ = TM.tankDischarge(rf,et,param,area,delTime)
pl.plot(simQ,label='sim')
pl.plot(obsQ,label='obs')
pl.legend()
pl.show()
if __name__ == '__main__':
main() | python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-11-30 20:34
from __future__ import unicode_literals
from django.db import migrations
def forward(apps, schema_editor):
db_alias = schema_editor.connection.alias
Message = apps.get_model("mailing", "Message")
MessageAuthor = apps.get_model("mailing", "MessageAuthor")
for message in Message.objects.using(db_alias).all():
message.new_author, _ = MessageAuthor.objects.using(db_alias).get_or_create(author_type=message.author_type, author_id=message.author_id)
message.save()
def backward(apps, schema_editor):
db_alias = schema_editor.connection.alias
Message = apps.get_model("mailing", "Message")
ContentType = apps.get_model("contenttypes", "ContentType")
for message in Message.objects.using(db_alias).all():
author_type = message.new_author.author_type
message.author_type = message.new_author.author_type
message.author_id = message.new_author.author_id
AuthorType = apps.get_model(author_type.app_label, author_type.model)
author = AuthorType.objects.get(pk=message.author_id)
if author_type.model == 'conference':
message.from_email = author.contact_email
else:
message.from_email = author.email
message.save()
class Migration(migrations.Migration):
dependencies = [
('mailing', '0003_auto_20171129_2155'),
]
operations = [
migrations.RunPython(forward, backward),
]
| python |
# Generated by Django 3.1.4 on 2020-12-12 22:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0003_auto_20201212_2213'),
]
operations = [
migrations.AlterField(
model_name='agent',
name='file',
field=models.FileField(blank=True, upload_to='uploads/'),
),
]
| python |
for _ in range(int(input())):
k, n = int(input()), int(input())
c = [list(range(1, n+1))]
for row in range(1, k+1):
c.append([sum(c[row-1][:column]) for column in range(1, n+1)])
print(c[k][n-1])
| python |
def get_add(n):
def add(x):
return x + n
return add
myadd = get_add(1)
assert 2 == myadd(1)
def foo():
x = 1
def bar(y):
def baz():
z = 1
return x + y + z
return baz
return bar(1)
assert 3 == foo()()
def change():
x = 1
def bar():
assert x == 2
x = 2
bar()
change()
print("ok")
| python |
def recurse(s, t, i, j, s1, t1):
# print(i, s[i], j, t[j], s1, t1)
if i == len(s) and j == len(t):
print(''.join(s1))
print(''.join(t1))
print()
return
if i < len(s):
recurse(s, t, i+1, j, s1 + [s[i]], t1 + ['-'])
if j < len(t):
recurse(s, t, i, j+1, s1 + ['-'], t1 + [t[j]])
if i < len(s) and j < len(t):
recurse(s, t, i+1, j+1, s1 + [s[i]], t1 + [t[j]])
s = "SMILES"
t = "ILEAC"
recurse(s, t, 0, 0, [], [])
| python |
import wx
from views.views_manager import *
class MainApp(wx.App):
def __init__(self):
wx.App.__init__(self)
# Initial the main window
self.views_manager = ViewsManager()
self.views_manager.main_window.Show()
self.main_window = self.views_manager.get_window("MainWindow")
self.SetTopWindow(self.main_window)
def OnInit(self):
return True
if __name__ == "__main__":
app = MainApp()
# Loop
app.MainLoop()
| python |
"""A module for testing Genomic Duplication Tokenization."""
import unittest
from variation.tokenizers import GenomicDuplication
from .tokenizer_base import TokenizerBase
class TestGenomicDuplicationTokenizer(TokenizerBase, unittest.TestCase):
"""A class for testing Genomic Duplication Tokenization."""
def tokenizer_instance(self):
"""Return Genomic Duplication instance."""
return GenomicDuplication()
def token_type(self):
"""Return genomic duplication token type."""
return "GenomicDuplication"
def fixture_name(self):
"""Return the fixture name for Genomic Duplication."""
return "genomic_duplication"
| python |
from django.conf import settings
from django.shortcuts import redirect
from django.urls import resolve
class DevToolsLoginRequiredMiddleware:
def __init__(self, get_response):
self.get_response = get_response
assert settings.APP_ENV in ("local", "test", "dev")
def __call__(self, request):
assert hasattr(request, "user")
if (
not request.user.is_authenticated
and resolve(request.path).app_name != "dev_tools"
):
return redirect(settings.LOGIN_URL)
response = self.get_response(request)
return response
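# Settings sketch (editor's addition; the dotted path is hypothetical): enable
# this middleware only in local/dev/test environments, e.g.
#
#   MIDDLEWARE += ["config.middleware.DevToolsLoginRequiredMiddleware"]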
| python |
import random
from unittest import TestCase
from guitarpractice.exercises.technique_hammers_pulls import technique_hammers_pulls
from guitarpractice.models import Beat
class TestHammersAndPulls(TestCase):
def test_level_one_has_eighth_notes(self):
random.seed(10)
result = technique_hammers_pulls(variation='level-1')
self.assertEqual(8, len(result.notes))
self.assertTrue(all(Beat(1, 8) == note.duration for note in result.notes))
def test_level_two_has_eighth_notes(self):
random.seed(10)
result = technique_hammers_pulls(variation='level-2')
self.assertEqual(8, len(result.notes))
self.assertTrue(all(Beat(1, 8) == note.duration for note in result.notes))
def test_level_two_can_have_sixteenth_notes(self):
random.seed(3)
result = technique_hammers_pulls(variation='level-2')
self.assertEqual(16, len(result.notes))
self.assertTrue(all(Beat(1, 16) == note.duration for note in result.notes))
| python |
#!/usr/bin/env python
'''Version Information Definition'''
__version_info__ = (0, 0, 4)
__version__ = ".".join(str(i) for i in __version_info__)
| python |
import blessed
BLESSED_VERSION = tuple(int(x) for x in blessed.__version__.split(".", 2)[:2])
if BLESSED_VERSION < (1, 17):
def link(term: blessed.Terminal, url: str, text: str, url_id: str = "") -> str:
return url
else:
def link(term: blessed.Terminal, url: str, text: str, url_id: str = "") -> str:
return term.link(url, text, url_id=url_id) # type: ignore
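# Usage sketch (editor's addition): render a clickable hyperlink where the
# terminal and blessed version support it, otherwise fall back to the raw URL.
#
#   term = blessed.Terminal()
#   print(link(term, "https://example.com", "example"))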
| python |
# REMOVE ELEMENT LEETCODE SOLUTION:
# creating a class.
class Solution(object):
# creating a function to delete the desired number from a given array.
def removeElement(self, nums, val):
# creating a while-loop to iterate for the time that the value is present in the array.
while val in nums:
# code to remove the desired value.
nums.remove(val)
# returning the modified array.
return(len(nums)) | python |
import adv.adv_test
import adv
from slot.d import *
from slot.a import *
def module():
return Celliera
class Celliera(adv.Adv):
a3 = ('a',0.08,'hp70')
conf = {}
conf['slots.a'] = RR()+JotS()
#conf['slots.d'] = DJ()
acl12 = """
`s1
`s2, seq=5
`s3
"""
acl21 = """
`s2, seq=5
`s1
`s3
"""
conf['acl'] = acl21
def d_slots(this):
if 'bow' in this.ex:
this.conf.slot.a = RR()+BN()
def prerun(this):
this.s2buff = adv.Selfbuff("s2_shapshifts1",1, 10,'ss','ss')
this.s2str = adv.Selfbuff("s2_str",0.25,10)
def s1_proc(this, e):
if this.s2buff.get():
this.s2buff.buff_end_timer.timing += 2.5
this.s2str.buff_end_timer.timing += 2.5
def s2_proc(this, e):
this.s2buff.on()
this.s2str.on()
if __name__ == '__main__':
conf = {}
adv.adv_test.test(module(), conf, verbose=0)
| python |
import os
import sys
import inspect
import unittest
import json
# For selecting the correct path
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir) + "/fsm"
sys.path.insert(0, parentdir)
from config import config_read
class Test_config_read(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""generates a fsm object before each test is launched
"""
cls._config = config_read("config","config.ini")
cls._config.read_config("../fsm/config", cls._config.get_f_name())
def test_void_path(self):
"""Checks the non void path parameter
"""
self.assertRaises(ValueError,config_read,"","config.ini")
def test_void_file_name(self):
"""Checks the non void f_name parameter
"""
self.assertRaises(ValueError,config_read,"config","")
def test_default_server_ip(self):
"""Checks that default server ip is localhost
"""
self.assertEqual("40.114.216.24",self._config.get_server_ip())
def test_default_header_json(self):
"""Checks that default header json is value
"""
value = json.dumps({"Content-type":"application/json","Accept":"text/plain"})
self.assertEqual(value.replace(" ",""),self._config.get_header_json())
def test_default_broker_ip(self):
"""Checks that default broker ip is 51.103.29.76
"""
value = "51.103.29.76"
self.assertEqual(value,self._config.get_broker_ip())
def test_default_root_topic(self):
"""Checks that default root topic is /HOME
"""
value = "/HOME"
self.assertEqual(value,self._config.get_root_topic())
def test_default_allowed_user(self):
"""Checks that default allowed user is POC
"""
value = "POC"
self.assertEqual(value,self._config.get_gateway_user())
| python |
# coding=utf-8
data_path = '../data'
cv_train_num = 100000  # used for cross-validation
train_num = 120000
test_num = 90000
w2v_dim = 300
seed = 2017
| python |
"""
Recall the definition of the Fibonacci numbers from “Rabbits and Recurrence Relations”, which followed the recurrence
relation Fn=Fn−1+Fn−2 and assumed that each pair of rabbits reaches maturity in one month and produces a single pair
of offspring (one male, one female) each subsequent month.
Our aim is to somehow modify this recurrence relation to achieve a dynamic programming solution in the case that all
rabbits die out after a fixed number of months. For example, if rabbits live for three months, they reproduce only
twice before dying.
Given: Positive integers n≤100 and m≤20.
Return: The total number of pairs of rabbits that will remain after the n-th month if all rabbits live for m months.
"""
from typing import List
import rps.dynamic_programming_problems.fibonacci as fib
def count_mortal_rabbits(lines: List[str]) -> str:
"""
:param lines: Line with time in month and lifespan of rabbits, separated by space
:return: Total number of rabbit pairs after that time
"""
line, = lines
time, lifespan = map(int, line.split())
rabbits = fib.count_mortal_rabbits(time, lifespan)
return f"{rabbits}"
| python |
import sys
import django
from django.conf import settings
def billing_function(shop):
return (5, 3, "test subscription")
configuration = {
"DEBUG": True,
"DATABASES": {"default": {"ENGINE": "django.db.backends.sqlite3"}},
"INSTALLED_APPS": [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"shopify_auth",
"django_toolbox.apps.billing",
],
"AUTHENTICATION_BACKENDS": ["shopify_auth.backends.ShopUserBackend"],
"TEMPLATES": [
{"BACKEND": "django.template.backends.django.DjangoTemplates", "APP_DIRS": True}
],
"ROOT_URLCONF": "django_toolbox.apps.billing.tests.urls",
"SHOPIFY_APP_NAME": "Test App",
"SHOPIFY_APP_API_KEY": "test-api-key",
"SHOPIFY_APP_API_SECRET": "test-api-secret",
"SHOPIFY_APP_API_SCOPE": ["read_products"],
"SHOPIFY_APP_IS_EMBEDDED": True,
"SHOPIFY_APP_DEV_MODE": False,
"SHOPIFY_APP_TEST_CHARGE": False,
"SHOPIFY_APP_API_VERSION": "2020-01",
"BILLING_FUNCTION": billing_function,
"BILLING_REDIRECT_URL": "success",
"APP_NAME": "test_app_name",
"MIDDLEWARE": [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
],
}
settings.configure(**configuration)
django.setup()
from django.test.runner import DiscoverRunner
test_runner = DiscoverRunner()
default_tests = [
"test_graphql_client",
"django_toolbox.apps.billing",
"django_toolbox.discounts",
]
if len(sys.argv[1:]) > 0:
tests = sys.argv[1:]
else:
tests = default_tests
failures = test_runner.run_tests(tests)
if failures:
sys.exit(failures)
| python |
# Natural Language Toolkit: Carnegie Mellon Pronouncing Dictionary Corpus Reader
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
The Carnegie Mellon Pronouncing Dictionary [cmudict.0.6]
ftp://ftp.cs.cmu.edu/project/speech/dict/
Copyright 1998 Carnegie Mellon University
File Format: Each line consists of an uppercased word, a counter
(for alternative pronunciations), and a transcription. Vowels are
marked for stress (1=primary, 2=secondary, 0=no stress). E.g.:
NATURAL 1 N AE1 CH ER0 AH0 L
The dictionary contains 127069 entries. Of these, 119400 words are assigned
a unique pronunciation, 6830 words have two pronunciations, and 839 words have
three or more pronunciations. Many of these are fast-speech variants.
Phonemes: There are 39 phonemes, as shown below:
Phoneme Example Translation Phoneme Example Translation
------- ------- ----------- ------- ------- -----------
AA odd AA D AE at AE T
AH hut HH AH T AO ought AO T
AW cow K AW AY hide HH AY D
B be B IY CH cheese CH IY Z
D dee D IY DH thee DH IY
EH Ed EH D ER hurt HH ER T
EY ate EY T F fee F IY
G green G R IY N HH he HH IY
IH it IH T IY eat IY T
JH gee JH IY K key K IY
L lee L IY M me M IY
N knee N IY NG ping P IH NG
OW oat OW T OY toy T OY
P pee P IY R read R IY D
S sea S IY SH she SH IY
T tea T IY TH theta TH EY T AH
UH hood HH UH D UW two T UW
V vee V IY W we W IY
Y yield Y IY L D Z zee Z IY
ZH seizure S IY ZH ER
"""
from util import *
from api import *
import os
from nltk.internals import deprecated
class CMUDictCorpusReader(CorpusReader):
def entries(self):
"""
@return: the cmudict lexicon as a list of entries
containing (word, identifier, transcription) tuples.
"""
return concat([StreamBackedCorpusView(filename, read_cmudict_block)
for filename in self.abspaths()])
def raw(self):
"""
@return: the cmudict lexicon as a raw string.
"""
return concat([open(filename).read()
for filename in self.abspaths()])
def words(self):
"""
@return: a list of all words defined in the cmudict lexicon.
"""
return [word for (word, num, transcription) in self.entries()]
def transcriptions(self):
"""
@return: the cmudict lexicon as a dictionary, whose keys are
upper case words and whose values are tuples of pronunciation
entries.
"""
lexicon = self.entries()
d = {}
for word, num, transcription in lexicon:
if num == 1:
d[word] = (transcription,)
else:
d[word] += (transcription,)
return d
#{ Deprecated since 0.8
@deprecated("Use .entries() or .transcriptions() instead.")
def read(self, items='cmudict', format='listed'):
if format == 'listed': return self.entries(items)
if format == 'dictionary': return self.transcriptions(items)
raise ValueError('bad format %r' % format)
@deprecated("Use .transcriptions() instead.")
def dictionary(self, items='cmudict'): return self.transcriptions(items)
@deprecated("Use .entries() instead.")
def listed(self, items='cmudict'): return self.entries(items)
#}
def read_cmudict_block(stream):
entries = []
while len(entries) < 100: # Read 100 at a time.
line = stream.readline()
if line == '': return entries # end of file.
pieces = line.split()
entries.append( (pieces[0], int(pieces[1]), tuple(pieces[2:])) )
return entries
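# Self-contained check of the block reader (editor's sketch): feed it a stream
# formatted like cmudict and inspect the parsed (word, counter, phones) tuples.
#
#   from io import StringIO
#   read_cmudict_block(StringIO("NATURAL 1 N AE1 CH ER0 AH0 L\n"))
#   # -> [('NATURAL', 1, ('N', 'AE1', 'CH', 'ER0', 'AH0', 'L'))]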
| python |
from ..data import platemap_to_dataframe, scale_plate
import pandas as pd
def read_multiple_plates(tables, read_single, platemap=None, **kwargs):
"""Reads data for one or more plates, then merges the data together.
This function simplifies reading and data reduction where you have either
1. multiple plates, each containing separate samples, and/or
2. each sample has multiple parameters measured (e.g OD600, A450, etc).
This function produces a ``DataFrame`` where each such ``measure`` (e.g.
OD600, FITC, A450, etc.) is in a separate column, and each physical well is
in a single row.
For each entry in ``table``, this function reads each of the ``measures`` in
that table and joins those measures horizontally (one measure per column);
then it concatenates ``table``\ s vertically, such that there is one row per well.
Each ``dict`` in ``tables`` represents a single plate, which may have multiple
``measures``. Each of the ``measures`` will be read and joined by well. The
union of parameters in each ``measure`` and ``table`` will be passed as
``**kwargs`` to ``read_single``.
Each ``table`` can have several keys which serve special functions. Other
keys will be passed as ``kwargs`` to ``read_single`` as above
* ``measures``: list of dicts, each representing a different variable.
Will be merged with ``table`` (values in the ``measure`` overwrite those
in the ``table``) and passed as ``**kwargs`` to ``read_single``.
* ``platemap``: dict containing platemap metadata that will be passed to
:func:`~microplates.data.platemap_to_dataframe`. The metadata from the ``platemap``
argument and from this key will be merged
* ``transform``: function that will be called with the ``DataFrame`` and ``table``,
and should return a new, possibly modified ``DataFrame``
* ``scale``: tuple ``(from_wells, to_wells)``; will be used to call
:func:`data.scale_plate`
Examples
--------
# single plate, multiple measures (OD600, FITC), each measure is in a
# separate tab of the spreadsheet
>>> read_multiple_plates([
... { 'io': 'plate1.xlsx', 'measures': [
... { 'sheet_name':'OD600', 'measure':'OD600' },
... { 'sheet_name':'FITC', 'measure':'FITC' }
... ]}
... ], read_single = pd.read_excel )
# multiple plates, in separate excel files
>>> read_multiple_plates([
... { 'io': 'plate1.xlsx', 'measure':'OD600', 'data': {'plate':1} },
... { 'io': 'plate2.xlsx', 'measure':'OD600', 'data': {'plate':2} }
... ], read_single = pd.read_excel )
# multiple plates in different tabs of the same excel file
>>> read_multiple_plates([
... { 'sheet_name': 'plate1', 'measure':'OD600', 'data': {'plate':1} },
... { 'sheet_name': 'plate2', 'measure':'OD600', 'data': {'plate':2} }
... ], read_single = pd.read_excel, io='plates.xlsx', measure='OD600' )
# multiple plates in same excel file; can read using a function from
# a submodule of microplates.io:
>>> read_multiple_plates([
... { 'sheet_name': 'plate1', 'measure':'OD600', 'data': {'plate':1} },
... { 'sheet_name': 'plate2', 'measure':'OD600', 'data': {'plate':2} }
... ],
... read_single=microplates.io.tecan.read_single,
... path='plates.xlsx', measure='OD600' )
Parameters
----------
tables : list of dicts
See examples
read_single : function
Function to read a single plate. Generally will be a function from
the `io` submodule. The values for a single `measure` or `table` will
be used as `**kwargs` for `read_single`
platemap : dict
Platemap; will be evaluated by `data.platemap_to_dataframe` and joined
to each `table`
**kwargs : dict, optional
Additional arguments will be merged into each ``table``, with values
from the ``table`` overwriting those in ``**kwargs``.
Returns
-------
    pandas.DataFrame
        Tidy ``DataFrame`` with one row per well, one column per measure, and
        the platemap metadata joined in.
"""
dfs = []
special_keys = set(["data","measures","transform","platemap","convert"])
if platemap is None:
platemap = {}
platemap = platemap_to_dataframe(platemap)
# for each file
for table in tables:
table = {**kwargs, **table}
# extract metadata to add as constant column
if "data" in table:
table_metadata = table["data"]
else:
table_metadata = {}
# if multiple tables are included in the file
if "measures" in table:
measures = table["measures"]
else:
measures = [table]
# if there is a function to modify this table, extract it
if "transform" in table:
transform = table["transform"]
else:
transform = None
# if there is a per-table platefile, grab it
if "platemap" in table:
table_platemap = table["platemap"]
else:
table_platemap = {}
table_platemap = platemap_to_dataframe(table_platemap)
# if instructions to broadcast the per-table mapfile from
# one microplate shape to another (e.g. 96 to 384), do the conversion
if "scale" in table:
convert_from, convert_to = table["scale"]
table_platemap = scale_plate(table_platemap, convert_from, convert_to)
table = {x: table[x] for x in table if x not in special_keys}
# for each table in the file
measure_dfs = []
for measure in measures:
measure_df = read_single(**{ **table, **measure })
measure_dfs.append(measure_df)
# concatenate different tables in this file, matching the wells
df = pd.concat(measure_dfs, join='inner', axis=1)
df = pd.merge(left=table_platemap, right=df, left_index=True, right_index=True)
# apply variables given for the whole table
for col in table_metadata:
# create any columns that don't exist
if col not in df:
df[col] = table_metadata[col]
df = df.fillna(table_metadata)
# apply an arbitrary transformation
if transform is not None:
df = transform(df, table)
dfs.append(df)
data = pd.concat(dfs, join='outer')
data = pd.merge(left=platemap, right=data, left_index=True, right_index=True)
return data
| python |
from application.infrastructure.error.errors import VCFHandlerBaseError
class SQLError(VCFHandlerBaseError):
message = "SQL error."
error_type = "SQLError"
class SQLAlchemyEngineNotInitializedError(SQLError):
message = "Not initialized SQLAlchemy Engine."
error_type = "SQLAlchemyEngineNotInitializedError"
| python |
__all__ = ["lammps_parser.py"]
| python |
"""
STATEMENT
Given a complete binary tree, count the number of nodes.
CLARIFICATIONS
- So, I can assume the tree is complete, or have to check for that? You can assume that.
- To reiterate, a complete binary tree only has the last level not filled. The last
level is filled from the left, if any.
EXAMPLES
(not drawn)
COMMENTS
- We first have to figure out the height h of the tree. We can do that going as far left
down as we can.
- Then, the leaves can be counted separately, given the height.
"""
def countNodes(root):
"""
:type root: TreeNode
:rtype: int
"""
if not root:
return 0
level = root
height = 0
while level.left != None:
height += 1
level = level.left
if not height:
return 1
return (2**(height))-1 + _countLeaves(root, height)
def _countLeaves(root, height):
if height == 0:
return 0
h, level = height, root
while level.left != None:
h -= 1
level = level.left
if h:
return 0
h, level = height, root
while level.right != None:
h -= 1
level = level.right
if not h:
return 2**height
level, h = root.left, height-1
if level == None:
return 1
while level.right != None:
h -= 1
level = level.right
if not h:
return 2**(height-1) + _countLeaves(root.right, height-1)
else:
return _countLeaves(root.left, height-1)
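def countNodesAlt(root):
    """Editor's sketch of a common alternative: if the left-edge and right-edge
    heights of a complete tree match, the tree is perfect and holds 2**h - 1
    nodes; otherwise recurse on the children. Runs in O(log^2 n)."""
    if not root:
        return 0
    left_h, node = 0, root
    while node:
        left_h += 1
        node = node.left
    right_h, node = 0, root
    while node:
        right_h += 1
        node = node.right
    if left_h == right_h:
        return 2 ** left_h - 1
    return 1 + countNodesAlt(root.left) + countNodesAlt(root.right)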
| python |
#
# Copyright (C) 2016-2019 by Nathan Lovato, Daniel Oakey, Razvan Radulescu, and contributors
#
# This file is part of Power Sequencer.
#
# Power Sequencer is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Power Sequencer is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with Power Sequencer. If
# not, see <https://www.gnu.org/licenses/>.
#
class ProjectSettings:
RESOLUTION_X = 1920
RESOLUTION_Y = 1080
PROXY_RESOLUTION_X = 640
PROXY_RESOLUTION_Y = 360
PROXY_STRING = "_proxy"
class FOLDER_NAMES:
AUDIO = "audio"
IMG = "img"
VIDEO = "video"
IMG_ASSETS = "-assets"
def __dir__(self):
return self.FOLDER_NAMES.AUDIO, self.FOLDER_NAMES.IMG, self.FOLDER_NAMES.VIDEO
class SequenceTypes:
"""
Tuples of identifiers to check if a strip is of a certain type or type group
"""
VIDEO = ("MOVIE", "MOVIECLIP", "META", "SCENE")
EFFECT = (
"CROSS",
"ADD",
"SUBTRACT",
"ALPHA_OVER",
"ALPHA_UNDER",
"GAMMA_CROSS",
"MULTIPLY",
"OVER_DROP",
"WIPE",
"GLOW",
"TRANSFORM",
"COLOR",
"SPEED",
"ADJUSTMENT",
"GAUSSIAN_BLUR",
)
TRANSITION = ("CROSS", "GAMMA_CROSS", "WIPE")
SOUND = ("SOUND",)
IMAGE = ("IMAGE",)
TRANSITIONABLE = (
VIDEO + IMAGE + ("MULTICAM", "GAUSSIAN_BLUR", "TRANSFORM", "ADJUSTMENT", "SPEED")
)
# Strips that can be cut. If most effect strips are linked to their inputs
# and shouldn't be cut, some can be edited directly
CUTABLE = VIDEO + SOUND + IMAGE + ("MULTICAM", "COLOR", "ADJUSTMENT")
EXTENSIONS_IMG = (
"jpeg",
"jpg",
"png",
"tga",
"tiff",
"tif",
"exr",
"hdr",
"bmp",
"cin",
"dpx",
"psd",
)
EXTENSIONS_AUDIO = (".wav", ".mp3", ".ogg", ".flac", ".opus")
EXTENSIONS_VIDEO = (
".mp4",
".avi",
".mts",
".flv",
".mkv",
".mov",
".mpg",
".mpeg",
".vob",
".ogv",
"webm",
)
EXTENSIONS_ALL = tuple(list(EXTENSIONS_IMG) + list(EXTENSIONS_AUDIO) + list(EXTENSIONS_VIDEO))
class Extensions:
"""
Tuples of file types for checks when importing files
"""
DICT = {"img": EXTENSIONS_IMG, "audio": EXTENSIONS_AUDIO, "video": EXTENSIONS_VIDEO}
class SearchMode:
NEXT = 1
CHANNEL = 2
ALL = 3
| python |
# coding: utf-8
from dHydra.console import *
import time
"""
Demonstrates how to call the start_worker function to launch a process (with arguments).
It starts the full-market CTP futures quote source and the process that stores the data in MongoDB.
Note that the processes started here all use the configuration files under the ./config folder,
while ctp.json with the account settings lives in the os.getcwd() directory (same level as config).
"""
# Storage (persist quotes to MongoDB)
start_worker(
worker_name="CtpMdToMongo",
nickname="CtpMdToMongo",
config="CtpMd.json"
)
time.sleep(4)
# Start the market data (quote) source
start_worker(
worker_name="CtpMd",
nickname="CtpMd",
account="ctp.json",
config="CtpMd.json"
) | python |
# import argparse
import datetime as dt
from src.config.appConfig import getJsonConfig, initConfigs
from src.app.monthlyReportGenerator import MonthlyReportGenerator
import cx_Oracle
initConfigs()
# get app config
appConfig = getJsonConfig()
cxOraclePath = appConfig['cxOraclePath']
if not cxOraclePath == '':
cx_Oracle.init_oracle_client(lib_dir= cxOraclePath)
# get app db connection string from config file
appDbConStr: str = appConfig['appDbConnStr']
outageDbConStr :str = appConfig['outageDbConnStr']
dumpFolder: str = appConfig['dumpFolder']
# generate report word file monthly_rep_template
tmplPath: str = "templates/monthly_rep_template.docx"
# create weekly report
mnthlyRprtGntr = MonthlyReportGenerator(appDbConStr,outageDbConStr)
monthDt = dt.datetime(2021,6,1)
mnthlyRprtGntr.generateMonthlyReport(monthDt, tmplPath, dumpFolder)
print('Report generation Done')
| python |
import os
import hashlib
import socket
def application(msg,address):
lim = "|:|:|"
while 1:
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.settimeout(10)
seq = 0
fil = open('new_' + msg, 'w');
try:
print('entered')
trial = 0
send = s1.sendto(msg, address)
print('Receiving indefinetly...')
while 1:
print('waiting to receive...')
try:
trial = 0
info, serv_addr = s1.recvfrom(4096)
except:
print('requesting again...')
if trial < 11:
trial = trial + 1
print('connection timeout...retrying...\n')
continue
else:
print('removing the empty file created in the location...')
print('maximum trials out...\n')
os.remove('new_' + msg)
break
c_hash = hashlib.sha1(info.split(lim)[3]).hexdigest()
seq_no = info.split(lim)[1]
if info.split(lim)[0] == c_hash and seq == int(seq_no == True):
print('check sum matched...')
pack_len = info.split(lim)[2]
if info.split(lim)[3] == 'FNF':
print('requested file not found...')
print('removing the empty file created in the location...')
os.remove('new_' + msg)
else:
fil.write(info.split(lim)[3])
print(('sequence number: ' + seq_no + '\npacket size: ' + pack_len))
msg = (str(seq_no) + "," + pack_len)
send = s1.sendto(msg, serv_addr)
else:
print('checksum mismatch detected, dropping packet...')
print(('Server hash: ' + info.split(lim)[0]))
print(('Client hash: ' + c_hash))
continue
if int(pack_len) < 500:
seq_no = int(not seq_no)
break
finally:
print('closing the socket')
s1.close()
fil.close()
break
server_address = input('enter the server ip as a string: ')
server_port = input('enter the port number: ')
address = (server_address, server_port)
msg = (input('enter the required file name:'))
application(msg,address)
| python |
import os
import sys
import time
import mmap
import requests
import zipfile
import tarfile
import logging
import resource
import progressbar
from urllib.parse import urljoin
from urllib.parse import urlparse
from django.utils.translation import ugettext as _
from ... import defaults as defs
logger = logging.getLogger('geoware.downloader')
class FileDownloader(object):
"""
Class to download files.
"""
cache_dir = defs.GEOWARE_DATA_DIR
response_headers = None
def __init__(self, stdout=None):
self.stdout = stdout or sys.stdout
if not os.path.exists(self.cache_dir):
os.mkdir(self.cache_dir)
def _is_file_up2date(self):
"""
        Returns True if the local file is already up to date with the remote file.
"""
up2date = False
local_path = self.downloaded_file_path
if os.path.isfile(local_path):
response_headers = self._get_remote_file_info()
if response_headers:
ltime = time.gmtime(os.path.getmtime(local_path))
lsize = os.path.getsize(local_path)
rtime = time.strptime(response_headers['last-modified'].strip(), '%a, %d %b %Y %H:%M:%S %Z')
rsize = int(response_headers['content-length'].strip())
if ltime >= rtime or lsize == rsize:
up2date = True
return up2date
def _get_remote_file_info(self):
"""
Returns the response headers for URL.
"""
if not self.response_headers:
resp = requests.head(self.remote_file_path)
if resp.status_code == requests.codes.ok:
self.response_headers = resp.headers
return self.response_headers
def _get_remote_file_size(self):
"""
Returns the remote file size.
"""
headers = self._get_remote_file_info()
size = int(headers['content-length'].strip())
return size
def _get_progress_widgets(self):
"""
Returns the progress widgets for a file download.
"""
format_custom_text = progressbar.FormatCustomText(
'Fetching [ %(file)s ] :: ', dict(file=self.remote_file_name),
)
widgets = [
format_custom_text,
progressbar.ETA(),
progressbar.Percentage(),
progressbar.Bar(),
]
return widgets
def stage(self, file_type=None):
"""
Stage the next file download.
"""
self.file_info = defs.GEOWARE_FILE_DICT.get(file_type)
if not self.file_info:
raise Exception("Invalid File Type {type}".format(type=file_type))
def download(self, force=False):
"""
Returns the path to a newly downloaded, or an unchanged file.
"""
up2date = self._is_file_up2date()
if up2date and not force:
self.stdout.write("Fetched file from cache ({file})\n".format(file=self.downloaded_file_name))
return self.downloaded_file_path
self.stdout.write("Downloading file ({file}) from ({host})\n".format(
file=self.remote_file_name, host=urlparse(self.remote_file_path).hostname))
resp = requests.get(self.remote_file_path, stream=True)
if resp.status_code != requests.codes.ok:
self.stdout.write("Download failed with ({status})\n".format(code=resp.status_code))
return None
size_so_far = 0
chunk_size = 4096
total_size = self._get_remote_file_size()
widgets = self._get_progress_widgets()
with progressbar.ProgressBar(max_value=total_size, widgets=widgets) as pbar:
with open(self.downloaded_file_path, 'wb') as aFile:
for chunk in resp.iter_content(chunk_size=chunk_size):
size_so_far += len(chunk)
if chunk:
aFile.write(chunk)
pbar.update(size_so_far)
self.stdout.write("Fetched file from server ({file})\n".format(file=self.downloaded_file_name))
return self.downloaded_file_path
def extract(self):
"""
Extract a compressed file.
"""
file_path = self.downloaded_file_path
filetype = file_path.split('?')[0]
if filetype.endswith('.txt'):
return file_path
if filetype.endswith('.zip'):
extractor, mode = zipfile.ZipFile, 'r'
elif filetype.endswith('.tar.gz') or file_path.endswith('.tgz'):
extractor, mode = tarfile.open, 'r:gz'
elif filetype.endswith('.tar.bz2') or file_path.endswith('.tbz'):
extractor, mode = tarfile.open, 'r:bz2'
else:
self.stdout.write("Unable to extract file ({file})\n".format(file=self.downloaded_file_name))
return None
cwd = os.getcwd()
os.chdir(self.cache_dir)
try:
efile = extractor(file_path, mode)
try:
efile.extractall()
except Exception as err:
self.stdout.write("File failed to extract fully\n({file})\n".format(file=self.downloaded_file_path))
return
finally:
efile.close()
except Exception as err:
self.stdout.write("Unable to extract. Bad or corrupted file\n({file})\n".format(file=self.downloaded_file_path))
return
finally:
os.chdir(cwd)
self.stdout.write("Extracted file ({file})\n".format(file=self.extracted_file_name))
return self.extracted_file_path
@property
def remote_file_name(self):
return self.file_info['remote']
@property
def remote_file_path(self):
return urljoin(self.file_info['url'], self.file_info['remote'])
@property
def downloaded_file_name(self):
return self.remote_file_name
@property
def downloaded_file_path(self):
return os.path.abspath(os.path.join(self.cache_dir, self.remote_file_name))
@property
def extracted_file_name(self):
return self.file_info['local']
@property
def extracted_file_path(self):
return os.path.abspath(os.path.join(self.cache_dir, self.extracted_file_name))
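# Typical call sequence (editor's sketch; 'country' stands in for whichever key
# exists in defs.GEOWARE_FILE_DICT):
#
#   downloader = FileDownloader()
#   downloader.stage('country')
#   if downloader.download():
#       downloader.extract()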
| python |
def sum_numbers(first_int, second_int):
"""Returns the sum of the two integers"""
result = first_int + second_int
return result
def subtract(third_int):
"""Returns the difference between the
result of sum_numbers and the third integer"""
diff = sum_numbers(first_int=number_1, second_int=number_2) - third_int
return diff
def add_and_subtract(first_int, second_int, third_int):
"""Receives all the three integers and
returns the other two functions"""
sum_numbers(first_int, second_int)
subtract(third_int)
number_1 = int(input())
number_2 = int(input())
number_3 = int(input())
add_and_subtract(number_1, number_2, number_3)
print(subtract(number_3))
# def sum_numbers(num_1: int, num_2: int):
# """Returns the sum of the two arguments"""
#
# total = num_1 + num_2
#
# return total
#
# def subtract(sum_1: int, num_3: int):
# """Returns the difference between sum_numbers
# and num_3"""
#
# difference = sum_1 - num_3
#
# return difference
#
# def add_and_subtract(num_1: int, num_2: int, num_3: int):
# """Receives all the three integers and
# returns the other two functions"""
#
# sum_1 = sum_numbers(num_1, num_2)
# result = subtract(sum_1, num_3)
#
# return result
#
# number_1 = int(input())
# number_2 = int(input())
# number_3 = int(input())
#
# print(add_and_subtract(number_1, number_2, number_3)) | python |
import re
from typing import Dict, Iterable, List, cast
import emoji
from discord import Message
from discord.ext.commands import Bot, Cog, Context, command
DEFAULT_VOTE_EMOJIS = ("👍", "👎")
CUSTOM_EMOJI_PATTERN = re.compile(r"\<\:\w+\:\d+\>")
class VoteCog(Cog, name="commanderbot.ext.vote"):
def __init__(self, bot: Bot):
self.bot: Bot = bot
@staticmethod
def get_emojis(message: Message) -> Iterable[str]:
# Get message content and cast it to a string
message_content: str = str(message.clean_content)
# Find unicode and custom emojis in the message
found_emojis: List[Dict[str, int | str]] = emoji.emoji_lis(message_content)
for custom_emoji in CUSTOM_EMOJI_PATTERN.finditer(message_content):
found_emojis.append(
{"location": custom_emoji.start(), "emoji": custom_emoji.group()}
)
# Return early with the default emojis if no emojis were found
if not found_emojis:
return DEFAULT_VOTE_EMOJIS
# Create a list of unique emojis that are sorted in the order they appeared
emojis: List[str] = []
for e in sorted(found_emojis, key=lambda i: i["location"]):
emoji_char: str = str(e["emoji"])
if emoji_char not in emojis:
emojis.append(emoji_char)
return emojis
@command(name="vote")
async def cmd_vote(self, ctx: Context):
# Determine which emoji reactions to seed the message with, silently ignoring
# errors raised by any individual emoji.
        for emoji_str in self.get_emojis(cast(Message, ctx.message)):
            try:
                await ctx.message.add_reaction(emoji_str)
except:
pass
| python |
from .__geoplot import bokeh_geoplot as Geoplot | python |
from django.urls import path, include
from django.contrib import admin
app_name = 'app'
urlpatterns = [
path('admin/', admin.site.urls, name='admin-index'),
path('admin1/', include('core.app.urls.admin.urls')),
path('', include('core.app.urls.site.urls')),
]
| python |
from __future__ import unicode_literals, division
import array
from collections import defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.externals import six
from sklearn.externals.six.moves import xrange
from sklearn.preprocessing import normalize
from sklearn.feature_extraction.hashing import FeatureHasher
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from sklearn.utils.validation import check_is_fitted,check_array, FLOAT_DTYPES
from sklearn.utils.fixes import sp_version
try:  # Mapping is needed by validate_vocabulary below
    from collections.abc import Mapping
except ImportError:  # Python 2 fallback
    from collections import Mapping
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
normalized = unicodedata.normalize('NFKD', s)
if normalized == s:
return s
else:
return ''.join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
if min_n == 1:
# no need to do any slicing for unigrams
# just iterate through the original tokens
tokens = list(original_tokens)
min_n += 1
else:
tokens = []
n_original_tokens = len(original_tokens)
# bind method outside of loop to reduce overhead
tokens_append = tokens.append
space_join = " ".join
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens_append(space_join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
min_n, max_n = self.ngram_range
if min_n == 1:
# no need to do any slicing for unigrams
# iterate through the string
ngrams = list(text_document)
min_n += 1
else:
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams_append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams_append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams_append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return check_stop_list(self.stop_words)
def check_stop_words_consistency(self, stop_words, preprocess, tokenize):
# NB: stop_words is validated, unlike self.stop_words
if id(self.stop_words) != getattr(self, '_stop_words_id', None):
inconsistent = set()
for w in stop_words or ():
tokens = list(tokenize(preprocess(w)))
for token in tokens:
if token not in stop_words:
inconsistent.add(token)
self._stop_words_id = id(self.stop_words)
if inconsistent:
warnings.warn('Your stop_words may be inconsistent with your '
'preprocessing. Tokenizing the stop words '
'generated tokens %r not in stop_words.' %
sorted(inconsistent))
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
self.check_stop_words_consistency(stop_words, preprocess,
tokenize)
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
def _validate_params(self):
"""Check validity of ngram_range parameter"""
min_n, max_m = self.ngram_range
if min_n > max_m:
raise ValueError(
"Invalid value for ngram_range=%s "
"lower boundary larger than the upper boundary."
% str(self.ngram_range))
class HashingVectorizer(BaseEstimator, VectorizerMixin, TransformerMixin):
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', alternate_sign=True,
non_negative=False, dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.alternate_sign = alternate_sign
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
return self
def fit(self, X, y=None):
# triggers a parameter validation
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._validate_params()
self._get_hasher().fit(X, y=y)
return self
def transform(self, X):
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._validate_params()
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X)
    def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
alternate_sign=self.alternate_sign,
non_negative=self.non_negative)
def document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return np.bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(X.indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
vocabulary[term] = new_val
map_index[old_val] = new_val
X.indices = map_index.take(X.indices, mode='clip')
return X
def limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = []
indptr = []
values = make_int_array()
indptr.append(0)
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
j_indices.extend(feature_counter.keys())
values.extend(feature_counter.values())
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
        if indptr[-1] > 2147483648:  # = 2**31
if sp_version >= (0, 14):
indices_dtype = np.int64
else:
raise ValueError(('sparse CSR array has {} non-zero '
'elements and requires 64 bit indexing, '
' which is unsupported with scipy {}. '
'Please upgrade to scipy >=0.14')
.format(indptr[-1], '.'.join(sp_version)))
else:
indices_dtype = np.int32
j_indices = np.asarray(j_indices, dtype=indices_dtype)
indptr = np.asarray(indptr, dtype=indices_dtype)
values = np.frombuffer(values, dtype=np.intc)
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sort_indices()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._validate_params()
self.validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self.sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self.limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
if not hasattr(self, 'vocabulary_'):
            self.validate_vocabulary()
self.check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
self.check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
if not hasattr(self, 'vocabulary_'):
self.validate_vocabulary()
self.check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
if not sp.issparse(X):
X = sp.csr_matrix(X)
dtype = X.dtype if X.dtype in FLOAT_DTYPES else np.float64
if self.use_idf:
n_samples, n_features = X.shape
df = document_frequency(X).astype(dtype)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
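            # Added note (illustration, not from the original source): with
            # smooth_idf enabled this computes idf(t) = ln((1 + n) / (1 + df(t))) + 1.
            # For example, a term that appears in every one of 4 documents gets
            # idf = ln(5 / 5) + 1 = 1, so its counts are kept rather than zeroed out.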
idf = np.log(n_samples / df) + 1
self.idf_diag = sp.diags(idf, offsets=0,
shape=(n_features, n_features),
format='csr',
dtype=dtype)
return self
def transform(self, X, copy=True):
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES, copy=copy)
if not sp.issparse(X):
X = sp.csr_matrix(X, dtype=np.float64)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, 'idf_diag', 'idf vector is not fitted')
expected_n_features = self.idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self.idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
        # if idf_diag is not set, this will raise an attribute error,
# which means hasattr(self, "idf_") is False
return np.ravel(self.idf_diag.sum(axis=0))
@idf_.setter
def idf_(self, value):
value = np.asarray(value, dtype=np.float64)
n_features = value.shape[0]
self.idf_diag = sp.spdiags(value, diags=0, m=n_features,
n=n_features, format='csr')
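# Illustrative usage sketch for TfidfTransformer (added; not part of the original
# module). The numbers are arbitrary and only show the expected behaviour:
#
#     counts = sp.csr_matrix([[3, 0, 1],
#                             [2, 0, 0],
#                             [3, 0, 0],
#                             [4, 0, 0]])
#     tfidf = TfidfTransformer(smooth_idf=True).fit(counts)
#     tfidf.transform(counts).shape   # -> (4, 3)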
class TfidfVectorizer(CountVectorizer):
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.float64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self.tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self.tfidf.norm
@norm.setter
def norm(self, value):
self.tfidf.norm = value
@property
def use_idf(self):
return self.tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self.tfidf.use_idf = value
@property
def smooth_idf(self):
return self.tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self.tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self.tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self.tfidf.sublinear_tf = value
@property
def idf_(self):
return self.tfidf.idf_
@idf_.setter
def idf_(self, value):
self.validate_vocabulary()
if hasattr(self, 'vocabulary_'):
if len(self.vocabulary_) != len(value):
raise ValueError("idf length = %d must be equal "
"to vocabulary size = %d" %
                                 (len(value), len(self.vocabulary_)))
self.tfidf.idf_ = value
def check_params(self):
if self.dtype not in FLOAT_DTYPES:
warnings.warn("Only {} 'dtype' should be used. {} 'dtype' will "
"be converted to np.float64."
.format(FLOAT_DTYPES, self.dtype),
UserWarning)
def fit(self, raw_documents, y=None):
self.check_params()
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self.tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
self.check_params()
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self.tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self.tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
check_is_fitted(self, 'tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self.tfidf.transform(X, copy=False) | python |
import unittest
import xraylib
class TestCompoundParser(unittest.TestCase):
def test_good_compounds(self):
self.assertIsInstance(xraylib.CompoundParser("C19H29COOH"), dict)
self.assertIsInstance(xraylib.CompoundParser("C12H10"), dict)
self.assertIsInstance(xraylib.CompoundParser("C12H6O2"), dict)
self.assertIsInstance(xraylib.CompoundParser("C6H5Br"), dict)
self.assertIsInstance(xraylib.CompoundParser("C3H4OH(COOH)3"), dict)
self.assertIsInstance(xraylib.CompoundParser("HOCH2CH2OH"), dict)
self.assertIsInstance(xraylib.CompoundParser("C5H11NO2"), dict)
self.assertIsInstance(xraylib.CompoundParser("CH3CH(CH3)CH3"), dict)
self.assertIsInstance(xraylib.CompoundParser("NH2CH(C4H5N2)COOH"), dict)
self.assertIsInstance(xraylib.CompoundParser("H2O"), dict)
self.assertIsInstance(xraylib.CompoundParser("Ca5(PO4)3F"), dict)
self.assertIsInstance(xraylib.CompoundParser("Ca5(PO4)3OH"), dict)
self.assertIsInstance(xraylib.CompoundParser("Ca5.522(PO4.48)3OH"), dict)
self.assertIsInstance(xraylib.CompoundParser("Ca5.522(PO.448)3OH"), dict)
def test_bad_compounds(self):
with self.assertRaises(ValueError):
xraylib.CompoundParser("CuI2ww")
with self.assertRaises(ValueError):
xraylib.CompoundParser("0C")
with self.assertRaises(ValueError):
xraylib.CompoundParser("2O")
with self.assertRaises(ValueError):
xraylib.CompoundParser("13Li")
with self.assertRaises(ValueError):
xraylib.CompoundParser("2(NO3)")
with self.assertRaises(ValueError):
xraylib.CompoundParser("H(2)")
with self.assertRaises(ValueError):
xraylib.CompoundParser("Ba(12)")
with self.assertRaises(ValueError):
xraylib.CompoundParser("Cr(5)3")
with self.assertRaises(ValueError):
xraylib.CompoundParser("Pb(13)2")
with self.assertRaises(ValueError):
xraylib.CompoundParser("Au(22)11")
with self.assertRaises(ValueError):
xraylib.CompoundParser("Au11(H3PO4)2)")
with self.assertRaises(ValueError):
xraylib.CompoundParser("Au11(H3PO4))2")
with self.assertRaises(ValueError):
xraylib.CompoundParser("Au(11(H3PO4))2")
with self.assertRaises(ValueError):
xraylib.CompoundParser("Ca5.522(PO.44.8)3OH")
with self.assertRaises(ValueError):
xraylib.CompoundParser("Ba[12]")
with self.assertRaises(ValueError):
xraylib.CompoundParser("Auu1")
with self.assertRaises(ValueError):
xraylib.CompoundParser("AuL1")
with self.assertRaises(ValueError):
xraylib.CompoundParser(None)
with self.assertRaises(ValueError):
xraylib.CompoundParser(" ")
with self.assertRaises(ValueError):
xraylib.CompoundParser("\t")
with self.assertRaises(ValueError):
xraylib.CompoundParser("\n")
with self.assertRaises(ValueError):
xraylib.CompoundParser("Au L1")
with self.assertRaises(ValueError):
xraylib.CompoundParser("Au\tFe")
with self.assertRaises(TypeError):
xraylib.CompoundParser(26)
def test_H2SO4(self):
cd = xraylib.CompoundParser('H2SO4')
self.assertEqual(cd['nElements'], 3)
self.assertEqual(cd['molarMass'], 98.09)
self.assertEqual(cd['nAtomsAll'], 7.0)
self.assertEqual(cd['Elements'], (1, 8, 16))
self.assertAlmostEqual(cd['massFractions'], (0.02059333265368539, 0.6524620246712203, 0.32694464267509427))
self.assertAlmostEqual(cd['nAtoms'], (2.0, 4.0, 1.0))
class TestSymbolToAtomicNumber(unittest.TestCase):
def test_Fe(self):
self.assertEqual(xraylib.SymbolToAtomicNumber('Fe'), 26)
def test_bad_symbol(self):
with self.assertRaises(ValueError):
xraylib.SymbolToAtomicNumber('Uu')
def test_bad_type(self):
with self.assertRaises(TypeError):
xraylib.SymbolToAtomicNumber(26)
with self.assertRaises(ValueError):
xraylib.SymbolToAtomicNumber(None)
class TestAtomicNumberToSymbol(unittest.TestCase):
def test_Fe(self):
self.assertEqual(xraylib.AtomicNumberToSymbol(26), 'Fe')
def test_bad_symbol(self):
with self.assertRaises(ValueError):
xraylib.AtomicNumberToSymbol(-2)
with self.assertRaises(ValueError):
xraylib.AtomicNumberToSymbol(108)
def test_bad_type(self):
with self.assertRaises(TypeError):
xraylib.AtomicNumberToSymbol("26")
with self.assertRaises(TypeError):
xraylib.AtomicNumberToSymbol("Fe")
with self.assertRaises(TypeError):
xraylib.AtomicNumberToSymbol(None)
class TestCrossValidation(unittest.TestCase):
def test(self):
for Z in range(1, 108):
symbol = xraylib.AtomicNumberToSymbol(Z)
self.assertEqual(xraylib.SymbolToAtomicNumber(symbol), Z)
if __name__ == '__main__':
unittest.main(verbosity=2)
| python |
#!/usr/bin/env python
#--------------------------------------------------------------
# Function to add the aperture class instances to the SNS linac lattice.
# These apertures do not belong to particular accelerator elements,
# so we create them as markers: MEBT:ChpPlt:Entr and MEBT:ChpPlt:Exit
#--------------------------------------------------------------
import math
import sys
import os
from orbit.py_linac.lattice import LinacApertureNode
from orbit.py_linac.lattice import Quad
def AddMEBTChopperPlatesAperturesToSNS_Lattice(accLattice,aprtNodes):
"""
Function will add two Aperture nodes at the entrance and exit of
MEBT chopper plates. It returns the list of Aperture nodes.
"""
x_size = 0.060
y_size = 0.018
shape = 3
node_pos_dict = accLattice.getNodePositionsDict()
node1 = accLattice.getNodesForName("MEBT:ChpPlt:Entr")[0]
node2 = accLattice.getNodesForName("MEBT:ChpPlt:Exit")[0]
for node in [node1,node2]:
node_name = node.getName()
(posBefore, posAfter) = node_pos_dict[node]
apertureNode = LinacApertureNode(shape,x_size/2.0,y_size/2.0,posBefore)
apertureNode.setName(node_name+":Aprt")
apertureNode.setSequence(node.getSequence())
node.addChildNode(apertureNode,node.ENTRANCE)
aprtNodes.append(apertureNode)
aprtNodes = sorted(aprtNodes, key = lambda x: x.getPosition(), reverse = False)
return aprtNodes
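# Usage sketch (added illustration, not in the original script): given an SNS
# MEBT linac lattice object built elsewhere, the call collects the two chopper
# plate aperture nodes into an (initially empty) list:
#
#     aprtNodes = AddMEBTChopperPlatesAperturesToSNS_Lattice(accLattice, [])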
| python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('employees', '0007_employee_email'),
]
operations = [
migrations.CreateModel(
name='DrivingLicence',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('licence_number', models.CharField(default=None, max_length=120, null=True, blank=True)),
('date_of_issue', models.DateField(null=True, blank=True)),
('expiry_date', models.DateField(null=True, blank=True)),
('code', models.CharField(default=None, max_length=120, null=True, blank=True, choices=[('A', 'A-Motor Cycle'), ('A1', 'A1-Motor Cycle LTE 125cc'), ('B', 'B-Light Motor Vehicle LTE 3500kg '), ('EB', 'EB-Articulated vehicles LTE 3500kg'), ('C1', 'C1-Minibuses, Buses and Goods vehicles LTE 16000kg'), ('C', 'C-Buses and goods vehicles GTE 16000kg'), ('EC1', 'EC1-Articulated vehicles LTE 16000kg'), ('EC', 'EC-Articulated vehicles GTE 18000kg')])),
('vehicle_restrictions', models.CharField(default=None, max_length=120, null=True, blank=True, choices=[('0', 'None'), ('1', 'Automatic transmission'), ('2', 'Electrically powered'), ('3', 'Physically disabled'), ('4', 'Bus GTE 16000kg (GVM) permited')])),
('driver_restrictions', models.CharField(default=None, max_length=120, null=True, blank=True, choices=[('0', 'None'), ('1', 'Glasses or Contact lenses'), ('2', 'Artificial limb')])),
('date_added', models.DateTimeField(auto_now_add=True)),
('created_by', models.ForeignKey(related_name='user_driving_licence', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('driver', models.ForeignKey(related_name='driver_driving_licence', blank=True, to='employees.Employee', null=True)),
('modified_by', models.ForeignKey(related_name='user_modified_driving_licence', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.CreateModel(
name='HistoricalDrivingLicence',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
('licence_number', models.CharField(default=None, max_length=120, null=True, blank=True)),
('date_of_issue', models.DateField(null=True, blank=True)),
('expiry_date', models.DateField(null=True, blank=True)),
('code', models.CharField(default=None, max_length=120, null=True, blank=True, choices=[('A', 'A-Motor Cycle'), ('A1', 'A1-Motor Cycle LTE 125cc'), ('B', 'B-Light Motor Vehicle LTE 3500kg '), ('EB', 'EB-Articulated vehicles LTE 3500kg'), ('C1', 'C1-Minibuses, Buses and Goods vehicles LTE 16000kg'), ('C', 'C-Buses and goods vehicles GTE 16000kg'), ('EC1', 'EC1-Articulated vehicles LTE 16000kg'), ('EC', 'EC-Articulated vehicles GTE 18000kg')])),
('vehicle_restrictions', models.CharField(default=None, max_length=120, null=True, blank=True, choices=[('0', 'None'), ('1', 'Automatic transmission'), ('2', 'Electrically powered'), ('3', 'Physically disabled'), ('4', 'Bus GTE 16000kg (GVM) permited')])),
('driver_restrictions', models.CharField(default=None, max_length=120, null=True, blank=True, choices=[('0', 'None'), ('1', 'Glasses or Contact lenses'), ('2', 'Artificial limb')])),
('date_added', models.DateTimeField(editable=False, blank=True)),
('history_id', models.AutoField(serialize=False, primary_key=True)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('driver', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='employees.Employee', null=True)),
('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
('modified_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical driving licence',
},
),
]
| python |
"""Define tests for the REST API."""
import datetime
import aiohttp
import pytest
from aionotion import async_get_client
from .common import TEST_EMAIL, TEST_PASSWORD, load_fixture
@pytest.mark.asyncio
async def test_task_all(aresponses):
"""Test getting all tasks."""
aresponses.add(
"api.getnotion.com",
"/api/users/sign_in",
"post",
aresponses.Response(
text=load_fixture("auth_success_response.json"),
status=200,
headers={"Content-Type": "application/json; charset=utf-8"},
),
)
aresponses.add(
"api.getnotion.com",
"/api/tasks",
"get",
aresponses.Response(
text=load_fixture("task_all_response.json"),
status=200,
headers={"Content-Type": "application/json; charset=utf-8"},
),
)
async with aiohttp.ClientSession() as session:
client = await async_get_client(TEST_EMAIL, TEST_PASSWORD, session=session)
tasks = await client.task.async_all()
assert len(tasks) == 4
assert tasks[0]["status"]["value"] == "not_missing"
assert tasks[1]["status"]["insights"]["primary"]["to_state"] == "no_leak"
@pytest.mark.asyncio
async def test_task_create(aresponses):
"""Test creating a task."""
aresponses.add(
"api.getnotion.com",
"/api/users/sign_in",
"post",
aresponses.Response(
text=load_fixture("auth_success_response.json"),
status=200,
headers={"Content-Type": "application/json; charset=utf-8"},
),
)
aresponses.add(
"api.getnotion.com",
"/api/sensors/12345/tasks",
"post",
aresponses.Response(
text=load_fixture("task_create_response.json"),
status=200,
headers={"Content-Type": "application/json; charset=utf-8"},
),
)
async with aiohttp.ClientSession() as session:
client = await async_get_client(TEST_EMAIL, TEST_PASSWORD, session=session)
create_resp = await client.task.async_create(
12345, [{"id": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", "type": "missing"}]
)
assert create_resp["id"] == "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
assert create_resp["task_type"] == "missing"
@pytest.mark.asyncio
async def test_task_delete(aresponses):
"""Test deleting a task."""
aresponses.add(
"api.getnotion.com",
"/api/users/sign_in",
"post",
aresponses.Response(
text=load_fixture("auth_success_response.json"),
status=200,
headers={"Content-Type": "application/json; charset=utf-8"},
),
)
aresponses.add(
"api.getnotion.com",
"/api/sensors/12345/tasks/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
"delete",
aresponses.Response(
text=None,
status=200,
headers={"Content-Type": "application/json; charset=utf-8"},
),
)
async with aiohttp.ClientSession() as session:
client = await async_get_client(TEST_EMAIL, TEST_PASSWORD, session=session)
await client.task.async_delete(12345, "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")
@pytest.mark.asyncio
async def test_task_get(aresponses):
"""Test getting a task by ID."""
aresponses.add(
"api.getnotion.com",
"/api/users/sign_in",
"post",
aresponses.Response(
text=load_fixture("auth_success_response.json"),
status=200,
headers={"Content-Type": "application/json; charset=utf-8"},
),
)
aresponses.add(
"api.getnotion.com",
"/api/tasks/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
"get",
aresponses.Response(
text=load_fixture("task_get_response.json"),
status=200,
headers={"Content-Type": "application/json; charset=utf-8"},
),
)
async with aiohttp.ClientSession() as session:
client = await async_get_client(TEST_EMAIL, TEST_PASSWORD, session=session)
task = await client.task.async_get("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")
assert task["id"] == "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
assert task["task_type"] == "missing"
@pytest.mark.asyncio
async def test_task_history(aresponses):
"""Test getting a task's history."""
aresponses.add(
"api.getnotion.com",
"/api/users/sign_in",
"post",
aresponses.Response(
text=load_fixture("auth_success_response.json"),
status=200,
headers={"Content-Type": "application/json; charset=utf-8"},
),
)
aresponses.add(
"api.getnotion.com",
"/api/tasks/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/data",
"get",
aresponses.Response(
text=load_fixture("task_history_response.json"),
status=200,
headers={"Content-Type": "application/json; charset=utf-8"},
),
)
async with aiohttp.ClientSession() as session:
client = await async_get_client(TEST_EMAIL, TEST_PASSWORD, session=session)
history = await client.task.async_history(
"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
data_before=datetime.datetime.now(),
data_after=datetime.datetime.now() - datetime.timedelta(days=3),
)
assert len(history) == 3
| python |
import numpy as np
from Augmentor.Operations import Operation, Skew, Distort, Rotate, Shear, Flip, Zoom, HistogramEqualisation
from PIL import Image
import cv2
from utils.augmentation.Cloner import Clone
from utils.augmentation.Colorizer import Colorize
from utils.augmentation.Skitcher import Skitch
import random
def do_operation(opt, image, padding=50, rotate=False):
    image_cv = cv2.cvtColor((image * 255).astype(np.uint8), cv2.COLOR_RGB2BGR)  # assumes RGB float input in [0, 1]; the original passed cv2.IMREAD_COLOR, which is not a conversion code
dim = image_cv.shape[:2]
if str(opt) == 'Skew':
color = [0, 0, 0]
top, bottom = padding, padding
left, right = padding, padding
image_cv = cv2.copyMakeBorder(image_cv, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
image = opt.perform_operation([Image.fromarray(image_cv)])[0]
if rotate:
image = image.rotate(270, expand=True)
return np.array(image.resize(dim)) / 255.0
operations = {0: lambda: Skew(probability=1, skew_type="RANDOM", magnitude=0.7),
1: lambda: Distort(probability=1, grid_width=random.randint(1, 50), grid_height=random.randint(1, 50),
magnitude=5),
2: lambda: Rotate(probability=1, rotation=random.randint(1, 360)),
3: lambda: Shear(probability=1, max_shear_left=0, max_shear_right=random.randint(5, 15)) \
if random.randint(0, 1) == 1 else Shear(probability=1, max_shear_left=random.randint(5, 15),
max_shear_right=0),
4: lambda: Zoom(probability=1, min_factor=random.randint(2, 10) / 10,
max_factor=random.randint(10, 12) / 10),
5: lambda: Colorize(probability=1),
6: lambda: Skitch(probability=1),
7: lambda: Flip(probability=1, top_bottom_left_right="RANDOM"),
8: lambda: Clone(probability=1)
}
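# Usage sketch (added illustration, not part of the original file): pick one of
# the factories above, instantiate it, and apply it to a float image in [0, 1].
# `image` is assumed to be an RGB numpy array defined elsewhere:
#
#     opt = operations[random.randint(0, 8)]()
#     augmented = do_operation(opt, image, padding=50, rotate=False)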
| python |
# --------------
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# Code starts here
df = pd.read_csv(path)
print(df.head(5))
X=df.iloc[:,:7]
y=df.iloc[:,7]
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=6)
# Code ends here
# --------------
import matplotlib.pyplot as plt
# Code starts here
plt.boxplot(X_train['bmi'])
q_value=X_train['bmi'].quantile(q=0.95)
print(q_value)
y_train.value_counts()
# Code ends here
# --------------
# Code starts here
relation = X_train.corr()
print(relation)
sns.pairplot(X_train)
# Code ends here
# --------------
import seaborn as sns
import matplotlib.pyplot as plt
# Code starts here
cols = ['children','sex','region','smoker']
fig, axes= plt.subplots(nrows = 2 , ncols = 2, figsize=(30,30))
for i in range(2):
for j in range(2):
        col = cols[i*2 + j]
sns.countplot(x=X_train[col],hue=y_train,ax=axes[i,j])
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# parameters for grid search
parameters = {'C':[0.1,0.5,1,5]}
# Code starts here
lr = LogisticRegression()
grid=GridSearchCV(estimator=lr,param_grid=dict(parameters))
grid.fit(X_train,y_train)
y_pred=grid.predict(X_test)
accuracy=accuracy_score(y_test,y_pred)
print(accuracy)
#ridge_grid = GridSearchCV(estimator=ridge_model,param_grid=dict(alpha=ridge_lambdas))
#ridge_grid.fit(X_train,y_train)
# Code ends here
# --------------
from sklearn.metrics import roc_auc_score
from sklearn import metrics
# Code starts here
score=roc_auc_score(y_test,y_pred)
y_in=(grid.predict_proba(X_test))
print(y_in[:,1])
y_pred_proba=y_in[:,1]
fpr, tpr, thresholds = metrics.roc_curve(y_test,y_pred_proba)
roc_auc = roc_auc_score(y_test,y_pred_proba)
plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc))
# Code ends here
| python |
from rest_framework.serializers import ModelSerializer
from apps.recetas.models import Receta
class RecetaSerializer(ModelSerializer):
class Meta:
model = Receta
fields = [
'cantidad',
'fecha',
'personal',
'bienes',
]
| python |
from django.test import TestCase
from django.template import Template, Context
def render(template, context):
t = Template(template)
return t.render(context)
class XSSTests(TestCase):
def test_use_component_doesnt_allow_xss(self):
TEMPLATE = "" \
"{% load megamacros %}" \
"{% definecomponent xss_test %}" \
"<div>" \
"{% defineslot slot1 %}{% enddefineslot %}" \
"</div>" \
"{% enddefinecomponent %}" \
"{% usecomponent xss_test %}" \
"{% fillslot slot1 %}{{somevar}}{% endfillslot %}" \
"{% endusecomponent %}"
ctx = {
"somevar": "<script>alert(0);</script>"
}
content = render(TEMPLATE, Context(ctx))
self.assertEqual(content, "<div><script>alert(0);</script></div>") | python |
from bip_utils import Bip39MnemonicGenerator, Bip39SeedGenerator, Bip44, Bip44Coins, WifDecoder, \
RippleConf, XrpAddr, Bip32, Bip44Changes
from keygen.crypto_coin import CryptoCoin
from keygen.crypto_coin_service import CoinService
# mnemonic = Bip39MnemonicGenerator.FromWordsNumber(12)
mnemonic = "copy curve retire hidden cover wrap muffin raw crop olympic kingdom right"
# Generate random mnemonic
# mnemonic = Bip39MnemonicGenerator.FromWordsNumber(12)
print("Mnemonic string: %s" % mnemonic)
# Generate seed from mnemonic
seed_bytes = Bip39SeedGenerator(mnemonic).Generate()
# Generate BIP44 master keys
bip_obj_mst = Bip44.FromSeed(seed_bytes, Bip44Coins.BITCOIN)
# Print master key
print("Master key (bytes): %s" % bip_obj_mst.PrivateKey().Raw().ToHex())
print("Master key (extended): %s" % bip_obj_mst.PrivateKey().ToExtended())
print("Master key (HEX): %s" % bip_obj_mst.PrivateKey().Raw().ToHex())
print("Master key (WIF): %s" % bip_obj_mst.PrivateKey().ToWif())
print("Master key (Address): %s" % bip_obj_mst.PublicKey().ToAddress())
# Generate BIP44 account keys: m/44'/0'/0'
bip_obj_acc = bip_obj_mst.Purpose().Coin().Account(0)
# Generate BIP44 chain keys: m/44'/0'/0'/0
bip_obj_chain = bip_obj_acc.Change(Bip44Changes.CHAIN_EXT)
# Generate the address pool (first 5 addresses): m/44'/0'/0'/0/i
for i in range(5):
bip_obj_addr = bip_obj_chain.AddressIndex(i)
print("%d. Address public key (extended): %s" % (i, bip_obj_addr.PublicKey().ToExtended()))
print("%d. Address Priv key (hex): %s" % (i, bip_obj_addr.PrivateKey().Raw().ToHex()))
print("%d. Address private key (extended): %s" % (i, bip_obj_addr.PrivateKey().ToExtended()))
print("%d. Wif: %s" % (i, bip_obj_addr.PrivateKey().ToWif()))
print("%d. Address: %s" % (i, bip_obj_addr.PublicKey().ToAddress())) | python |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import dataclass
from typing import List, Tuple, Iterable
import numpy as np
import pandas as pd
from ax.core.experiment import Experiment
from ax.core.utils import get_model_times
from ax.service.scheduler import Scheduler
from ax.utils.common.typeutils import not_none
@dataclass(frozen=True)
class BenchmarkResult:
"""The result of a single optimization loop from one
(BenchmarkProblem, BenchmarkMethod) pair. More information will be added to the
BenchmarkResult as the suite develops.
"""
name: str
experiment: Experiment
# Tracks best point if single-objective problem, max hypervolume if MOO
optimization_trace: np.ndarray
fit_time: float
gen_time: float
@classmethod
def from_scheduler(cls, scheduler: Scheduler) -> BenchmarkResult:
fit_time, gen_time = get_model_times(experiment=scheduler.experiment)
return cls(
name=scheduler.experiment.name,
experiment=scheduler.experiment,
optimization_trace=cls._get_trace(scheduler=scheduler),
fit_time=fit_time,
gen_time=gen_time,
)
@staticmethod
def _get_trace(scheduler: Scheduler) -> np.ndarray:
if scheduler.experiment.is_moo_problem:
return np.array(
[
scheduler.get_hypervolume(
trial_indices=[*range(i + 1)], use_model_predictions=False
)
if i != 0
else 0
# TODO[mpolson64] on i=0 we get an error with SearchspaceToChoice
for i in range(len(scheduler.experiment.trials))
],
)
best_trials = [
scheduler.get_best_trial(
trial_indices=[*range(i + 1)], use_model_predictions=False
)
for i in range(len(scheduler.experiment.trials))
]
return np.array(
[
not_none(not_none(trial)[2])[0][
not_none(
scheduler.experiment.optimization_config
).objective.metric.name
]
for trial in best_trials
if trial is not None and not_none(trial)[2] is not None
]
)
@dataclass(frozen=True)
class AggregatedBenchmarkResult:
"""The result of a benchmark test, or series of replications. Scalar data present
in the BenchmarkResult is here represented as (mean, sem) pairs. More information
will be added to the AggregatedBenchmarkResult as the suite develops.
"""
name: str
experiments: Iterable[Experiment]
# mean, sem columns
optimization_trace: pd.DataFrame
# (mean, sem) pairs
fit_time: Tuple[float, float]
gen_time: Tuple[float, float]
@classmethod
def from_benchmark_results(
cls,
results: List[BenchmarkResult],
) -> AggregatedBenchmarkResult:
return cls(
name=results[0].name,
experiments=[result.experiment for result in results],
optimization_trace=pd.DataFrame(
{
"mean": [
np.mean(
[
results[j].optimization_trace[i]
for j in range(len(results))
]
)
for i in range(len(results[0].optimization_trace))
],
"sem": [
cls._series_to_sem(
series=[
results[j].optimization_trace[i]
for j in range(len(results))
]
)
for i in range(len(results[0].optimization_trace))
],
}
),
fit_time=cls._series_to_mean_sem(
series=[result.fit_time for result in results]
),
gen_time=cls._series_to_mean_sem(
series=[result.gen_time for result in results]
),
)
@staticmethod
def _series_to_mean_sem(series: List[float]) -> Tuple[float, float]:
return (
np.mean(series),
AggregatedBenchmarkResult._series_to_sem(series=series),
)
@staticmethod
def _series_to_sem(series: List[float]) -> float:
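        # Added note: this is the standard error of the mean, i.e. the sample
        # standard deviation (ddof=1) divided by sqrt(n). For the series
        # [1.0, 2.0, 3.0] it returns 1.0 / sqrt(3) ~= 0.577.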
return np.std(series, ddof=1) / np.sqrt(len(series))
| python |
# coding: utf-8
"""
Function for calculating the modular inverse. Exports the following items:
- inverse_mod()
Source code is derived from
http://webpages.charter.net/curryfans/peter/downloads.html, but has been heavily
modified to fit into this projects lint settings. The original project license
is listed below:
Copyright (c) 2014 Peter Pearson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import unicode_literals, division, absolute_import, print_function
import math
import platform
from .util import int_to_bytes, int_from_bytes
# First try to use ctypes with OpenSSL for better performance
try:
from ._ffi import (
buffer_from_bytes,
bytes_from_buffer,
FFIEngineError,
LibraryNotFoundError,
null,
)
# Some versions of PyPy have segfault issues, so we just punt on PyPy
if platform.python_implementation() == 'PyPy':
raise EnvironmentError()
try:
from ._perf._big_num_ctypes import libcrypto
def inverse_mod(a, p):
"""
Compute the modular inverse of a (mod p)
:param a:
An integer
:param p:
An integer
:return:
An integer
"""
ctx = libcrypto.BN_CTX_new()
a_bytes = int_to_bytes(abs(a))
p_bytes = int_to_bytes(abs(p))
a_buf = buffer_from_bytes(a_bytes)
a_bn = libcrypto.BN_bin2bn(a_buf, len(a_bytes), null())
if a < 0:
libcrypto.BN_set_negative(a_bn, 1)
p_buf = buffer_from_bytes(p_bytes)
p_bn = libcrypto.BN_bin2bn(p_buf, len(p_bytes), null())
if p < 0:
libcrypto.BN_set_negative(p_bn, 1)
r_bn = libcrypto.BN_mod_inverse(null(), a_bn, p_bn, ctx)
r_len_bits = libcrypto.BN_num_bits(r_bn)
r_len = int(math.ceil(r_len_bits / 8))
r_buf = buffer_from_bytes(r_len)
libcrypto.BN_bn2bin(r_bn, r_buf)
r_bytes = bytes_from_buffer(r_buf, r_len)
result = int_from_bytes(r_bytes)
libcrypto.BN_free(a_bn)
libcrypto.BN_free(p_bn)
libcrypto.BN_free(r_bn)
libcrypto.BN_CTX_free(ctx)
return result
except (LibraryNotFoundError, FFIEngineError):
raise EnvironmentError()
# If there was an issue using ctypes or OpenSSL, we fall back to pure python
except (EnvironmentError, ImportError):
def inverse_mod(a, p):
"""
Compute the modular inverse of a (mod p)
:param a:
An integer
:param p:
An integer
:return:
An integer
"""
if a < 0 or p <= a:
a = a % p
# From Ferguson and Schneier, roughly:
c, d = a, p
uc, vc, ud, vd = 1, 0, 0, 1
while c != 0:
q, c, d = divmod(d, c) + (c,)
uc, vc, ud, vd = ud - q * uc, vd - q * vc, uc, vc
# At this point, d is the GCD, and ud*a+vd*p = d.
        # If d == 1, this means that ud is an inverse.
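        # Worked example (added note, not in the original source): inverse_mod(3, 7)
        # leaves the loop with d == 1 and ud == -2, so the function returns
        # -2 + 7 = 5, and indeed 3 * 5 == 15 == 1 (mod 7).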
assert d == 1
if ud > 0:
return ud
else:
return ud + p
def fill_width(bytes_, width):
"""
Ensure a byte string representing a positive integer is a specific width
(in bytes)
:param bytes_:
The integer byte string
:param width:
The desired width as an integer
:return:
A byte string of the width specified
"""
while len(bytes_) < width:
bytes_ = b'\x00' + bytes_
return bytes_
| python |
# -*- coding: utf-8 -*-
"""
"""
from .bpy_helper import needs_bpy_bmesh
@needs_bpy_bmesh()
def _create_ground_material(name: str = "ground_material", *, bpy):
if name in bpy.data.materials:
raise RuntimeError("Material '{}' already exists".format(name))
mat = bpy.data.materials.new(name=name)
mat.use_nodes = True
nodes = mat.node_tree.nodes
nodes.clear()
node_tex_coord = nodes.new(type="ShaderNodeTexCoord")
node_tex_coord.location = 0, 0
node_vector_math = nodes.new(type="ShaderNodeVectorMath")
node_vector_math.location = 200, 0
node_vector_math.operation = "DISTANCE"
node_vector_math.inputs[1].default_value = (0.5, 0.5, 1.0)
node_scale_distance = nodes.new(type="ShaderNodeMath")
node_scale_distance.inputs[1].default_value = 1.5
node_scale_distance.operation = "MULTIPLY"
node_scale_distance.location = 400, 0
node_color_ramp = nodes.new(type="ShaderNodeValToRGB")
node_color_ramp.location = 600, 0
color_ramp = node_color_ramp.color_ramp
color_ramp.color_mode = "RGB"
color_ramp.interpolation = "EASE"
assert len(color_ramp.elements) == 2
color_ramp.elements[0].position = 0.27
color_ramp.elements[0].alpha = 0.0
color_ramp.elements[0].color = 0.0, 0.0, 0.0, 0.0
color_ramp.elements[1].position = 0.69
color_ramp.elements[1].alpha = 1.0
color_ramp.elements[1].color = 1.0, 1.0, 1.0, 1.0
node_bsdf = nodes.new(type="ShaderNodeBsdfPrincipled")
node_bsdf.inputs[7].default_value = 0.92 # roughness
node_bsdf.inputs[12].default_value = 0.0 # clearcoat
node_bsdf.inputs[13].default_value = 0.25 # clearcoat roughness
node_bsdf.location = 900, -100
node_transparent = nodes.new(type="ShaderNodeBsdfTransparent")
node_transparent.location = 1200, -200
node_mix = nodes.new(type="ShaderNodeMixShader")
node_mix.location = 1500, 0
node_output = nodes.new(type="ShaderNodeOutputMaterial")
node_output.location = 1800, 0
links = mat.node_tree.links
links.new(node_tex_coord.outputs[0], node_vector_math.inputs[0])
# for some reason it is outputs[1] for the vector math node (bug?)
links.new(node_vector_math.outputs[1], node_scale_distance.inputs[0])
links.new(node_scale_distance.outputs[0], node_color_ramp.inputs[0])
links.new(node_color_ramp.outputs[1], node_mix.inputs[0])
links.new(node_bsdf.outputs[0], node_mix.inputs[1])
links.new(node_transparent.outputs[0], node_mix.inputs[2])
links.new(node_mix.outputs[0], node_output.inputs[0])
return mat
@needs_bpy_bmesh()
def create_ground(name_prefix: str = "ground", *, bpy, bmesh):
diameter: float = 10.0
height: float = 0.1
bm = bmesh.new()
bmesh.ops.create_cone(
bm,
cap_ends=True,
cap_tris=False,
segments=256,
diameter1=diameter,
diameter2=diameter,
depth=height,
calc_uvs=False,
)
me = bpy.data.meshes.new("{}_mesh".format(name_prefix))
bm.to_mesh(me)
bm.free()
obj = bpy.data.objects.new("{}_obj".format(name_prefix), me)
material = _create_ground_material("{}_material".format(name_prefix))
obj.data.materials.append(material)
return obj
@needs_bpy_bmesh()
def add_spotlight_ground(scene=None, name_prefix: str = "spotlight", *, bpy):
if scene is None:
scene = bpy.context.scene
obj_ground = create_ground(name_prefix="{}_ground".format(name_prefix))
scene.collection.objects.link(obj_ground)
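# Usage sketch (added illustration, not part of the original module): inside a
# Blender Python session this links the fading circular ground disc into the
# active scene:
#
#     add_spotlight_ground()          # uses bpy.context.scene by default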
| python |
from itertools import product
from hyperparameter_tuner.single_parameter_generator import single_parameter_generator as sgen
class run_command_generator():
def __init__(self, single_parameter_generator_list, command_prefix="python ../experiment.py",
output_path="./results"):
for gen in single_parameter_generator_list:
assert isinstance(gen, sgen)
self.single_parameter_generator_list = single_parameter_generator_list
self.run_command = command_prefix
self.output_path = output_path
def run_commands(self):
        all_params_generator = self.single_parameter_generator_list[0].params()
        for p in self.single_parameter_generator_list[1:]:
            all_params_generator = product(all_params_generator, p.params())
        for train_params in all_params_generator:
command = str(train_params).replace('(', '').replace(')', '').replace('\'', '').replace(',', '')
stripped_command = command.replace(' ', '_').replace('-', '').replace('.', '')
output_path = f"{self.output_path}/{stripped_command}"
command = f"{self.run_command} {command} >{output_path}.out 2>{output_path}.err"
yield command
def default_commands_generator(command_prefix="python experiment.py", output_path="./hyperparameter_tuner/results"):
return run_command_generator([sgen("name", ["vgg_16"]),
sgen("learning_rate", [0.001, 0.005, 0.01, 0.03, 0.07, 0.1, 0.5, 1]),
sgen("batch_size", [20, 25, 30, 35, 50, 75]),
], command_prefix=command_prefix, output_path=output_path).run_commands()
if __name__ == '__main__':
commands = default_commands_generator()
for c in commands:
print(c)
| python |
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.pot.app.run import main
if __name__ == '__main__':
main()
| python |
"""从客户端收到一条数据后,在数据头增加’来自服务器‘字符串,然后一起转发回客户端,然后关闭服务器套接字。"""
'''
@Time : 2018/1/21 下午4:12
@Author : scrappy_zhang
@File : net02_udp_server.py
'''
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
address = ('192.168.234.1', 8888)  # Address: the server listens on port 8888
sock.bind(address)  # Bind the socket to the address
recv_data = sock.recvfrom(1024)  # Receive data from the client
send_data = '来自服务器' + recv_data[0].decode()  # Process the data: prepend '来自服务器' ('from the server')
sock.sendto(send_data.encode('utf-8'), recv_data[1])  # Send the reply back to the client
sock.close()  # Close the socket
| python |
import random
from raiden.storage.serialize import JSONSerializer
from raiden.storage.sqlite import SerializedSQLiteStorage
from raiden.storage.wal import WriteAheadLog
from raiden.tests.utils import factories
from raiden.transfer import node
from raiden.transfer.architecture import StateManager
from raiden.transfer.state_change import ActionInitChain
from raiden.utils.signer import LocalSigner
class MockTokenNetwork:
@staticmethod
def detail_participants(
participant1,
participant2,
block_identifier,
channel_identifier,
):
# To be changed by each test
return None
class MockPaymentChannel:
def __init__(self, token_network, channel_id):
self.token_network = token_network
class MockChain:
def __init__(self):
self.network_id = 17
# let's make a single mock token network for testing
self.token_network = MockTokenNetwork()
def payment_channel(self, token_network_address, channel_id):
return MockPaymentChannel(self.token_network, channel_id)
class MockRaidenService:
def __init__(self, message_handler=None, state_transition=None):
self.chain = MockChain()
self.private_key, self.address = factories.make_privatekey_address()
self.signer = LocalSigner(self.private_key)
self.chain.node_address = self.address
self.message_handler = message_handler
if state_transition is None:
state_transition = node.state_transition
serializer = JSONSerializer
state_manager = StateManager(state_transition, None)
storage = SerializedSQLiteStorage(':memory:', serializer)
self.wal = WriteAheadLog(state_manager, storage)
state_change = ActionInitChain(
pseudo_random_generator=random.Random(),
block_number=0,
block_hash=factories.make_block_hash(),
our_address=self.chain.node_address,
chain_id=self.chain.network_id,
)
self.wal.log_and_dispatch(state_change)
def on_message(self, message):
if self.message_handler:
self.message_handler.on_message(self, message)
def handle_and_track_state_change(self, state_change):
pass
def handle_state_change(self, state_change):
pass
def sign(self, message):
message.sign(self.signer)
| python |
from __future__ import annotations
class OpensearchIndexId:
"""
Build OpenSearch Index Id using given endpoint and index name or resolve the index name from given resource Id.
"""
def __init__(self, opensearch_endpoint: str, index_name: str) -> None:
self.opensearch_endpoint = opensearch_endpoint
self.index_name = index_name
def make_resource_id(self):
"""
Make resource id of OpenSearch index by concatenating given endpoint and index name.
OpenSearch endpoint and index name concatenated using delimiter '||'.
        Uses the instance's opensearch_endpoint and index_name attributes.
:return: Resource id of OpenSearch index.
"""
return f'{self.opensearch_endpoint}||{self.index_name}'
@staticmethod
def resource_id(resource_id: str) -> OpensearchIndexId:
"""
Split given resource_id using delimiter '||' and initialize a class.
:param resource_id: OpenSearch index resource id e.g. opensearch.eu-central-1.es.amazonaws.com||posts-3qs1999pg-c
:return: OpensearchIndexId class instance.
"""
return OpensearchIndexId(*resource_id.split('||'))
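# Usage sketch (added illustration, not in the original module): round-tripping
# an index id. The endpoint and index name below are made-up values:
#
#     rid = OpensearchIndexId('search-demo.eu-central-1.es.amazonaws.com', 'posts').make_resource_id()
#     OpensearchIndexId.resource_id(rid).index_name   # -> 'posts'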
| python |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 1.0.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + {"toc": true, "cell_type": "markdown"}
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"></ul></div>
# -
import xarray as xr
import glob
import numpy as np
import sys
invar = sys.argv[1]
root_path = '/mnt/nas4.meop2/meop40.data.model/CMAM/0A.daily/' #'/mnt/4data/CMAM/0A.daily/'
infiles = sorted(glob.glob(f'{root_path}{invar}/{invar}_6hrPlev_CMAM_CMAM30-SD_r1i1p1_*-*18.nc'))
# +
var = f'dzm{invar}dt'
cesta_out = f'{root_path}{var}/'
for i, infile in enumerate(infiles):
suffix = infile.split(invar)[-1]#infile_u.split('/lwa_')[1]
outfile = f'{cesta_out}{var}{suffix}'
da = xr.open_dataset(infile)[invar].mean('lon')
da_out = da.differentiate('time', datetime_unit='s')
da_out.name = var
print(outfile)
da_out.to_netcdf(outfile)
| python |
import unittest
from dojo import separate_names, get_bigger_name, ordenados
entrada = [['Joao', 'NO'], ['Carlos', 'YES'], ['Abner', 'NO'], ['Samuel', 'YES'], ['Ricardo', 'NO'], ['Abhay', 'YES'], ['Samuel', 'YES'], ['Andres', 'YES']]
class DojoTest(unittest.TestCase):
def test_separate_names(self):
self.assertEqual(separate_names(entrada), (["Carlos", "Samuel", "Abhay", "Samuel", "Andres",],["Joao", "Abner", "Ricardo"]))
def test_get_bigger_name(self):
self.assertEqual(get_bigger_name(["Carlos", "Samuel", "Abhay", "Samuel", "Andres"]), "Carlos")
def test_ordenados(self):
self.assertEqual(ordenados(["Carlos", "Samuel", "Abhay", "Samuel", "Andres"]), ["Abhay", "Andres", "Carlos", "Samuel"])
if __name__ == '__main__':
unittest.main()
# Juan - Ingrid - Lara - Tiago
# [['Joao', 'NO'], ['Carlos', 'YES'], ['Abner', 'NO'], ['Samuel', 'YES'], ['Ricardo', 'NO'], ['Abhay', 'YES'], ['Samuel', 'YES'], ['Andres', 'YES'], ['Roberto', 'NO'], ['Carlos', 'YES'], ['Samuel', 'YES'], ['Samuel', 'YES'], ['Abhay', 'YES'], ['Aline', 'YES'], ['Andres', 'YES']]
# [[]]
#['Joao','Abner', ]
# 1 - Process the input -> put it into a list of lists
# 2 - Split into the people who answered Yes and the people who answered No
# While adding the Yes people to their list:
# keep a variable holding the name with the largest number of letters;
# when inserting a new name into the Yes list, check whether it is longer;
# if it is, update the variable, otherwise keep it unchanged
# 3 - Finally, sort both lists alphabetically and concatenate the Yes list with the No list.
from config import CONFIG
import models
def check_date(date_string):
"""checks user date string is in correct format for parsing to a datetime object"""
failure_message = CONFIG['date_check_failure_msg']
try:
date_time_obj = models.datetime.datetime.strptime(
date_string, CONFIG['date_string_format']
)
except ValueError:
return failure_message
else:
return date_time_obj
def check_time(time_string):
"""checks the user has entered a string that contains a character that can be parsed to int"""
failure_message = CONFIG['time_check_failure_msg']
try:
minutes_int = int(time_string)
except ValueError:
return failure_message
else:
return minutes_int
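# Illustrative examples (added; not part of the original module). They assume
# CONFIG['date_string_format'] is '%d/%m/%Y', which is a hypothetical value:
#
#     check_date('21/01/2018')   # -> datetime.datetime(2018, 1, 21, 0, 0)
#     check_time('25')           # -> 25
#     check_time('abc')          # -> CONFIG['time_check_failure_msg']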
def check_dates(date_string1, date_string2):
"""checks that the user makes correctly formatted date entries for date range search,
using check_date() and orders the dates correctly"""
dt_obj1 = check_date(date_string1)
dt_obj2 = check_date(date_string2)
failures = []
if isinstance(dt_obj1, models.datetime.datetime) and isinstance(dt_obj2, models.datetime.datetime):
if dt_obj1 <= dt_obj2:
return dt_obj1, dt_obj2
else:
return dt_obj2, dt_obj1
elif isinstance(dt_obj1, str) and isinstance(dt_obj2, models.datetime.datetime):
dt_obj1 = 'start ' + dt_obj1
failures.append(dt_obj1)
failures.append(dt_obj2)
elif isinstance(dt_obj2, str) and isinstance(dt_obj1, models.datetime.datetime):
dt_obj2 = 'end ' + dt_obj2
failures.append(dt_obj1)
failures.append(dt_obj2)
else:
dt_obj1 = 'start ' + dt_obj1
dt_obj2 = 'end ' + dt_obj2
failures.append(dt_obj1)
failures.append(dt_obj2)
return failures | python |
#
# PySNMP MIB module ENTERASYS-NAT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ENTERASYS-NAT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:04:10 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
etsysModules, = mibBuilder.importSymbols("ENTERASYS-MIB-NAMES", "etsysModules")
InterfaceIndexOrZero, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero")
InetVersion, InetAddress, InetAddressPrefixLength, InetPortNumber, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetVersion", "InetAddress", "InetAddressPrefixLength", "InetPortNumber", "InetAddressType")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, NotificationType, IpAddress, TimeTicks, MibIdentifier, Counter32, Bits, Counter64, ModuleIdentity, Unsigned32, Integer32, iso, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "NotificationType", "IpAddress", "TimeTicks", "MibIdentifier", "Counter32", "Bits", "Counter64", "ModuleIdentity", "Unsigned32", "Integer32", "iso", "ObjectIdentity")
RowStatus, DisplayString, TruthValue, TextualConvention, DateAndTime = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TruthValue", "TextualConvention", "DateAndTime")
etsysNatMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75))
etsysNatMIB.setRevisions(('2010-06-02 11:53',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: etsysNatMIB.setRevisionsDescriptions(('Initial version',))
if mibBuilder.loadTexts: etsysNatMIB.setLastUpdated('201006021153Z')
if mibBuilder.loadTexts: etsysNatMIB.setOrganization('Enterasys Networks, Inc')
if mibBuilder.loadTexts: etsysNatMIB.setContactInfo('Postal: Enterasys Networks 50 Minuteman Rd. Andover, MA 01810-1008 USA Phone: +1 978 684 1000 E-mail: support@enterasys.com WWW: http://www.enterasys.com')
if mibBuilder.loadTexts: etsysNatMIB.setDescription('The Enterasys Networks Proprietary MIB module for entities implementing NAT.')
etsysNatGlobal = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1))
etsysNatGlobalStats = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1))
etsysNatGlobalIpv4Config = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 2))
etsysNatTables = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2))
etsysNatConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 3))
etsysNatStatsPoolsUsed = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsPoolsUsed.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsPoolsUsed.setDescription('This object contains the number of NAT Pools currently configured.')
etsysNatStatsListRulesUsed = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsListRulesUsed.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsListRulesUsed.setDescription('This object contains the number of NAT List Rules currently configured.')
etsysNatStatsStaticRulesUsed = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsStaticRulesUsed.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsStaticRulesUsed.setDescription('This object contains the number of NAT Static Rules currently configured.')
etsysNatStatsAddressUsed = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsAddressUsed.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsAddressUsed.setDescription('This object contains the number of NAT IP Addresses currently configured.')
etsysNatStatsPortMapsUsed = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsPortMapsUsed.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsPortMapsUsed.setDescription('This object contains the number of NAT Port Maps currently configured.')
etsysNatStatsBindingsCurrent = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsBindingsCurrent.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsBindingsCurrent.setDescription('This object contains the current number of active NAT bindings.')
etsysNatStatsBindingsHigh = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsBindingsHigh.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsBindingsHigh.setDescription('This object contains the highest number of NAT bindings active at one time.')
etsysNatStatsBindingsDeleted = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsBindingsDeleted.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsBindingsDeleted.setDescription('This object contains the total number of NAT bindings that have been deleted.')
etsysNatStatsBindingsTotal = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsBindingsTotal.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsBindingsTotal.setDescription('This object contains the total number of NAT bindings created.')
etsysNatStatsBindingsExhausted = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsBindingsExhausted.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsBindingsExhausted.setDescription('This object contains the number of times a request to create a NAT binding failed because there were no available free entries.')
etsysNatStatsBindingsMaxReached = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsBindingsMaxReached.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsBindingsMaxReached.setDescription('This object contains the number of times a request to create a NAT binding failed because the maximum number of bindings specified in etsysNatConfigMaxEntries allowed has been reached.')
etsysNatStatsBindingsNoIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsBindingsNoIpAddr.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsBindingsNoIpAddr.setDescription('This object contains the number of times a request to create a NAT binding failed because there were no available configured IP addresses to establish a binding.')
etsysNatStatsBindingsNoPortmapPort = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsBindingsNoPortmapPort.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsBindingsNoPortmapPort.setDescription('This object contains the number of times a request to create a NAT binding failed because there were no available Port Map Port entries to establish a binding.')
etsysNatStatsBindingsNoFtpALG = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsBindingsNoFtpALG.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsBindingsNoFtpALG.setDescription("This object contains the number of times a request to create a NAT binding failed because there were no available FTP ALG's to establish a binding.")
etsysNatStatsBindingsPerSecond = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 15), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsBindingsPerSecond.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsBindingsPerSecond.setDescription('This object contains the average number of NAT bindings created per second. Normalized for 5 seconds.')
etsysNatStatsClear = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 16), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysNatStatsClear.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsClear.setDescription('Setting this object to a value of true(1) will clear NAT statistics and cause the following objects to be reset to zero and new values generated: etsysNatStatsBindingsDeleted, etsysNatStatsBindingsTotal, etsysNatStatsBindingsExhausted, etsysNatStatsBindingsMaxReached, etsysNatStatsBindingsNoIpAddr, etsysNatStatsBindingsNoPortmapPort, etsysNatStatsBindingsNoFtpALG, etsysNatStatsBindingsPerSecond. The following etsysNatStatsBindingsHigh object will be reset to its current active count. Setting this object to a value of false(2) has no effect. This object will always return a value of false(2).')
etsysNatStatsClearDateAndTime = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 17), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsClearDateAndTime.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsClearDateAndTime.setDescription('The date / time the NAT statistics were cleared.')
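# --- Illustrative manager-side sketch (not part of the generated MIB) ---
# As described above, writing true(1) to etsysNatStatsClear resets the listed
# statistics counters and stamps etsysNatStatsClearDateAndTime. The helper
# below is a minimal, hypothetical example using the pysnmp high-level API;
# the agent address and community string are placeholders, and it assumes
# this compiled module is on the manager's MIB search path. It is defined for
# illustration only and never invoked by this module.
def _example_clear_nat_stats(agent='192.0.2.1', community='private'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, setCmd)
    errInd, errStat, errIdx, varBinds = next(
        setCmd(SnmpEngine(), CommunityData(community),
               UdpTransportTarget((agent, 161)), ContextData(),
               # Scalar instance ".0"; the value 1 is TruthValue true(1).
               ObjectType(ObjectIdentity('ENTERASYS-NAT-MIB',
                                         'etsysNatStatsClear', 0), 1)))
    if errInd or errStat:
        raise RuntimeError(errInd or errStat.prettyPrint())
    return varBinds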
etsysNatStatsTranslationProtocolRulesCount = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 18), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsTranslationProtocolRulesCount.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsTranslationProtocolRulesCount.setDescription('The number of etsysNatTranslationProtocolRulesEntry rows in the etsysNatTranslationProtocolRulesTable.')
etsysNatStatsMinTimeoutValue = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 19), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsMinTimeoutValue.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsMinTimeoutValue.setDescription('The minimum timeout value allowable for NAT timeouts.')
etsysNatStatsMaxTimeoutValue = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 1, 20), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStatsMaxTimeoutValue.setStatus('current')
if mibBuilder.loadTexts: etsysNatStatsMaxTimeoutValue.setDescription('The maximum timeout value allowable for NAT timeouts.')
etsysNatIpv4ConfigLogTranslations = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysNatIpv4ConfigLogTranslations.setStatus('current')
if mibBuilder.loadTexts: etsysNatIpv4ConfigLogTranslations.setDescription("This object contains the state of NAT IPv4 Translations logging. - `disabled', a log message will not be generated when a NAT binding is either created or deleted. - `enabled', a log message will be generated when a NAT binding is either created or deleted.")
etsysNatIpv4ConfigInspectDNS = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysNatIpv4ConfigInspectDNS.setStatus('current')
if mibBuilder.loadTexts: etsysNatIpv4ConfigInspectDNS.setDescription("This object contains the state of NAT IPv4 Inspect/Fix up DNS. NAT DNS packet inspection and fix up consists of parsing DNS request or response packets, identifying IP addresses contained within that may need to be NAT'ed, and fixing up the DNS packet with the appropriate NAT translations. - `disabled', NAT does not inspect DNS packets that are being forwarded by the NAT process. - `enabled', NAT inspects DNS packets that are being forwarded by the NAT process.")
etsysNatIpv4ConfigFtpCtrlPort = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 2, 3), InetPortNumber().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(21)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysNatIpv4ConfigFtpCtrlPort.setStatus('current')
if mibBuilder.loadTexts: etsysNatIpv4ConfigFtpCtrlPort.setDescription('This object contains the NAT IPv4 FTP control port.')
etsysNatIpv4ConfigMaxEntries = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 2, 4), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 4294967295), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysNatIpv4ConfigMaxEntries.setStatus('current')
if mibBuilder.loadTexts: etsysNatIpv4ConfigMaxEntries.setDescription("This object contains the maximum number of NAT IPv4 Translation entries. A value of '0' indicates that there is no limit.")
etsysNatIpv4ConfigTimeout = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 2, 5), Unsigned32().clone(240)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysNatIpv4ConfigTimeout.setStatus('current')
if mibBuilder.loadTexts: etsysNatIpv4ConfigTimeout.setDescription('This object contains the general NAT IPv4 idle translation timeout value.')
etsysNatIpv4ConfigUdpTimeout = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 2, 6), Unsigned32().clone(240)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysNatIpv4ConfigUdpTimeout.setStatus('current')
if mibBuilder.loadTexts: etsysNatIpv4ConfigUdpTimeout.setDescription('This object contains the NAT IPv4 UDP idle translation timeout value.')
etsysNatIpv4ConfigTcpTimeout = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 2, 7), Unsigned32().clone(240)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysNatIpv4ConfigTcpTimeout.setStatus('current')
if mibBuilder.loadTexts: etsysNatIpv4ConfigTcpTimeout.setDescription('This object contains the NAT IPv4 TCP idle translation timeout value.')
etsysNatIpv4ConfigFtpTimeout = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 2, 8), Unsigned32().clone(240)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysNatIpv4ConfigFtpTimeout.setStatus('current')
if mibBuilder.loadTexts: etsysNatIpv4ConfigFtpTimeout.setDescription('This object contains the NAT IPv4 FTP idle translation timeout value.')
etsysNatIpv4ConfigDnsTimeout = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 2, 9), Unsigned32().clone(240)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysNatIpv4ConfigDnsTimeout.setStatus('current')
if mibBuilder.loadTexts: etsysNatIpv4ConfigDnsTimeout.setDescription('This object contains the NAT IPv4 DNS idle translation timeout value.')
etsysNatIpv4ConfigIcmpTimeout = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 2, 10), Unsigned32().clone(240)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysNatIpv4ConfigIcmpTimeout.setStatus('current')
if mibBuilder.loadTexts: etsysNatIpv4ConfigIcmpTimeout.setDescription('This object contains the NAT IPv4 ICMP idle translation timeout value.')
etsysNatIpv4ConfigFinRstTimeout = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 2, 11), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 4294967295), )).clone(3)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysNatIpv4ConfigFinRstTimeout.setStatus('current')
if mibBuilder.loadTexts: etsysNatIpv4ConfigFinRstTimeout.setDescription("The timeout value applied after a TCP FIN/RST is observed on IPv4 NAT bindings. Setting a value of '0' results in this feature being disabled.")
etsysNatIpv4ConfigFinRstTimeoutHalfClosedStatus = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 1, 2, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysNatIpv4ConfigFinRstTimeoutHalfClosedStatus.setStatus('current')
if mibBuilder.loadTexts: etsysNatIpv4ConfigFinRstTimeoutHalfClosedStatus.setDescription("This object indicates if the IPv4 TCP FIN/RST timeout will apply to bindings only when the hardware connections have aged out. - `disabled', indicates the IPv4 TCP FIN/RST timeout does not apply to half closed bindings. - `enabled', indicates the IPv4 TCP FIN/RST timeout applies to half closed bindings.")
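# --- Illustrative manager-side sketch (not part of the generated MIB) ---
# The IPv4 idle-timeout scalars above are read-write; a sensible manager
# checks etsysNatStatsMinTimeoutValue/etsysNatStatsMaxTimeoutValue before
# writing. Minimal, hypothetical pysnmp example; the agent address, community
# and the 600 second value are placeholders, and the MIB is assumed to be
# resolvable by the manager. Defined for illustration only, never invoked.
def _example_set_tcp_idle_timeout(agent='192.0.2.1', community='private',
                                  timeout=600):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              getCmd, setCmd)
    target = (SnmpEngine(), CommunityData(community),
              UdpTransportTarget((agent, 161)), ContextData())
    # Read the allowed timeout range first.
    errInd, errStat, errIdx, bounds = next(
        getCmd(*target,
               ObjectType(ObjectIdentity('ENTERASYS-NAT-MIB',
                                         'etsysNatStatsMinTimeoutValue', 0)),
               ObjectType(ObjectIdentity('ENTERASYS-NAT-MIB',
                                         'etsysNatStatsMaxTimeoutValue', 0))))
    if errInd or errStat:
        raise RuntimeError(errInd or errStat.prettyPrint())
    low, high = [int(val) for _, val in bounds]
    if not low <= timeout <= high:
        raise ValueError('timeout %d outside allowed range %d..%d'
                         % (timeout, low, high))
    # Write the new TCP idle translation timeout (seconds).
    errInd, errStat, errIdx, varBinds = next(
        setCmd(*target,
               ObjectType(ObjectIdentity('ENTERASYS-NAT-MIB',
                                         'etsysNatIpv4ConfigTcpTimeout', 0),
                          timeout)))
    if errInd or errStat:
        raise RuntimeError(errInd or errStat.prettyPrint())
    return varBinds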
etsysNatTranslationProtocolRulesTable = MibTable((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 1), )
if mibBuilder.loadTexts: etsysNatTranslationProtocolRulesTable.setStatus('current')
if mibBuilder.loadTexts: etsysNatTranslationProtocolRulesTable.setDescription('A table of entries defining NAT rules for non standard protocols.')
etsysNatTranslationProtocolRulesEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 1, 1), ).setIndexNames((0, "ENTERASYS-NAT-MIB", "etsysNatTranslationInetVersion"), (0, "ENTERASYS-NAT-MIB", "etsysNatTranslationProtocol"), (0, "ENTERASYS-NAT-MIB", "etsysNatTranslationPort"))
if mibBuilder.loadTexts: etsysNatTranslationProtocolRulesEntry.setStatus('current')
if mibBuilder.loadTexts: etsysNatTranslationProtocolRulesEntry.setDescription('An entry in the etsysNatTranslationProtocolRulesTable defining the configuration values for a unique NAT Translation Protocol entry. The Translation Protocol Rules Table entry defines the protocol and timeout value for a NAT Translation Protocol.')
etsysNatTranslationInetVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 1, 1, 1), InetVersion())
if mibBuilder.loadTexts: etsysNatTranslationInetVersion.setStatus('current')
if mibBuilder.loadTexts: etsysNatTranslationInetVersion.setDescription('The Internet address version of the NAT Translation Protocol Rule.')
etsysNatTranslationProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 255), )))
if mibBuilder.loadTexts: etsysNatTranslationProtocol.setStatus('current')
if mibBuilder.loadTexts: etsysNatTranslationProtocol.setDescription("The NAT Translation Internet Protocol number. A value of any(0) indicates that any protocol is allowed. Common IP Protocol values are ICMP(1), TCP(6) or UDP(17). All IP Protocol values except for TCP and UDP require an etsysNatTranslationPort value of '0'. TCP or UDP may have a non-zero value in etsysNatTranslationPort.")
etsysNatTranslationPort = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 1, 1, 3), InetPortNumber().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 65535), )))
if mibBuilder.loadTexts: etsysNatTranslationPort.setStatus('current')
if mibBuilder.loadTexts: etsysNatTranslationPort.setDescription("The port assigned to this NAT Translation Protocol entry. A value of '0' indicates any port is allowed for this NAT Translation Protocol entry. An entry in etsysNatTranslationProtocol of either TCP(6) or UDP(17) may contain a non-zero value in this entry indicating the specific port assigned for this protocol. Any other value in etsysNatTranslationProtocol requires that this entry be set to '0'.")
etsysNatTranslationTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 1, 1, 4), Unsigned32().clone(240)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysNatTranslationTimeout.setStatus('current')
if mibBuilder.loadTexts: etsysNatTranslationTimeout.setDescription('The timeout value for idle NAT translations using this NAT Translation Protocol entry.')
etsysNatTranslationOneShot = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2))).clone('disabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysNatTranslationOneShot.setStatus('current')
if mibBuilder.loadTexts: etsysNatTranslationOneShot.setDescription('This object enables the One Shot feature for this NAT Translation Protocol entry. One Shot is a feature specific to protocol bindings such as ICMP and UDP traffic such as DNS which are generally bi-directional and usually only send one packet in each direction. One Shot provides the benefit of quickly cleaning up such bindings given their temporary nature.')
etsysNatTranslationRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 1, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysNatTranslationRowStatus.setStatus('current')
if mibBuilder.loadTexts: etsysNatTranslationRowStatus.setDescription("This object permits management of the table by facilitating actions such as row creation and deletion. The value of this object has no effect on whether other objects in the conceptual row can be modified. The only allowable row status of this conceptual row in the table are: active(1) - The NAT Translation Protocol entry is available for use in NAT operations. createAndGo(4) - This is the preferred mechanism for creating conceptual rows in this table. A valid etsysNatTranslationProtocol must be present. A value in etsysNatTranslationProtocol not equal to either TCP(6) or UDP(17) requires a '0' value in etsysNatTranslationPort. There are no other writable leaves so a successful createAndGo will always transition a new entry to the active(1) state. destroy(6) - This will remove the etsysNatTranslationProtocolRulesEntry from the etsysNatTranslationProtocolRulesTable and make it unavailable for NAT operations.")
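# --- Illustrative manager-side sketch (not part of the generated MIB) ---
# A row of etsysNatTranslationProtocolRulesTable is indexed by
# (etsysNatTranslationInetVersion, etsysNatTranslationProtocol,
# etsysNatTranslationPort) and is created with RowStatus createAndGo(4), as
# the description above explains. The index values (ipv4/UDP/port 69), the
# 120 second timeout, agent address and community are hypothetical
# placeholders. Defined for illustration only, never invoked.
def _example_create_translation_rule(agent='192.0.2.1', community='private'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, setCmd)
    index = (1, 17, 69)    # InetVersion ipv4(1), protocol udp(17), port 69
    errInd, errStat, errIdx, varBinds = next(
        setCmd(SnmpEngine(), CommunityData(community),
               UdpTransportTarget((agent, 161)), ContextData(),
               ObjectType(ObjectIdentity('ENTERASYS-NAT-MIB',
                                         'etsysNatTranslationTimeout',
                                         *index), 120),
               ObjectType(ObjectIdentity('ENTERASYS-NAT-MIB',
                                         'etsysNatTranslationRowStatus',
                                         *index), 4)))   # createAndGo(4)
    if errInd or errStat:
        raise RuntimeError(errInd or errStat.prettyPrint())
    return varBinds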
etsysNatPoolTable = MibTable((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2), )
if mibBuilder.loadTexts: etsysNatPoolTable.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolTable.setDescription('A table of entries defining the operational characteristics of NAT Pools.')
etsysNatPoolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1), ).setIndexNames((0, "ENTERASYS-NAT-MIB", "etsysNatPoolAddressType"), (0, "ENTERASYS-NAT-MIB", "etsysNatPoolName"))
if mibBuilder.loadTexts: etsysNatPoolEntry.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolEntry.setDescription('An entry in the etsysNatPoolTable containing the operational characteristics of a NAT Pool. A NAT Pool defines a range of public (outside) IP addresses and is identified by etsysNatPoolName.')
etsysNatPoolAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 1), InetAddressType())
if mibBuilder.loadTexts: etsysNatPoolAddressType.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolAddressType.setDescription('The Internet Protocol version. The value of this property affects the size and format of: etsysNatPoolFirstIpAddr, etsysNatPoolLastIpAddr, and etsysNatPoolNextIpAddr. All objects must be of the same INET Address Type.')
etsysNatPoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 63)))
if mibBuilder.loadTexts: etsysNatPoolName.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolName.setDescription('This object contains the unique name of the NAT Pool.')
etsysNatPoolFirstIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 3), InetAddress().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(4, 4), ValueSizeConstraint(16, 16), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysNatPoolFirstIpAddr.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolFirstIpAddr.setDescription('The assigned IP address represents the first IP address in the range for this NAT Pool entry.')
etsysNatPoolLastIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 4), InetAddress().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(4, 4), ValueSizeConstraint(16, 16), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysNatPoolLastIpAddr.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolLastIpAddr.setDescription('The assigned IP address represents the last IP address in the range for this NAT Pool entry.')
etsysNatPoolPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 5), InetAddressPrefixLength()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysNatPoolPrefixLen.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolPrefixLen.setDescription("The length of the IP network mask used to refine the range of IP addresses defined by etsysNatPoolFirstIpAddr and etsysNatPoolLastIpAddr in defining the range of IP addresses for this NAT Pool entry. The values for the index objects etsysNatPoolFirstIpAddr, etsysNatPoolLastIpAddr and etsysNatPoolPrefixLen must be consistent. If a prefix length of '0' is specified then all addresses in the range are used.")
etsysNatPoolNextIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 6), InetAddress().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(4, 4), ValueSizeConstraint(16, 16), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatPoolNextIpAddr.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolNextIpAddr.setDescription('The next NAT IP address that the pool is due to assign to a dynamic translation binding.')
etsysNatPoolAddrCount = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatPoolAddrCount.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolAddrCount.setDescription('The total number of IP addresses assigned to this NAT Pool entry.')
etsysNatPoolAddrUsed = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatPoolAddrUsed.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolAddrUsed.setDescription('The total number of IP addresses in use for this NAT Pool entry.')
etsysNatPoolAddrAlloc = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatPoolAddrAlloc.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolAddrAlloc.setDescription('The total number of IP addresses allocated for this NAT Pool entry.')
etsysNatPoolOutOfAddrs = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatPoolOutOfAddrs.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolOutOfAddrs.setDescription('The number of times an allocation of a Pool entry failed because there were no available NAT IP addresses for this NAT Pool entry.')
etsysNatPoolPortAlloc = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatPoolPortAlloc.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolPortAlloc.setDescription('The total number of Port Maps allocated for this NAT Pool entry.')
etsysNatPoolOutOfPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatPoolOutOfPorts.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolOutOfPorts.setDescription('The number of times an allocation of a Pool entry failed because there were no available NAT Port Maps for this NAT Pool entry.')
etsysNatPoolConns = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 13), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatPoolConns.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolConns.setDescription('The current number of active NAT connections established using this NAT Pool entry.')
etsysNatPoolHits = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatPoolHits.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolHits.setDescription('The total number of hits this NAT Pool has received.')
etsysNatPoolListRulesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 15), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatPoolListRulesCount.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolListRulesCount.setDescription('The total number of List Rules (etsysNatListPoolName) configured using this NAT Pool entry.')
etsysNatPoolLsnatVservers = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 16), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatPoolLsnatVservers.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolLsnatVservers.setDescription('The number of Virtual Servers (etsysLsnatVserverSourceNatPool) using this NAT Pool.')
etsysNatPoolRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 2, 1, 17), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysNatPoolRowStatus.setStatus('current')
if mibBuilder.loadTexts: etsysNatPoolRowStatus.setDescription('This object permits management of the table by facilitating actions such as row creation and deletion. The value of this object has no effect on whether other objects in the conceptual row can be modified. The only allowable row status of this conceptual row in the table are: active(1) - The NAT Pool entry is available for use by NAT operations. Other writable leaves in this table MAY be modified while the row is in the active state. createAndGo(4) - This is the preferred mechanism for creating conceptual rows in this table. A valid createAndGo requires a unique etsysNatPoolName, errors resulting from existing etsysNatPoolNames will result in the row not being created. A valid createAndGo also requires sets to these leaves as part of a multi-varbind set when creating the instance: etsysNatPoolFirstIpAddr, etsysNatPoolLastIpAddr, etsysNatPoolPrefixLen A valid createAndGo also requires a valid IP address range and etsysNatPoolPrefixLen, errors resulting from an invalid IP range or prefix length will result in the row not being created. There are no other writable leaves so a successful createAndGo will always transition a new entry to the active(1) state. destroy(6) - This will remove the etsysNatPoolEntry from the etsysNatPoolTable and make it unavailable for NAT operations. This will result in any NAT List table entries (etsysNatListPoolName) using this entry to be deleted. In addition this will also result in all associated LSNAT Virtual Server Table entries (etsysLsnatVserverSourceNatPool) to also be deleted.')
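# --- Illustrative manager-side sketch (not part of the generated MIB) ---
# Per the etsysNatPoolRowStatus description above, a pool row (indexed by
# etsysNatPoolAddressType and etsysNatPoolName) is created with a single
# multi-varbind set carrying FirstIpAddr, LastIpAddr, PrefixLen and RowStatus
# createAndGo(4). The pool name, the 198.51.100.0/26 range, agent address and
# community are hypothetical placeholders. Defined for illustration only,
# never invoked.
def _example_create_nat_pool(agent='192.0.2.1', community='private',
                             pool='outside-pool'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              OctetString, setCmd)
    index = (1, pool)      # InetAddressType ipv4(1), pool name

    def col(symbol):
        return ObjectIdentity('ENTERASYS-NAT-MIB', symbol, *index)

    errInd, errStat, errIdx, varBinds = next(
        setCmd(SnmpEngine(), CommunityData(community),
               UdpTransportTarget((agent, 161)), ContextData(),
               # 198.51.100.1 .. 198.51.100.62 as 4-octet InetAddress values.
               ObjectType(col('etsysNatPoolFirstIpAddr'),
                          OctetString(hexValue='c6336401')),
               ObjectType(col('etsysNatPoolLastIpAddr'),
                          OctetString(hexValue='c633643e')),
               ObjectType(col('etsysNatPoolPrefixLen'), 26),
               ObjectType(col('etsysNatPoolRowStatus'), 4)))  # createAndGo(4)
    if errInd or errStat:
        raise RuntimeError(errInd or errStat.prettyPrint())
    return varBinds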
etsysNatListRuleTable = MibTable((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 3), )
if mibBuilder.loadTexts: etsysNatListRuleTable.setStatus('current')
if mibBuilder.loadTexts: etsysNatListRuleTable.setDescription('A table of entries defining the operational characteristics of NAT Lists rules.')
etsysNatListRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 3, 1), ).setIndexNames((0, "ENTERASYS-NAT-MIB", "etsysNatListRuleInetVersion"), (0, "ENTERASYS-NAT-MIB", "etsysNatListRuleDirection"), (0, "ENTERASYS-NAT-MIB", "etsysNatListRuleMatchType"), (0, "ENTERASYS-NAT-MIB", "etsysNatListRuleName"), (0, "ENTERASYS-NAT-MIB", "etsysNatListRuleInsideVrfName"))
if mibBuilder.loadTexts: etsysNatListRuleEntry.setStatus('current')
if mibBuilder.loadTexts: etsysNatListRuleEntry.setDescription('An entry in the etsysNatListRuleTable containing the operational characteristics of a NAT List rule. A NAT List rule defines the operational characteristics for the forming of dynamic address bindings from a pre-configured address pool (etsysNatPoolName) of public outside addresses and a pre-configured access-list (etsysNatListRuleName) of local inside addresses.')
etsysNatListRuleInetVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 3, 1, 1), InetVersion())
if mibBuilder.loadTexts: etsysNatListRuleInetVersion.setStatus('current')
if mibBuilder.loadTexts: etsysNatListRuleInetVersion.setDescription('The Internet address version of the NAT List rule. The value of etsysNatListRuleInetVersion must be consistent with the value of etsysNatPoolAddressType defined by the etsysNatPoolName associated with this etsysNatListRuleEntry.')
etsysNatListRuleDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("inside", 1), ("outside", 2))))
if mibBuilder.loadTexts: etsysNatListRuleDirection.setStatus('current')
if mibBuilder.loadTexts: etsysNatListRuleDirection.setDescription("This value identifies if this NAT List rule is associated with an 'inside' or 'outside' network.")
etsysNatListRuleMatchType = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("source", 1), ("destination", 2))))
if mibBuilder.loadTexts: etsysNatListRuleMatchType.setStatus('current')
if mibBuilder.loadTexts: etsysNatListRuleMatchType.setDescription("This value identifies if this NAT List rule is associated with a 'source' or 'destination' packet.")
etsysNatListRuleName = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 3, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 64)))
if mibBuilder.loadTexts: etsysNatListRuleName.setStatus('current')
if mibBuilder.loadTexts: etsysNatListRuleName.setDescription('This object contains the unique name of the access-list of IP addresses to translate for the inside source address defined by this NAT List rule.')
etsysNatListRuleInsideVrfName = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 3, 1, 5), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 16)))
if mibBuilder.loadTexts: etsysNatListRuleInsideVrfName.setStatus('current')
if mibBuilder.loadTexts: etsysNatListRuleInsideVrfName.setDescription('This object contains the unique name of the Inside VRF for cross VRF NAT for this NAT List rule.')
etsysNatListRulePoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 3, 1, 6), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 63))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysNatListRulePoolName.setStatus('current')
if mibBuilder.loadTexts: etsysNatListRulePoolName.setDescription('This object contains the unique name of the NAT Pool. This value must be set with the row status set that creates the entry. The value may not be changed after the row is created.')
etsysNatListRuleIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 3, 1, 7), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysNatListRuleIfIndex.setStatus('current')
if mibBuilder.loadTexts: etsysNatListRuleIfIndex.setDescription("The outside NAT List rule interface index used for this NAT List rule entry. A value of '0' indicates that this List Rule would apply to 'any' interface. This value must be set with the row status set that creates the entry. The value may not be changed after the row is created.")
etsysNatListRuleOverloaded = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 3, 1, 8), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysNatListRuleOverloaded.setStatus('current')
if mibBuilder.loadTexts: etsysNatListRuleOverloaded.setDescription('This entry indicates if this NAT List rule entry will be overloaded enabling Network Address Port Translation (NAPT). This value must be set with the row status set that creates the entry. The value may not be changed after the row is created.')
etsysNatListRuleConns = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 3, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatListRuleConns.setStatus('current')
if mibBuilder.loadTexts: etsysNatListRuleConns.setDescription('The current number of active NAT connections established on this NAT List rule entry.')
etsysNatListRuleHits = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatListRuleHits.setStatus('current')
if mibBuilder.loadTexts: etsysNatListRuleHits.setDescription('The total number of hits this NAT List rule entry has received.')
etsysNatListRuleRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 3, 1, 11), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysNatListRuleRowStatus.setStatus('current')
if mibBuilder.loadTexts: etsysNatListRuleRowStatus.setDescription('This object permits management of the table by facilitating actions such as row creation and deletion. The value of this object has no effect on whether other objects in the conceptual row can be modified. The only allowable row status of this conceptual row in the table are: active(1) - The NAT List rule entry is available for use in NAT operations. createAndGo(4) - This is the preferred mechanism for creating conceptual rows in this table. A valid createAndGo requires an active(1) etsysNatPoolName be provided for the etsysNatListRulePoolName. If the list rule leaves etsysNatListRuleIfIndex or etsysNatListRuleOverloaded are to be set, they can only be set at create time. Sets to these leaves: etsysNatListRulePoolName, etsysNatListRuleIfIndex, etsysNatListRuleOverloaded will not be accepted after the entry is created. Errors resulting from an invalid configuration will result in the row not being created. There are no other writable leaves so a successful createAndGo will always transition a new entry to the active(1) state. destroy(6) - This will remove the etsysNatListRuleEntry from the etsysNatListRuleTable and make it unavailable for NAT operations.')
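# --- Illustrative manager-side sketch (not part of the generated MIB) ---
# A NAT List rule row is indexed by (InetVersion, Direction, MatchType,
# access-list name, inside VRF name) and, per the description above, must
# reference an active NAT Pool via etsysNatListRulePoolName at creation time;
# etsysNatListRuleIfIndex and etsysNatListRuleOverloaded can likewise only be
# supplied in the creating set. All index and column values below are
# hypothetical placeholders. Defined for illustration only, never invoked.
def _example_create_list_rule(agent='192.0.2.1', community='private'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, setCmd)
    # ipv4(1), inside(1), source(1), ACL name, inside VRF name
    index = (1, 1, 1, 'nat-inside-acl', 'default')

    def col(symbol):
        return ObjectIdentity('ENTERASYS-NAT-MIB', symbol, *index)

    errInd, errStat, errIdx, varBinds = next(
        setCmd(SnmpEngine(), CommunityData(community),
               UdpTransportTarget((agent, 161)), ContextData(),
               ObjectType(col('etsysNatListRulePoolName'), 'outside-pool'),
               ObjectType(col('etsysNatListRuleOverloaded'), 1),  # true(1): NAPT
               ObjectType(col('etsysNatListRuleRowStatus'), 4)))  # createAndGo(4)
    if errInd or errStat:
        raise RuntimeError(errInd or errStat.prettyPrint())
    return varBinds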
etsysNatStaticRuleTable = MibTable((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4), )
if mibBuilder.loadTexts: etsysNatStaticRuleTable.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleTable.setDescription('A table of entries defining the operational characteristics of NAT Static rules.')
etsysNatStaticRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1), ).setIndexNames((0, "ENTERASYS-NAT-MIB", "etsysNatStaticRuleAddressType"), (0, "ENTERASYS-NAT-MIB", "etsysNatStaticRuleDirection"), (0, "ENTERASYS-NAT-MIB", "etsysNatStaticRuleMatchType"), (0, "ENTERASYS-NAT-MIB", "etsysNatStaticRuleProtocol"), (0, "ENTERASYS-NAT-MIB", "etsysNatStaticRuleLocalIpAddr"), (0, "ENTERASYS-NAT-MIB", "etsysNatStaticRuleLocalPort"), (0, "ENTERASYS-NAT-MIB", "etsysNatStaticRuleGlobalIpAddr"), (0, "ENTERASYS-NAT-MIB", "etsysNatStaticRuleGlobalPort"), (0, "ENTERASYS-NAT-MIB", "etsysNatStaticRuleInsideVrfName"))
if mibBuilder.loadTexts: etsysNatStaticRuleEntry.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleEntry.setDescription('An entry in the etsysNatStaticRuleTable containing the operational characteristics of a NAT Static rule. A NAT Static rule defines a range of private (inside) IP addresses and operational characteristics for which specified NAT Static rules will be applied. Static inside address translations are one-to-one bindings between the inside and outside IP addresses.')
etsysNatStaticRuleAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1, 1), InetAddressType())
if mibBuilder.loadTexts: etsysNatStaticRuleAddressType.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleAddressType.setDescription('The Internet Protocol version. The value of this property affects the size and format of: etsysNatStaticRuleLocalIpAddr, and etsysNatStaticRuleGlobalIpAddr both objects must be of the same INET Address Type.')
etsysNatStaticRuleDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("inside", 1), ("outside", 2))))
if mibBuilder.loadTexts: etsysNatStaticRuleDirection.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleDirection.setDescription("This value identifies if this NAT Static rule is associated with an 'inside' or 'outside' network.")
etsysNatStaticRuleMatchType = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("source", 1), ("destination", 2))))
if mibBuilder.loadTexts: etsysNatStaticRuleMatchType.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleMatchType.setDescription("This value identifies if this NAT Static rule is associated with a 'source' or 'destination' packet.")
etsysNatStaticRuleProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 6, 17))).clone(namedValues=NamedValues(("any", 0), ("tcp", 6), ("udp", 17))))
if mibBuilder.loadTexts: etsysNatStaticRuleProtocol.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleProtocol.setDescription("This object contains the NAT Binding protocol type. A value of '0' indicates that the protocol type is 'any' and requires that etsysNatStaticRuleLocalPort and etsysNatStaticRuleGlobalPort be set to a value of '0'. A value of tcp(6) or udp(17) indicates that this entry defines NAPT translation of inside source addresses.")
etsysNatStaticRuleLocalIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1, 5), InetAddress().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(4, 4), ValueSizeConstraint(16, 16), )))
if mibBuilder.loadTexts: etsysNatStaticRuleLocalIpAddr.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleLocalIpAddr.setDescription('The NAT Static Local rule IP addresses representing the local IP address for this NAT Static rule entry.')
etsysNatStaticRuleLocalPort = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1, 6), InetPortNumber().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 65535), )))
if mibBuilder.loadTexts: etsysNatStaticRuleLocalPort.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleLocalPort.setDescription("The NAT Static Local rule port representing the local port for this NAT Static rule entry. This value is only meaningful if the etsysNatStaticRuleProtocol is set to tcp(6) or udp(17) otherwise this value must be set to '0'.")
etsysNatStaticRuleGlobalIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1, 7), InetAddress().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(4, 4), ValueSizeConstraint(16, 16), )))
if mibBuilder.loadTexts: etsysNatStaticRuleGlobalIpAddr.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleGlobalIpAddr.setDescription('The NAT Static Global rule IP addresses representing the global IP address for this NAT Static rule entry.')
etsysNatStaticRuleGlobalPort = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1, 8), InetPortNumber().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 65535), )))
if mibBuilder.loadTexts: etsysNatStaticRuleGlobalPort.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleGlobalPort.setDescription("The NAT Static Global rule port representing the global port for this NAT Static rule entry. This value is only meaningful if the etsysNatStaticRuleProtocol is set to tcp(6) or udp(17) otherwise this value must be set to '0'.")
etsysNatStaticRuleInsideVrfName = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 16)))
if mibBuilder.loadTexts: etsysNatStaticRuleInsideVrfName.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleInsideVrfName.setDescription('This object contains the unique name of the Inside VRF for cross VRF NAT.')
etsysNatStaticRuleOverloaded = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1, 10), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStaticRuleOverloaded.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleOverloaded.setDescription('This entry indicates if this NAT Static rule entry is overloaded enabling Network Address Port Translation (NAPT). If the etsysNatStaticRuleProtocol is set to tcp(6) or udp(17) this value will display true(1) otherwise it will display false(2).')
etsysNatStaticRuleConns = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStaticRuleConns.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleConns.setDescription('The current number of active NAT connections established on this NAT Static rule entry.')
etsysNatStaticRuleHits = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStaticRuleHits.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleHits.setDescription('The total number of hits this NAT Static rule entry has received.')
etsysNatStaticRuleReservedBindingId = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1, 13), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatStaticRuleReservedBindingId.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleReservedBindingId.setDescription("This value is a unique NAT Static rule binding identifier reserved by NAT at the creation of this Static rule entry. A value of '0' indicates that there is no binding identifier currently set for this static entry.")
etsysNatStaticRuleRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 4, 1, 14), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysNatStaticRuleRowStatus.setStatus('current')
if mibBuilder.loadTexts: etsysNatStaticRuleRowStatus.setDescription("This object permits management of the table by facilitating actions such as row creation and deletion. The value of this object has no effect on whether other objects in the conceptual row can be modified. The only allowable row status of this conceptual row in the table are: active(1) - The NAT Static rule entry is available for use in NAT operations. createAndGo(4) - This is the preferred mechanism for creating conceptual rows in this table. A valid createAndGo requires a valid etsysNatStaticRuleProtocol, valid IP addresses for etsysNatStaticRuleLocalIpAddr and etsysNatStaticRuleGlobalIpAddr, and valid ports for etsysNatStaticRuleLocalPort and etsysNatStaticRuleGlobalPort, errors resulting from an invalid configuration will result in the row not being created. There are no other writable leaves so a successful createAndGo will always transition a new entry to the active(1) state. destroy(6) - This will remove the etsysNatStaticRuleEntry from the etsysNatStaticRuleTable, remove its static NAT binding entry and make it unavailable for NAT operations.")
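# --- Illustrative manager-side sketch (not part of the generated MIB) ---
# A static rule row carries its whole configuration in its nine-part index
# (AddressType, Direction, MatchType, Protocol, LocalIpAddr, LocalPort,
# GlobalIpAddr, GlobalPort, InsideVrfName), so creation is a single
# createAndGo(4) on etsysNatStaticRuleRowStatus. The addresses, ports and VRF
# name below are hypothetical placeholders. Defined for illustration only,
# never invoked.
def _example_create_static_rule(agent='192.0.2.1', community='private'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              OctetString, setCmd)
    local_ip = OctetString(hexValue='0a010105')     # 10.1.1.5 (inside)
    global_ip = OctetString(hexValue='c6336405')    # 198.51.100.5 (outside)
    # ipv4(1), inside(1), source(1), tcp(6), local 10.1.1.5:8080,
    # global 198.51.100.5:80, inside VRF 'default'
    index = (1, 1, 1, 6, local_ip, 8080, global_ip, 80, 'default')
    errInd, errStat, errIdx, varBinds = next(
        setCmd(SnmpEngine(), CommunityData(community),
               UdpTransportTarget((agent, 161)), ContextData(),
               ObjectType(ObjectIdentity('ENTERASYS-NAT-MIB',
                                         'etsysNatStaticRuleRowStatus',
                                         *index), 4)))   # createAndGo(4)
    if errInd or errStat:
        raise RuntimeError(errInd or errStat.prettyPrint())
    return varBinds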
etsysNatBindingTable = MibTable((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5), )
if mibBuilder.loadTexts: etsysNatBindingTable.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingTable.setDescription('A table of entries defining active NAT bindings.')
etsysNatBindingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1), ).setIndexNames((0, "ENTERASYS-NAT-MIB", "etsysNatBindingId"))
if mibBuilder.loadTexts: etsysNatBindingEntry.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingEntry.setDescription('An entry in the etsysNatBindingTable defining an active NAT binding. The etsysNatBindingEntry defines the runtime characteristics of a NAT binding.')
etsysNatBindingId = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 1), Unsigned32())
if mibBuilder.loadTexts: etsysNatBindingId.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingId.setDescription('This value is a unique NAT binding identifier assigned by NAT during binding creation.')
etsysNatBindingState = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("init", 1), ("syncing", 2), ("waitroute", 3), ("established", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingState.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingState.setDescription("This object contains the state of the binding. - `init', indicates this binding is initializing. - `syncing', indicates this binding is syncing. - `waitroute', indicates this binding is in a wait route state. - `established', indicates this binding is established.")
etsysNatBindingAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 3), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingAddressType.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingAddressType.setDescription('The Internet Protocol version. The value of this property affects the size and format of: etsysNatBindingForwardSrcIp, etsysNatBindingForwardDstIp, etsysNatBindingReverseSrcIp and etsysNatBindingReverseDstIp objects.')
etsysNatBindingForwardSrcIp = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 4), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingForwardSrcIp.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingForwardSrcIp.setDescription('The forward source IP address for this NAT binding entry.')
etsysNatBindingForwardSrcPort = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 5), InetPortNumber().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 65535), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingForwardSrcPort.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingForwardSrcPort.setDescription("The forward source port for this NAT binding entry. This value is only meaningful if the etsysNatBindingProtocol is set to tcp(6) or udp(17) otherwise this object will return a value of '0'.")
etsysNatBindingForwardDstIp = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 6), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingForwardDstIp.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingForwardDstIp.setDescription('The forward destination IP address for this NAT binding entry.')
etsysNatBindingForwardDstPort = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 7), InetPortNumber().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 65535), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingForwardDstPort.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingForwardDstPort.setDescription("The forward destination port for this NAT binding entry. This value is only meaningful if the etsysNatBindingProtocol is set to tcp(6) or udp(17) otherwise this object will return a value of '0'.")
etsysNatBindingReverseSrcIp = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 8), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingReverseSrcIp.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingReverseSrcIp.setDescription('The reverse NAT source IP address for this NAT binding entry.')
etsysNatBindingReverseSrcPort = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 9), InetPortNumber().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 65535), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingReverseSrcPort.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingReverseSrcPort.setDescription("The reverse NAT source port for this NAT binding entry. This value is only meaningful if the etsysNatBindingProtocol is set to tcp(6) or udp(17) otherwise this object will return a value of '0'.")
etsysNatBindingReverseDstIp = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 10), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingReverseDstIp.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingReverseDstIp.setDescription('The reverse NAT destination IP address for this NAT binding entry.')
etsysNatBindingReverseDstPort = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 11), InetPortNumber().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 65535), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingReverseDstPort.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingReverseDstPort.setDescription("The reverse NAT destination port for this NAT binding entry. This value is only meaningful if the etsysNatBindingProtocol is set to tcp(6) or udp(17) otherwise this object will return a value of '0'.")
etsysNatBindingRuleType = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("static", 1), ("dynamic", 2), ("staticReserved", 3), ("dynamicReserved", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingRuleType.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingRuleType.setDescription("This object contains the NAT Binding Rule type. - `static', indicates that the NAT binding is static. - `dynamic', indicates that the NAT binding is dynamic. - `staticReserved', indicates that the NAT binding is static (reserved). - `dynamicReserved', indicates that the NAT binding is dynamic (reserved).")
etsysNatBindingPoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 13), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingPoolName.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingPoolName.setDescription('This object contains the name of the NAT Pool (etsysNatPoolName) associated with this NAT binding.')
etsysNatBindingProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 255), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingProtocol.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingProtocol.setDescription("This object contains the NAT Binding protocol type. A value of '0' indicates that the protocol type is 'any'. Common IP Protocol values are TCP(6) and UDP(17).")
etsysNatBindingAlgType = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("none", 1), ("ftpctrl", 2), ("ftpdata", 3), ("tftpctrl", 4), ("icmpecho", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingAlgType.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingAlgType.setDescription("This object contains the NAT ALG Binding type. - `none', indicates that this NAT binding is not an ALG binding. - `ftpctrl', indicates that this NAT binding is a FTP control binding. - `ftpdata', indicates that this NAT binding is a FTP data binding. - `tftpctrl', indicates that this NAT binding is a TFTP control binding. - `icmpecho', indicates that this NAT binding is an ICMP echo binding.")
etsysNatBindingFtpDataChannelCount = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 16), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 65535), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingFtpDataChannelCount.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingFtpDataChannelCount.setDescription("This object contains the NAT Binding FTP Data channel count. This value is only significant if the etsysNatBindingAlgType is ftpctrl(2), otherwise this object will return a value of '0'.")
etsysNatBindingIcmpFwdIdent = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 17), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 65535), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingIcmpFwdIdent.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingIcmpFwdIdent.setDescription("This object contains the NAT Binding ICMP Forward ID. This value is only significant if the etsysNatBindingAlgType is icmpecho(5), otherwise this object will return a value of '0'.")
etsysNatBindingIcmpRevIdent = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 18), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 65535), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingIcmpRevIdent.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingIcmpRevIdent.setDescription("This object contains the NAT Binding ICMP Reverse ID. This value is only significant if the etsysNatBindingAlgType is icmpecho(5), otherwise this object will return a value of '0'.")
etsysNatBindingHWConns = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 19), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingHWConns.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingHWConns.setDescription('This object contains the total number of hardware connections currently active using this NAT binding.')
etsysNatBindingCreationDate = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 20), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingCreationDate.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingCreationDate.setDescription('The creation date / time for this NAT binding entry.')
etsysNatBindingExpirationDate = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 21), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingExpirationDate.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingExpirationDate.setDescription("The expiration date / time for this NAT binding entry. If the NAT binding expire time (etsysNatBindingExpireTime) is '0' there is no expiration date and this object will return the current date / time.")
etsysNatBindingIdleTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 22), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingIdleTime.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingIdleTime.setDescription('The consecutive number of seconds this NAT binding entry has been idle.')
etsysNatBindingExpireTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 23), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNatBindingExpireTime.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingExpireTime.setDescription("The number of idle seconds left before this NAT binding entry will age out. A value of '0' indicates that this NAT binding will not age out.")
etsysNatBindingClear = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 2, 5, 1, 24), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysNatBindingClear.setStatus('current')
if mibBuilder.loadTexts: etsysNatBindingClear.setDescription("Setting this object to a value of true(1) will cause this etsysNatBindingEntry to be deleted from the etsysNatBindingTable if the corresponding etsysNatBindingRuleType is 'dynamic(2)' or 'dynamicReserved(4)' otherwise the binding entry will not be deleted. Setting this object to a value of false(2) has no effect. This object will always return a value of false(2).")
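# --- Illustrative manager-side sketch (not part of the generated MIB) ---
# The binding table above is read-only except for etsysNatBindingClear. This
# minimal, hypothetical pysnmp walk collects a few columns per active
# binding; a dynamic binding could afterwards be removed by writing true(1)
# to etsysNatBindingClear.<bindingId>, mirroring the clear operation shown
# for etsysNatStatsClear. Agent address and community are placeholders.
# Defined for illustration only, never invoked.
def _example_list_nat_bindings(agent='192.0.2.1', community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    bindings = []
    for errInd, errStat, errIdx, row in nextCmd(
            SnmpEngine(), CommunityData(community),
            UdpTransportTarget((agent, 161)), ContextData(),
            ObjectType(ObjectIdentity('ENTERASYS-NAT-MIB',
                                      'etsysNatBindingState')),
            ObjectType(ObjectIdentity('ENTERASYS-NAT-MIB',
                                      'etsysNatBindingRuleType')),
            ObjectType(ObjectIdentity('ENTERASYS-NAT-MIB',
                                      'etsysNatBindingIdleTime')),
            lexicographicMode=False):        # stop at the end of the columns
        if errInd or errStat:
            raise RuntimeError(errInd or errStat.prettyPrint())
        bindings.append([(name.prettyPrint(), val.prettyPrint())
                         for name, val in row])
    return bindings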
etsysNatMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 3, 1))
etsysNatMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 3, 2))
etsysNatMIBGlobalStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 3, 1, 1)).setObjects(("ENTERASYS-NAT-MIB", "etsysNatStatsPoolsUsed"), ("ENTERASYS-NAT-MIB", "etsysNatStatsListRulesUsed"), ("ENTERASYS-NAT-MIB", "etsysNatStatsStaticRulesUsed"), ("ENTERASYS-NAT-MIB", "etsysNatStatsAddressUsed"), ("ENTERASYS-NAT-MIB", "etsysNatStatsPortMapsUsed"), ("ENTERASYS-NAT-MIB", "etsysNatStatsBindingsCurrent"), ("ENTERASYS-NAT-MIB", "etsysNatStatsBindingsHigh"), ("ENTERASYS-NAT-MIB", "etsysNatStatsBindingsDeleted"), ("ENTERASYS-NAT-MIB", "etsysNatStatsBindingsTotal"), ("ENTERASYS-NAT-MIB", "etsysNatStatsBindingsExhausted"), ("ENTERASYS-NAT-MIB", "etsysNatStatsBindingsMaxReached"), ("ENTERASYS-NAT-MIB", "etsysNatStatsBindingsNoIpAddr"), ("ENTERASYS-NAT-MIB", "etsysNatStatsBindingsNoPortmapPort"), ("ENTERASYS-NAT-MIB", "etsysNatStatsBindingsNoFtpALG"), ("ENTERASYS-NAT-MIB", "etsysNatStatsBindingsPerSecond"), ("ENTERASYS-NAT-MIB", "etsysNatStatsClear"), ("ENTERASYS-NAT-MIB", "etsysNatStatsClearDateAndTime"), ("ENTERASYS-NAT-MIB", "etsysNatStatsTranslationProtocolRulesCount"), ("ENTERASYS-NAT-MIB", "etsysNatStatsMinTimeoutValue"), ("ENTERASYS-NAT-MIB", "etsysNatStatsMaxTimeoutValue"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysNatMIBGlobalStatsGroup = etsysNatMIBGlobalStatsGroup.setStatus('current')
if mibBuilder.loadTexts: etsysNatMIBGlobalStatsGroup.setDescription('The information group for NAT global resources and operations.')
etsysNatMIBGlobalIpv4ConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 3, 1, 2)).setObjects(("ENTERASYS-NAT-MIB", "etsysNatIpv4ConfigLogTranslations"), ("ENTERASYS-NAT-MIB", "etsysNatIpv4ConfigInspectDNS"), ("ENTERASYS-NAT-MIB", "etsysNatIpv4ConfigFtpCtrlPort"), ("ENTERASYS-NAT-MIB", "etsysNatIpv4ConfigMaxEntries"), ("ENTERASYS-NAT-MIB", "etsysNatIpv4ConfigTimeout"), ("ENTERASYS-NAT-MIB", "etsysNatIpv4ConfigUdpTimeout"), ("ENTERASYS-NAT-MIB", "etsysNatIpv4ConfigTcpTimeout"), ("ENTERASYS-NAT-MIB", "etsysNatIpv4ConfigFtpTimeout"), ("ENTERASYS-NAT-MIB", "etsysNatIpv4ConfigDnsTimeout"), ("ENTERASYS-NAT-MIB", "etsysNatIpv4ConfigIcmpTimeout"), ("ENTERASYS-NAT-MIB", "etsysNatIpv4ConfigFinRstTimeout"), ("ENTERASYS-NAT-MIB", "etsysNatIpv4ConfigFinRstTimeoutHalfClosedStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysNatMIBGlobalIpv4ConfigGroup = etsysNatMIBGlobalIpv4ConfigGroup.setStatus('current')
if mibBuilder.loadTexts: etsysNatMIBGlobalIpv4ConfigGroup.setDescription('The information group for NAT IPv4 global resources and operations.')
etsysNatMIBTranslationProtocolRulesTableGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 3, 1, 3)).setObjects(("ENTERASYS-NAT-MIB", "etsysNatTranslationTimeout"), ("ENTERASYS-NAT-MIB", "etsysNatTranslationOneShot"), ("ENTERASYS-NAT-MIB", "etsysNatTranslationRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysNatMIBTranslationProtocolRulesTableGroup = etsysNatMIBTranslationProtocolRulesTableGroup.setStatus('current')
if mibBuilder.loadTexts: etsysNatMIBTranslationProtocolRulesTableGroup.setDescription('The information group for NAT global IPv4 Translation Protocol Rules configuration.')
etsysNatMIBNatPoolTableGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 3, 1, 4)).setObjects(("ENTERASYS-NAT-MIB", "etsysNatPoolFirstIpAddr"), ("ENTERASYS-NAT-MIB", "etsysNatPoolLastIpAddr"), ("ENTERASYS-NAT-MIB", "etsysNatPoolPrefixLen"), ("ENTERASYS-NAT-MIB", "etsysNatPoolNextIpAddr"), ("ENTERASYS-NAT-MIB", "etsysNatPoolAddrCount"), ("ENTERASYS-NAT-MIB", "etsysNatPoolAddrUsed"), ("ENTERASYS-NAT-MIB", "etsysNatPoolAddrAlloc"), ("ENTERASYS-NAT-MIB", "etsysNatPoolOutOfAddrs"), ("ENTERASYS-NAT-MIB", "etsysNatPoolPortAlloc"), ("ENTERASYS-NAT-MIB", "etsysNatPoolOutOfPorts"), ("ENTERASYS-NAT-MIB", "etsysNatPoolConns"), ("ENTERASYS-NAT-MIB", "etsysNatPoolHits"), ("ENTERASYS-NAT-MIB", "etsysNatPoolListRulesCount"), ("ENTERASYS-NAT-MIB", "etsysNatPoolLsnatVservers"), ("ENTERASYS-NAT-MIB", "etsysNatPoolRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysNatMIBNatPoolTableGroup = etsysNatMIBNatPoolTableGroup.setStatus('current')
if mibBuilder.loadTexts: etsysNatMIBNatPoolTableGroup.setDescription('The information group for NAT Pool configurations.')
etsysNatMIBNatListRuleTableGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 3, 1, 5)).setObjects(("ENTERASYS-NAT-MIB", "etsysNatListRulePoolName"), ("ENTERASYS-NAT-MIB", "etsysNatListRuleIfIndex"), ("ENTERASYS-NAT-MIB", "etsysNatListRuleOverloaded"), ("ENTERASYS-NAT-MIB", "etsysNatListRuleConns"), ("ENTERASYS-NAT-MIB", "etsysNatListRuleHits"), ("ENTERASYS-NAT-MIB", "etsysNatListRuleRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysNatMIBNatListRuleTableGroup = etsysNatMIBNatListRuleTableGroup.setStatus('current')
if mibBuilder.loadTexts: etsysNatMIBNatListRuleTableGroup.setDescription('The information group for NAT List rule configurations.')
etsysNatMIBNatStaticRuleTableGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 3, 1, 6)).setObjects(("ENTERASYS-NAT-MIB", "etsysNatStaticRuleOverloaded"), ("ENTERASYS-NAT-MIB", "etsysNatStaticRuleConns"), ("ENTERASYS-NAT-MIB", "etsysNatStaticRuleHits"), ("ENTERASYS-NAT-MIB", "etsysNatStaticRuleReservedBindingId"), ("ENTERASYS-NAT-MIB", "etsysNatStaticRuleRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysNatMIBNatStaticRuleTableGroup = etsysNatMIBNatStaticRuleTableGroup.setStatus('current')
if mibBuilder.loadTexts: etsysNatMIBNatStaticRuleTableGroup.setDescription('The information group for NAT static rule configurations.')
etsysNatMIBNatBindingTableGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 3, 1, 7)).setObjects(("ENTERASYS-NAT-MIB", "etsysNatBindingState"), ("ENTERASYS-NAT-MIB", "etsysNatBindingAddressType"), ("ENTERASYS-NAT-MIB", "etsysNatBindingForwardSrcIp"), ("ENTERASYS-NAT-MIB", "etsysNatBindingForwardSrcPort"), ("ENTERASYS-NAT-MIB", "etsysNatBindingForwardDstIp"), ("ENTERASYS-NAT-MIB", "etsysNatBindingForwardDstPort"), ("ENTERASYS-NAT-MIB", "etsysNatBindingReverseSrcIp"), ("ENTERASYS-NAT-MIB", "etsysNatBindingReverseSrcPort"), ("ENTERASYS-NAT-MIB", "etsysNatBindingReverseDstIp"), ("ENTERASYS-NAT-MIB", "etsysNatBindingReverseDstPort"), ("ENTERASYS-NAT-MIB", "etsysNatBindingRuleType"), ("ENTERASYS-NAT-MIB", "etsysNatBindingPoolName"), ("ENTERASYS-NAT-MIB", "etsysNatBindingProtocol"), ("ENTERASYS-NAT-MIB", "etsysNatBindingAlgType"), ("ENTERASYS-NAT-MIB", "etsysNatBindingFtpDataChannelCount"), ("ENTERASYS-NAT-MIB", "etsysNatBindingIcmpFwdIdent"), ("ENTERASYS-NAT-MIB", "etsysNatBindingIcmpRevIdent"), ("ENTERASYS-NAT-MIB", "etsysNatBindingHWConns"), ("ENTERASYS-NAT-MIB", "etsysNatBindingCreationDate"), ("ENTERASYS-NAT-MIB", "etsysNatBindingExpirationDate"), ("ENTERASYS-NAT-MIB", "etsysNatBindingIdleTime"), ("ENTERASYS-NAT-MIB", "etsysNatBindingExpireTime"), ("ENTERASYS-NAT-MIB", "etsysNatBindingClear"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysNatMIBNatBindingTableGroup = etsysNatMIBNatBindingTableGroup.setStatus('current')
if mibBuilder.loadTexts: etsysNatMIBNatBindingTableGroup.setDescription('The information group for NAT bindings.')
etsysNatMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 5624, 1, 2, 75, 3, 2, 1)).setObjects(("ENTERASYS-NAT-MIB", "etsysNatMIBGlobalStatsGroup"), ("ENTERASYS-NAT-MIB", "etsysNatMIBGlobalIpv4ConfigGroup"), ("ENTERASYS-NAT-MIB", "etsysNatMIBTranslationProtocolRulesTableGroup"), ("ENTERASYS-NAT-MIB", "etsysNatMIBNatPoolTableGroup"), ("ENTERASYS-NAT-MIB", "etsysNatMIBNatListRuleTableGroup"), ("ENTERASYS-NAT-MIB", "etsysNatMIBNatStaticRuleTableGroup"), ("ENTERASYS-NAT-MIB", "etsysNatMIBNatBindingTableGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysNatMIBCompliance = etsysNatMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: etsysNatMIBCompliance.setDescription('The compliance statement for implementation of the Router Application Services NAT functionality.')
mibBuilder.exportSymbols("ENTERASYS-NAT-MIB", etsysNatStaticRuleInsideVrfName=etsysNatStaticRuleInsideVrfName, etsysNatIpv4ConfigFinRstTimeout=etsysNatIpv4ConfigFinRstTimeout, etsysNatListRuleDirection=etsysNatListRuleDirection, etsysNatStaticRuleHits=etsysNatStaticRuleHits, etsysNatStatsPortMapsUsed=etsysNatStatsPortMapsUsed, etsysNatStaticRuleEntry=etsysNatStaticRuleEntry, etsysNatIpv4ConfigFtpTimeout=etsysNatIpv4ConfigFtpTimeout, etsysNatIpv4ConfigTimeout=etsysNatIpv4ConfigTimeout, etsysNatStatsBindingsDeleted=etsysNatStatsBindingsDeleted, etsysNatBindingId=etsysNatBindingId, etsysNatMIBCompliance=etsysNatMIBCompliance, etsysNatBindingTable=etsysNatBindingTable, etsysNatBindingProtocol=etsysNatBindingProtocol, etsysNatMIBNatPoolTableGroup=etsysNatMIBNatPoolTableGroup, etsysNatGlobalIpv4Config=etsysNatGlobalIpv4Config, etsysNatBindingExpirationDate=etsysNatBindingExpirationDate, etsysNatConformance=etsysNatConformance, etsysNatStatsMinTimeoutValue=etsysNatStatsMinTimeoutValue, etsysNatStatsPoolsUsed=etsysNatStatsPoolsUsed, etsysNatStatsBindingsNoIpAddr=etsysNatStatsBindingsNoIpAddr, etsysNatPoolNextIpAddr=etsysNatPoolNextIpAddr, etsysNatPoolLsnatVservers=etsysNatPoolLsnatVservers, etsysNatStaticRuleConns=etsysNatStaticRuleConns, etsysNatStatsBindingsNoPortmapPort=etsysNatStatsBindingsNoPortmapPort, etsysNatListRuleEntry=etsysNatListRuleEntry, etsysNatStatsBindingsPerSecond=etsysNatStatsBindingsPerSecond, etsysNatPoolListRulesCount=etsysNatPoolListRulesCount, etsysNatStaticRuleProtocol=etsysNatStaticRuleProtocol, etsysNatStatsAddressUsed=etsysNatStatsAddressUsed, etsysNatIpv4ConfigIcmpTimeout=etsysNatIpv4ConfigIcmpTimeout, etsysNatPoolPrefixLen=etsysNatPoolPrefixLen, etsysNatPoolAddrUsed=etsysNatPoolAddrUsed, etsysNatBindingForwardDstIp=etsysNatBindingForwardDstIp, etsysNatPoolLastIpAddr=etsysNatPoolLastIpAddr, etsysNatBindingForwardSrcPort=etsysNatBindingForwardSrcPort, etsysNatTranslationRowStatus=etsysNatTranslationRowStatus, etsysNatTables=etsysNatTables, etsysNatListRuleOverloaded=etsysNatListRuleOverloaded, etsysNatTranslationProtocolRulesTable=etsysNatTranslationProtocolRulesTable, etsysNatListRuleMatchType=etsysNatListRuleMatchType, etsysNatListRulePoolName=etsysNatListRulePoolName, etsysNatBindingReverseDstIp=etsysNatBindingReverseDstIp, etsysNatMIBNatListRuleTableGroup=etsysNatMIBNatListRuleTableGroup, etsysNatStatsTranslationProtocolRulesCount=etsysNatStatsTranslationProtocolRulesCount, etsysNatIpv4ConfigTcpTimeout=etsysNatIpv4ConfigTcpTimeout, etsysNatStatsBindingsNoFtpALG=etsysNatStatsBindingsNoFtpALG, etsysNatBindingIcmpFwdIdent=etsysNatBindingIcmpFwdIdent, etsysNatMIBCompliances=etsysNatMIBCompliances, etsysNatListRuleConns=etsysNatListRuleConns, etsysNatListRuleName=etsysNatListRuleName, etsysNatBindingEntry=etsysNatBindingEntry, etsysNatStaticRuleOverloaded=etsysNatStaticRuleOverloaded, etsysNatGlobal=etsysNatGlobal, etsysNatTranslationProtocol=etsysNatTranslationProtocol, etsysNatPoolOutOfPorts=etsysNatPoolOutOfPorts, etsysNatIpv4ConfigDnsTimeout=etsysNatIpv4ConfigDnsTimeout, etsysNatListRuleRowStatus=etsysNatListRuleRowStatus, etsysNatBindingForwardSrcIp=etsysNatBindingForwardSrcIp, etsysNatStatsBindingsHigh=etsysNatStatsBindingsHigh, etsysNatListRuleInetVersion=etsysNatListRuleInetVersion, etsysNatBindingIdleTime=etsysNatBindingIdleTime, etsysNatPoolEntry=etsysNatPoolEntry, etsysNatGlobalStats=etsysNatGlobalStats, etsysNatBindingCreationDate=etsysNatBindingCreationDate, etsysNatStatsClear=etsysNatStatsClear, PYSNMP_MODULE_ID=etsysNatMIB, 
etsysNatStaticRuleAddressType=etsysNatStaticRuleAddressType, etsysNatStaticRuleLocalPort=etsysNatStaticRuleLocalPort, etsysNatStatsBindingsExhausted=etsysNatStatsBindingsExhausted, etsysNatListRuleTable=etsysNatListRuleTable, etsysNatStaticRuleGlobalIpAddr=etsysNatStaticRuleGlobalIpAddr, etsysNatStaticRuleRowStatus=etsysNatStaticRuleRowStatus, etsysNatBindingPoolName=etsysNatBindingPoolName, etsysNatStaticRuleGlobalPort=etsysNatStaticRuleGlobalPort, etsysNatBindingHWConns=etsysNatBindingHWConns, etsysNatStaticRuleDirection=etsysNatStaticRuleDirection, etsysNatIpv4ConfigUdpTimeout=etsysNatIpv4ConfigUdpTimeout, etsysNatMIBNatStaticRuleTableGroup=etsysNatMIBNatStaticRuleTableGroup, etsysNatBindingExpireTime=etsysNatBindingExpireTime, etsysNatPoolHits=etsysNatPoolHits, etsysNatPoolPortAlloc=etsysNatPoolPortAlloc, etsysNatPoolAddrCount=etsysNatPoolAddrCount, etsysNatBindingState=etsysNatBindingState, etsysNatListRuleInsideVrfName=etsysNatListRuleInsideVrfName, etsysNatStatsBindingsMaxReached=etsysNatStatsBindingsMaxReached, etsysNatStatsBindingsCurrent=etsysNatStatsBindingsCurrent, etsysNatIpv4ConfigFtpCtrlPort=etsysNatIpv4ConfigFtpCtrlPort, etsysNatPoolAddrAlloc=etsysNatPoolAddrAlloc, etsysNatStatsListRulesUsed=etsysNatStatsListRulesUsed, etsysNatTranslationOneShot=etsysNatTranslationOneShot, etsysNatMIBGlobalIpv4ConfigGroup=etsysNatMIBGlobalIpv4ConfigGroup, etsysNatStaticRuleReservedBindingId=etsysNatStaticRuleReservedBindingId, etsysNatBindingReverseSrcPort=etsysNatBindingReverseSrcPort, etsysNatBindingReverseDstPort=etsysNatBindingReverseDstPort, etsysNatStatsMaxTimeoutValue=etsysNatStatsMaxTimeoutValue, etsysNatStaticRuleTable=etsysNatStaticRuleTable, etsysNatMIBTranslationProtocolRulesTableGroup=etsysNatMIBTranslationProtocolRulesTableGroup, etsysNatIpv4ConfigLogTranslations=etsysNatIpv4ConfigLogTranslations, etsysNatTranslationTimeout=etsysNatTranslationTimeout, etsysNatPoolName=etsysNatPoolName, etsysNatBindingAlgType=etsysNatBindingAlgType, etsysNatListRuleHits=etsysNatListRuleHits, etsysNatTranslationInetVersion=etsysNatTranslationInetVersion, etsysNatIpv4ConfigInspectDNS=etsysNatIpv4ConfigInspectDNS, etsysNatBindingReverseSrcIp=etsysNatBindingReverseSrcIp, etsysNatBindingForwardDstPort=etsysNatBindingForwardDstPort, etsysNatMIB=etsysNatMIB, etsysNatPoolFirstIpAddr=etsysNatPoolFirstIpAddr, etsysNatStaticRuleMatchType=etsysNatStaticRuleMatchType, etsysNatIpv4ConfigMaxEntries=etsysNatIpv4ConfigMaxEntries, etsysNatTranslationPort=etsysNatTranslationPort, etsysNatIpv4ConfigFinRstTimeoutHalfClosedStatus=etsysNatIpv4ConfigFinRstTimeoutHalfClosedStatus, etsysNatStaticRuleLocalIpAddr=etsysNatStaticRuleLocalIpAddr, etsysNatMIBNatBindingTableGroup=etsysNatMIBNatBindingTableGroup, etsysNatStatsStaticRulesUsed=etsysNatStatsStaticRulesUsed, etsysNatPoolOutOfAddrs=etsysNatPoolOutOfAddrs, etsysNatMIBGlobalStatsGroup=etsysNatMIBGlobalStatsGroup, etsysNatBindingRuleType=etsysNatBindingRuleType, etsysNatTranslationProtocolRulesEntry=etsysNatTranslationProtocolRulesEntry, etsysNatBindingIcmpRevIdent=etsysNatBindingIcmpRevIdent, etsysNatStatsBindingsTotal=etsysNatStatsBindingsTotal, etsysNatPoolConns=etsysNatPoolConns, etsysNatPoolAddressType=etsysNatPoolAddressType, etsysNatBindingFtpDataChannelCount=etsysNatBindingFtpDataChannelCount, etsysNatBindingAddressType=etsysNatBindingAddressType, etsysNatListRuleIfIndex=etsysNatListRuleIfIndex, etsysNatMIBGroups=etsysNatMIBGroups, etsysNatPoolTable=etsysNatPoolTable, etsysNatBindingClear=etsysNatBindingClear, 
etsysNatStatsClearDateAndTime=etsysNatStatsClearDateAndTime, etsysNatPoolRowStatus=etsysNatPoolRowStatus)
| python |
# Generated by Django 2.1.7 on 2019-04-02 16:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('homepage', '0003_auto_20190330_2350'),
]
operations = [
migrations.AlterField(
model_name='post',
name='caption',
field=models.CharField(blank=True, max_length=2200),
),
]
| python |
import json
import maya.cmds as mc
__author__ = 'Lee Dunham'
__version__ = '1.1.0'
SHADER_MAPPING_NODE = 'ld_shader_mapping_node'
TRANSPARENT_SHADER_NAME = 'ld_transparencyShader'
# ------------------------------------------------------------------------------
def _get_shading_engine(node):
for grp in mc.ls(type='shadingEngine'):
if mc.sets(node, isMember=grp):
return grp
return None
# ------------------------------------------------------------------------------
def get_shader_mapping_node():
if mc.objExists(SHADER_MAPPING_NODE):
return SHADER_MAPPING_NODE
mc.createNode('network', n=SHADER_MAPPING_NODE)
mc.addAttr(SHADER_MAPPING_NODE, ln='shader_mapping', dt='string')
return SHADER_MAPPING_NODE
def get_shader_mappings():
    node = get_shader_mapping_node()
    # getAttr's 'type' flag is a query toggle rather than a value; read the raw string
    # and fall back to an empty mapping if the attribute has never been set.
    return json.loads(mc.getAttr(node + '.shader_mapping') or '{}')
def set_shader_mappings(data, update=False):
if update:
_data = get_shader_mappings()
_data.update(data)
data = _data
node = get_shader_mapping_node()
mc.setAttr(
node + '.shader_mapping',
json.dumps(data),
type='string',
)
def get_shader_mapping_for_node(node):
data = get_shader_mappings()
return data.get(node)
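# Usage sketch (hypothetical node and shader names; illustrates the helpers above rather
# than a production workflow). Wrapped in a function so importing this module stays free
# of side effects.
def _example_store_and_query_mappings():
    # Persist a node -> shader lookup on the network node.
    set_shader_mappings({'pCube1': 'lambert1'})
    # Merge in another entry without discarding the existing data.
    set_shader_mappings({'pSphere1': 'blinn1'}, update=True)
    # Returns 'lambert1', as stored above.
    return get_shader_mapping_for_node('pCube1')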
# ------------------------------------------------------------------------------
def toggle_transparency(object_list=None):
"""
Toggle the transparency of objects or components.
:param object_list: List of objects to affect. Use selection if None given.
:type object_list: list(str) / None
"""
object_list = object_list or mc.ls(sl=True)
if not object_list:
return
shader = TRANSPARENT_SHADER_NAME
if not mc.objExists(shader):
mc.shadingNode('lambert', asShader=True, n=shader)
        mc.setAttr(shader + '.transparency', 1, 1, 1, type='double3')
mc.select(object_list)
mc.hyperShade(assign=shader)
def main():
toggle_transparency()
# ------------------------------------------------------------------------------
if __name__ == '__main__':
main()
| python |
# --------------
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# code starts here
df = pd.read_csv(path)
print(df.head())
X = df.drop('list_price', axis=1)
y = df.list_price
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
random_state=6)
# code ends here
# --------------
import matplotlib.pyplot as plt
# code starts here
cols = X_train.columns
fig, axes = plt.subplots(nrows = 3 , ncols = 3)
for i in range(3):
for j in range(3):
col = cols[i * 3 + j]
axes[i,j].scatter(X_train[col],y_train)
# code ends here
# --------------
# Code starts here
corr = X_train.corr()
# print(corr)
# Flag feature pairs with strong positive / negative correlation; 'play_star_rating' and
# 'val_star_rating' are the strongly correlated columns dropped below.
gt_75 = corr > 0.75
lt_75 = corr < -0.75
X_train.drop(['play_star_rating', 'val_star_rating'], axis=1, inplace=True)
X_test.drop(['play_star_rating', 'val_star_rating'], axis=1, inplace=True)
# Code ends here
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
# Code starts here
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(mse)
print(r2)
# Code ends here
# --------------
# Code starts here
residual = y_test - y_pred
residual.hist()
# Code ends here
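# --------------
# Optional residual summary (a sketch, not part of the original exercise): a mean close
# to zero and a roughly symmetric spread around it suggest the linear fit is not
# systematically biased.
print(residual.describe())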
| python |
from x_rebirth_station_calculator.station_data.station_base import Ware
names = {'L044': 'Quantum Tubes',
'L049': 'Quantumröhren'}
QuantumTubes = Ware(names)
| python |
from tests import BaseTestCase
import json
from base64 import b64encode
class TestUserRegistration(BaseTestCase):
def setUp(self):
""" Sets up the test client"""
super(TestUserRegistration, self).setUp()
def test_user_registration(self):
# successful user registration
payload = dict(username="john", password="password123")
response = self.test_app.post(
'/api/v1/auth/register', data=payload)
message = str(response.data, encoding='utf-8')
self.assertEqual(response.status_code, 201)
self.assertIn('successfully added', message)
def test_user_registration_of_an_already_existing_user(self):
# register user
payload = dict(username="john", password="password123")
response = self.test_app.post('/api/v1/auth/register', data=payload)
message = str(response.data, encoding='utf-8')
self.assertEqual(response.status_code, 201)
self.assertIn('successfully added', message)
# re-register the user
payload = dict(username="john", password="password123")
response = self.test_app.post('/api/v1/auth/register', data=payload)
message = str(response.data, encoding='utf-8')
self.assertEqual(response.status_code, 409)
self.assertIn('already exists', message)
def test_user_registration_with_incomplete_data(self):
# missing password
payload = dict(username="john")
response = self.test_app.post('/api/v1/auth/register', data=payload)
message = str(response.data, encoding='utf-8')
self.assertEqual(response.status_code, 400)
self.assertIn('password not provided', message)
# missing username
payload = dict(Password="password123")
response = self.test_app.post('/api/v1/auth/register', data=payload)
message = str(response.data, encoding='utf-8')
self.assertEqual(response.status_code, 400)
self.assertIn('username not provided', message)
class TestUserLogin(BaseTestCase):
def setUp(self):
""" Sets up the test client"""
super(TestUserLogin, self).setUp()
# register the user to use in tests
payload = dict(username="john", password="password123")
response = self.test_app.post('/api/v1/auth/register', data=payload)
print(response.data)
def test_user_login(self):
# successful user login
payload = dict(username="john", password="password123")
response = self.test_app.post('/api/v1/auth/login', data=payload)
message = str(response.data, encoding='utf-8')
self.assertEqual(response.status_code, 200)
self.assertIn('Authorization', message)
def test_user_login_with_incorrect_credentials(self):
# with wrong password
payload = dict(username="john", password="wrongpass")
response = self.test_app.post('/api/v1/auth/login', data=payload)
message = str(response.data, encoding='utf-8')
self.assertEqual(response.status_code, 403)
self.assertIn('Invalid password', message)
# with non-existent username
payload = dict(username="nonexistent", password="password123")
response = self.test_app.post('/api/v1/auth/login', data=payload)
message = str(response.data, encoding='utf-8')
self.assertEqual(response.status_code, 403)
self.assertIn(' username not found', message)
# def test_getting_an_authentication_token(self):
# username = "john"
# password = "password123"
# header = {'Authorization': 'Bearer ' + b64encode(bytes(
# (username + ":" + password), 'ascii')).decode('ascii')}
# response = self.test_app.get('/api/v1/auth/token', headers=header)
# message = str(response.data, encoding='utf-8')
# #self.assertEqual(response.status_code, 200)
# self.assertIn("token", message)
def test_accessing_index_resource_with_a_token(self):
# with authentication
payload = dict(username="john", password="password123")
response = self.test_app.post('/api/v1/auth/login', data=payload)
received_data = str(response.data, 'utf-8')
token = json.loads(received_data)['Authorization']
print("Token: ", str(token))
header = {'Authorization': token}
response = self.test_app.get('api/v1/', headers=header)
received_data = str(response.data, 'utf-8')
self.assertEqual(response.status_code, 200)
self.assertIn('Welcome to Bucketlist API', received_data)
# without authentication
response = self.test_app.get('api/v1/')
self.assertEqual(response.status_code, 401)
received_data = str(response.data, 'utf-8')
self.assertIn('Unauthorized', received_data)
| python |
#!/usr/bin/env python
import argparse, os, sys, signal
sourcedir=os.path.dirname(os.path.abspath(__file__))
cwdir=os.getcwd()
sys.path.append(sourcedir)
from pythonmods import runsubprocess
def default_sigpipe():
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def positiveint(x):
x = int(x)
if x < 0:
raise argparse.ArgumentTypeError("%s is an invalid positive int value" %x)
return x
def batchsizeint(x):
x = int(x)
if x < 2:
raise argparse.ArgumentTypeError("%s is too small; batch size must be greater than 1" %x)
if x > 500:
raise argparse.ArgumentTypeError("%s is too large; batch size must not exceed 500" %x)
return x
parser = argparse.ArgumentParser(description='bacterialBercow: bringing order to bacterial sequences',add_help=False)
#Help options
help_group = parser.add_argument_group('Help')
help_group.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help='Show this help message and exit.')
#General options
general_group = parser.add_argument_group('General options')
general_group.add_argument('-o','--out', help='Output directory (required)', required=True, type=str)
general_group.add_argument('-t','--threads', help='Number of threads to use (default: 1)', default=1, type=positiveint)
#NCBI query and retrieval options
ncbi_group = parser.add_argument_group('NCBI query and retrieval options')
ncbi_group.add_argument('-e','--emailaddress', help="User's email address which will be provided as an argument to edirect econtact -email (required if retrieving data from NCBI)", required=False, type=str)
ncbi_group.add_argument('--taxonomyquery', help='Taxonomy search query term to be supplied to the edirect esearch -query argument (default: bacteria[porgn:__txid2])', default="bacteria[porgn:__txid2]", type=str)
ncbi_group.add_argument('--datequery', help='Date search query term to be supplied to the edirect esearch -query argument (e.g. "2017/01/01"[PDAT] : "3000"[PDAT] would retrieve records since 2017) (not required)', required=False, type=str)
ncbi_group.add_argument('-s','--dbsource', help='Database source; refseq or refseq_genbank (default: refseq_genbank)', default="refseq_genbank", choices=["refseq","refseq_genbank"],type=str)
ncbi_group.add_argument('--deduplicationmethod', help='Specify how identical sequences should be deduplicated; either "all" duplicates are removed; otherwise, duplicates are removed if they share biosample accession id + "submitter" metadata; or "bioproject" accession id; or "both" submitter metadata and bioproject accession id (default: "both")', default="both", choices=["both","submitter","bioproject","all"],type=str)
ncbi_group.add_argument('-b','--batchsize', help='Number of accession nucleotide records to retrieve per edirect query (default: 200; min: 2; max: 500)', default=200, type=batchsizeint)
#NCBI pipeline step customisation (specifying starting and stopping points)
steps_group = parser.add_argument_group('Customising NCBI pipeline steps (specifying starting / stopping points)')
steps_group.add_argument('--accessions', help='A text file containing NCBI plasmid accessions in the first column; if provided, these accessions will be retrieved, rather than retrieving plasmid accessions using a query term (default: retrieve accessions using a query term)',required=False)
steps_group.add_argument('--retrieveaccessionsonly', action='store_true',help='If flag is provided, stop after retrieving and filtering NCBI accessions (default: do not stop)',required=False)
steps_group.add_argument('--retrievesequencesonly', action='store_true',help='If flag is provided, stop after retrieving deduplicated sequences from NCBI filtered accessions (default: do not stop)',required=False)
steps_group.add_argument('--restartwithsequences', action='store_true',help='If flag is provided, re-start the pipeline using sequences retrieved from NCBI',required=False)
#In-house contig options
contig_group = parser.add_argument_group('Customising in-house contig pipeline steps')
contig_group.add_argument('--inhousesequences', help='A fasta file containing uncharacterised bacterial contig nucleotide sequences; if provided, these contigs will be typed using rmlst and replicon loci to determine whether they are likely to be plasmids or chromosomal (default: retrieve sequences from NCBI)',required=False)
contig_group.add_argument('--typing', help='Specifies what sequence typing to perform (only applicable if in-house sequences are provided using --inhousesequences flag); either "replicon", "rmlst" typing or "both" (default: both)',default="both",choices=["both","replicon","rmlst"],required=False)
contig_group.add_argument('--contigsamples', help='A tsv file containing contig names in the first column and associated sample names in the second column',required=False)
contig_group.add_argument('--contigcompleteness', help='A tsv file containing contig names in the first column and contig completeness information in the second column (accepted contig completeness descriptions: circular,complete,complete_linear,linear,incomplete,unknown)',required=False)
#contig_group.add_argument('--sampleoutput', action='store_true',help='If flag is provided, output a file with typing information at the sample-level (--contigsamples must be provided)',required=False)
contig_group.add_argument('--typedcontigsonly', action='store_true',help='If flag is provided, only include contigs that have a detected rMLST/replicon type in the contig output file',required=False)
args = parser.parse_args()
outputpath=os.path.relpath(args.out, cwdir)
#check databases downloaded
rmlstdbexists=os.path.exists('%s/databases/rmlstalleles/blastdbs'%sourcedir)
plasmidfinderdbexists=os.path.exists('%s/databases/plasmidfinder_db/blastdbs'%sourcedir)
if rmlstdbexists==False or plasmidfinderdbexists==False:
if rmlstdbexists==False and plasmidfinderdbexists==False:
sys.exit('Error: the rMLST database and the PlasmidFinder database must be installed first (see README)')
elif rmlstdbexists==False:
sys.exit('Error: the rMLST database must be installed first (see README)')
else:
sys.exit('Error: the PlasmidFinder database must be installed first (see README)')
#check --sampleoutput flag used correctly if provided
#if args.sampleoutput==True and args.contigsamples==None:
# sys.exit('Error: --sampleoutput is only possible if the --contigsamples flag is provided, to specify sample groupings')
if args.contigsamples!=None:
    args.sampleoutput=True #always produce sample-level output if args.contigsamples is provided
else:
    args.sampleoutput=False #finalfilter.py later reads args.sampleoutput, so define it either way
cmdArgs=['mkdir -p %s'%outputpath]
runsubprocess(cmdArgs,shell=True)
###retrieve accessions and sequences from NCBI
if args.inhousesequences==None and args.restartwithsequences==False:
if args.accessions==None:
        if args.datequery==None:
            datepresent="absent"
        else:
            datepresent="present"
runsubprocess(['bash','%s/downloadaccessions.sh'%sourcedir,datepresent,str(args.taxonomyquery),str(args.datequery),str(args.dbsource),outputpath])
print('Retrieved accessions from NCBI')
runsubprocess(['python','%s/filteraccessions.py'%sourcedir,outputpath])
print('Finished initial filtering of accessions based on accession title text')
else:
runsubprocess(['bash','%s/downloaduseraccessions.sh'%sourcedir,str(args.accessions),outputpath])
print('Retrieved accessions from NCBI')
runsubprocess(['python','%s/filteraccessions.py'%sourcedir,outputpath])
print('Finished initial filtering of accessions based on accession title text')
###retrieve sequences if args.retrieveaccessionsonly is false
if args.retrieveaccessionsonly==True:
sys.exit()
else:
runsubprocess(['bash','%s/downloadsequences.sh'%sourcedir,str(args.batchsize),str(args.emailaddress),outputpath])
print('Downloaded sequences from NCBI')
runsubprocess(['python','%s/deduplicateseqs.py'%sourcedir,str(args.deduplicationmethod),outputpath])
print('Deduplicated sequences using deduplication method: %s'%str(args.deduplicationmethod))
if args.retrieveaccessionsonly==True:
sys.exit()
if args.retrievesequencesonly==True:
sys.exit()
###characterise sequences to identify plasmids
cmdArgs=['mkdir -p %s/plasmidfinder'%outputpath]
runsubprocess(cmdArgs,shell=True)
cmdArgs=['mkdir -p %s/rmlst'%outputpath]
runsubprocess(cmdArgs,shell=True)
enterobacteriaceaedbpath='%s/databases/plasmidfinder_db/blastdbs/enterobacteriaceaedb'%sourcedir
gram_positivedbpath='%s/databases/plasmidfinder_db/blastdbs/gram_positivedb'%sourcedir
rmlstdbpath='%s/databases/rmlstalleles/blastdbs'%sourcedir
rmlstprofilepath='%s/databases/rmlstalleles'%sourcedir
if args.inhousesequences==None:
runsubprocess(['python', '%s/plasmidfinder.py'%sourcedir,'enterobacteriaceae',enterobacteriaceaedbpath,str(args.threads),outputpath,'ncbi',sourcedir])
print('Finished BLAST searching Enterobacteriaceae PlasmidFinder database')
runsubprocess(['python', '%s/plasmidfinder.py'%sourcedir,'gram_positive',gram_positivedbpath,str(args.threads),outputpath,'ncbi',sourcedir])
print('Finished BLAST searching Gram-positive PlasmidFinder database')
runsubprocess(['python', '%s/rmlst.py'%sourcedir,rmlstdbpath,str(args.threads),outputpath,'ncbi',sourcedir])
print('Finished BLAST searching rMLST database')
runsubprocess(['python', '%s/finalfilter.py'%sourcedir, rmlstprofilepath,outputpath, 'ncbi','enterobacteriaceae', 'gram_positive'])
else:
cmdArgs=["cat %s | bioawk -c fastx '{print $name,length($seq)}' > %s/seqlengths.tsv"%(str(args.inhousesequences),outputpath)]
runsubprocess(cmdArgs,shell=True)
if args.typing=='replicon' or args.typing=='both':
runsubprocess(['python', '%s/plasmidfinder.py'%sourcedir,'enterobacteriaceae',enterobacteriaceaedbpath,str(args.threads),outputpath,'user',sourcedir,str(args.inhousesequences)])
print('Finished BLAST searching Enterobacteriaceae PlasmidFinder database')
runsubprocess(['python', '%s/plasmidfinder.py'%sourcedir,'gram_positive',gram_positivedbpath,str(args.threads),outputpath,'user',sourcedir,str(args.inhousesequences)])
print('Finished BLAST searching Gram-positive PlasmidFinder database')
if args.typing=='rmlst' or args.typing=='both':
runsubprocess(['python', '%s/rmlst.py'%sourcedir,rmlstdbpath,str(args.threads),outputpath,'user',sourcedir,str(args.inhousesequences)])
print('Finished BLAST searching rMLST database')
runsubprocess(['python', '%s/finalfilter.py'%sourcedir, rmlstprofilepath,outputpath,'user',str(args.typing),'enterobacteriaceae', 'gram_positive',str(args.contigcompleteness),str(args.contigsamples),str(args.sampleoutput),str(args.typedcontigsonly)])
cmdArgs=["rm %s/seqlengths.tsv"%outputpath]
runsubprocess(cmdArgs,shell=True)
print('Finished running bacterialBercow!')
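###Example invocations (hypothetical file names; substitute the name this script is saved under)
# Retrieve and filter NCBI plasmid accessions only:
#   python bacterialBercow.py -o output -e user@example.com --retrieveaccessionsonly
# Type in-house contigs with both replicon and rMLST typing, using 4 threads:
#   python bacterialBercow.py -o output --inhousesequences contigs.fasta --typing both -t 4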
###OLD CODE
##Replicon and rMLST typing options
#typing_group = parser.add_argument_group('Replicon and rMLST typing options')
#typing_group.add_argument('--typing', help='Specifies what sequence typing to perform (only applicable if in-house sequences are provided using --inhousesequences flag); either "replicon", "rmlst" typing or "both" (default: both)',default="both",choices=["both","replicon","rmlst"],required=False)
#typing_group.add_argument('--enterobacdbpath', help='Path to the "enterobacteriaceae" plasmidfinder BLAST database (default: databases/plasmidfinder/enterobacteriaceae/enterobacteriaceaedb)',required=False)
#typing_group.add_argument('--gramposdbpath', help='Path to the "gram_positive" plasmidfinder BLAST database (default: databases/plasmidfinder/gram_positive/gram_positivedb)',required=False)
#typing_group.add_argument('--rmlstdbpath', help='Path to the directory used to store the rmlst blast database files (default: databases/rmlstalleles/blastdbs)',required=False)
#typing_group.add_argument('--rmlstprofilepath', help='Path to the directory used to store the rmlst profile file (default: databases/rmlstalleles)',required=False)
# if args.enterobacdbpath==None:
# enterobacteriaceaedbpath='%s/databases/plasmidfinder/enterobacteriaceae/enterobacteriaceaedb'%sourcedir
# else:
# enterobacteriaceaedbpath=str(args.enterobacdbpath)
# if args.gramposdbpath==None:
# gram_positivedbpath='%s/databases/plasmidfinder/gram_positive/gram_positivedb'%sourcedir
# else:
# gram_positivedbpath=str(args.gramposdbpath)
# if args.rmlstdbpath==None:
# rmlstdbpath='%s/databases/rmlstalleles/blastdbs'%sourcedir
# else:
# rmlstdbpath=str(args.rmlstdbpath)
# if args.rmlstprofilepath==None:
# rmlstprofilepath='%s/databases/rmlstalleles'%sourcedir
# else:
# rmlstprofilepath=str(args.rmlstprofilepath)
| python |
"""
Compute the overall accuracy of a confusion matrix
"""
from __future__ import print_function
import sys
from optparse import OptionParser
import numpy as np
import cpa.util
from cpa.profiling.confusion import confusion_matrix, load_confusion
parser = OptionParser("usage: %prog [options] CONFUSION")
parser.add_option('-f', dest='float', action='store_true', help='use floating-point accuracies')
parser.add_option('-o', dest='output_filename', help='file to store the profiles in')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('Incorrect number of arguments')
(input_filename,) = args
confusion = load_confusion(input_filename)
# 'if'[options.float or 0] picks the dtype code for the matrix: 'i' (integer counts) by
# default, 'f' (floating point) when -f is given.
cm = confusion_matrix(confusion, 'if'[options.float or 0])
acc = 100.0 * np.diag(cm).sum() / cm.sum()
def write_output(f):
print('%.0f%%' % acc, file=f)
if options.output_filename:
with cpa.util.replace_atomically(options.output_filename) as f:
write_output(f)
else:
write_output(sys.stdout)
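# Worked example of the accuracy formula above on a toy 2x2 confusion matrix
# (illustrative only; the real input comes from load_confusion/confusion_matrix):
#   cm = np.array([[40, 10], [5, 45]])
#   100.0 * np.diag(cm).sum() / cm.sum()   # -> 85.0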
| python |
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""KD tree data structure for searching N-dimensional vectors (DEPRECATED).
The KD tree data structure can be used for all kinds of searches that
involve N-dimensional vectors. For example, neighbor searches (find all points
within a radius of a given point) or finding all point pairs in a set
that are within a certain radius of each other. See "Computational Geometry:
Algorithms and Applications" (Mark de Berg, Marc van Kreveld, Mark Overmars,
Otfried Schwarzkopf).
This module is DEPRECATED; its replacement is Bio.PDB.kdtrees.
"""
from .KDTree import KDTree
import warnings
from Bio import BiopythonDeprecationWarning
warnings.warn(
"Bio.KDTree has been deprecated, and we intend to remove it"
" in a future release of Biopython. Please use Bio.PDB.kdtrees"
" instead, which is functionally very similar.",
BiopythonDeprecationWarning,
)
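# Usage sketch for the deprecated interface (assumes the classic Bio.KDTree API with
# KDTree(dim), set_coords(), search() and get_indices(); new code should use
# Bio.PDB.kdtrees instead, as the warning above states):
#
#   import numpy as np
#   kdt = KDTree(3)                                    # 3-dimensional points
#   kdt.set_coords(np.random.random((100, 3)).astype("f"))
#   kdt.search(np.array([0.5, 0.5, 0.5], "f"), 0.1)    # points within radius 0.1
#   neighbour_indices = kdt.get_indices()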
| python |
from sanic import Blueprint
from sanic.exceptions import NotFound, Unauthorized, ServerError, Forbidden
from sanic.response import json
from utils import error, success
ex = Blueprint('exception')
@ex.exception(Unauthorized)
async def unauthorized(request, exception):
""" 用于处理账号错误 """
return error(message=f'{exception}', status=401)
@ex.exception(ServerError)
async def server_error(request, exception):
""" 用于处理系统错误 """
return error(message=f'{exception}', status=500)
@ex.exception(Forbidden)
async def forbidden(request, exception):
""" 处理权限错误 """
return error(message=f'{exception}', status=403)
@ex.exception(NotFound)
async def not_found(request, exception):
""" 处理 404 """
return error(message=f'URL {request.url} Not Found')
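# Usage sketch (assumes the Sanic application is created elsewhere in the project); the
# handlers above only take effect once the blueprint is attached to an app:
#
#   from sanic import Sanic
#   app = Sanic('bucketlist')
#   app.blueprint(ex)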
| python |
from setuptools import setup
with open("README.md") as f:
long_description = f.read()
# tests_require = ["vcrpy>=1.10.3",]
setup(
name="monkeytools",
version="0.4",
description="A personal collection of algorithms and tools for the standard code monkey.",
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
author="Matthew DeVerna",
author_email="mdeverna@iu.edu",
url="https://github.com/mr-devs/monkeytools",
project_urls={
"Documentation": "https://github.com/mr-devs/monkeytools",
"Issue Tracker": "https://github.com/mr-devs/monkeytools/issues",
"Source Code": "https://github.com/mr-devs/monkeytools",
},
download_url="https://github.com/mr-devs/monkeytools",
packages=["monkeytools"],
# install_requires=[],
# tests_require=tests_require,
python_requires=">=3.5",
)
| python
from .swt import Seawat
from .swtvdf import SeawatVdf
| python |
from util.fileops import FileOps
from util.cli import CLI
import subprocess
import os
class BackBlazeB2:
def __init__(self):
self.fileops = FileOps()
self.cli = CLI()
self.bucket = self.fileops.bb_bucket
def authorize(self):
subprocess.run([self.fileops.blaze,"authorize-account"])
def lsBucket(self):
proc = subprocess.run([self.fileops.blaze,"ls",self.bucket], stdout=subprocess.PIPE)
outDecode = proc.stdout.decode("utf-8").split()
        try:
            # Look up the file id of the newest object in the bucket (token 17 of the
            # whitespace-split list-file-names output) so it can be deleted before a new
            # snapshot is uploaded.
            get_id = subprocess.run([self.fileops.blaze,"list-file-names",self.bucket, outDecode[0]], stdout=subprocess.PIPE)
            idDecode = get_id.stdout.decode("utf-8").split()
            fileName=outDecode[0]
            fileId=idDecode[17]
            return fileName, fileId[1:-2]
        except:
            # Empty bucket or unexpected CLI output: nothing to replace.
            return None, None
def deleteb2(self,fn, fid):
subprocess.run([self.fileops.blaze,"delete-file-version", fn, fid])
def cpBucket(self):
os.chdir(self.fileops.snapshots)
#delete current S3 snapshot
currentb2_name, currentb2_id = self.lsBucket()
if currentb2_name != None:
self.deleteb2(currentb2_name, currentb2_id)
#get current
l,f = self.fileops.get_folders()
#zip current
self.fileops.createZip(l)
current = l+".zip"
#upload current
subprocess.run([self.fileops.blaze,"upload-file",self.bucket,current,current])
#delete zip
self.fileops.cleanZip(current)
def restore(self):
os.chdir(self.fileops.snapshots)
#get current and download
currentb2_name, currentb2_id = self.lsBucket()
#download
subprocess.run([self.fileops.blaze,"download-file-by-name",self.bucket,currentb2_name,currentb2_name])
#unzip
self.fileops.unzipZip(currentb2_name)
#cleanup zip
self.fileops.cleanZip(currentb2_name)
#import new snapshot
self.cli.import_snap(currentb2_name[:-4])
def menu_options(self):
print("--authorizeB2","configures authorizes BackBlaze B2 connection")
print("--uploadB2", "uploads most recent snapshot to BackBlaze B2")
print("--downloadB2", "downloads most recent snapshot from BackBlaze B2 and imports into database")
def menu(self, option):
if option=="--authorizeB2":
self.authorize()
elif option=="--uploadB2":
self.cpBucket()
elif option=="--downloadB2":
self.restore()
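# Usage sketch (assumes util.fileops/util.cli are configured and the b2 CLI is installed;
# not part of the original interface):
#
#   b2 = BackBlazeB2()
#   b2.menu("--authorizeB2")   # one-off account authorisation
#   b2.menu("--uploadB2")      # zip and upload the latest snapshot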
| python |
import sys
from ga144 import GA144
#import draw
if __name__ == '__main__':
g = GA144()
g.loadprogram(sys.argv[2])
# v = draw.Viz(g.active())
# v.render("pictures/%s.png" % sys.argv[2])
g.download(sys.argv[1], 460800)
| python |
import time
import pytest
import rfernet
def test_sanity():
key = rfernet.Fernet.generate_new_key()
# Generates random string already so why not?
plain = rfernet.Fernet.generate_new_key().encode()
fernet = rfernet.Fernet(key)
encrypted = fernet.encrypt(plain)
assert fernet.decrypt(encrypted) == plain
encrypted = fernet.encrypt(plain)
assert fernet.decrypt_with_ttl(encrypted, 1000) == plain
def test_error_ttl():
key = rfernet.Fernet.generate_new_key()
# Generates random string already so why not?
plain = rfernet.Fernet.generate_new_key().encode()
fernet = rfernet.Fernet(key)
encrypted = fernet.encrypt(plain)
with pytest.raises(rfernet.DecryptionError):
time.sleep(2)
fernet.decrypt_with_ttl(encrypted, 1)
def test_invalid_key():
with pytest.raises(ValueError):
rfernet.Fernet("asd")
def test_decryption_failure():
fernet_1 = rfernet.Fernet(rfernet.Fernet.generate_new_key())
fernet_2 = rfernet.Fernet(rfernet.Fernet.generate_new_key())
encrypted = fernet_1.encrypt(rfernet.Fernet.generate_new_key().encode())
with pytest.raises(rfernet.DecryptionError):
fernet_2.decrypt(encrypted)
def test_multifernet_sanity():
keys = [rfernet.Fernet.generate_new_key() for _ in range(6)]
ferns = [rfernet.Fernet(k) for k in keys]
mfern = rfernet.MultiFernet(keys)
for encryptor in ferns:
cypher = encryptor.encrypt(b'hello there')
decyphered = mfern.decrypt(cypher)
assert decyphered == b'hello there'
def test_multifernet_enc():
keys = [rfernet.Fernet.generate_new_key() for _ in range(6)]
fern = rfernet.Fernet(keys[0])
mfern = rfernet.MultiFernet(keys)
for plaintext in [
b'hello there',
b'',
b'why'
]:
single_cypher = fern.encrypt(plaintext)
multi_cypher = mfern.encrypt(plaintext)
assert mfern.decrypt(single_cypher) == fern.decrypt(multi_cypher) == plaintext
def test_mfern_invalid_key():
with pytest.raises(ValueError):
rfernet.MultiFernet([rfernet.Fernet.generate_new_key(), "asd", rfernet.Fernet.generate_new_key()])
def test_mfern_decryption_failure():
keys = [rfernet.Fernet.generate_new_key() for _ in range(6)]
fern = rfernet.Fernet(rfernet.Fernet.generate_new_key())
mfern = rfernet.MultiFernet(keys)
cypher = fern.encrypt(b'hello there')
with pytest.raises(rfernet.DecryptionError):
mfern.decrypt(cypher)
| python |
import time
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from market_maker.settings import settings
# ----------------------------------------------------------------------------------------------------------------------
# Config
base_url = 'https://fxadk.com/api/'
session = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
session.mount('https://', HTTPAdapter(max_retries=retries))
# ----------------------------------------------------------------------------------------------------------------------
# Public API
class FxAdkImpl(object):
def __init__(self, api_key, api_secret):
self.api_key = api_key
self.api_secret = api_secret
self.max_attempts = 5
def get_post_json_impl(self, url, data, attempt=1):
if attempt > 1:
print('Attempt %i' % attempt)
try:
res = session.post(url, data)
except:
time.sleep(settings.API_ERROR_INTERVAL)
if attempt > self.max_attempts:
raise
return self.get_post_json_impl(url, data, attempt=attempt+1)
try:
return res.json()
except:
print('FxADK error: %s' % res.content)
time.sleep(settings.API_ERROR_INTERVAL)
if attempt > self.max_attempts:
raise
return self.get_post_json_impl(url, data, attempt=attempt+1)
def get_post_json(self, url, data):
print('Calling %s' % url)
post_json = self.get_post_json_impl(url, data)
time.sleep(settings.API_REST_INTERVAL)
return post_json
def get_currency_details(self, url='%s%s' % (base_url, 'getCurrencies')):
data = {
'api_key': self.api_key,
'api_secret': self.api_secret,
}
res_json = self.get_post_json(url, data)
return res_json
def get_pair_details(self, pair='ADK/BTC', url='%s%s' % (base_url, 'getPairDetails')):
data = {
'api_key': self.api_key,
'api_secret': self.api_secret,
'pair': pair,
}
res_json = self.get_post_json(url, data)
return res_json
def get_market_history(self, pair='ADK/BTC', url='%s%s' % (base_url, 'getMarketHistory')):
data = {
'api_key': self.api_key,
'api_secret': self.api_secret,
'pair': pair,
}
res_json = self.get_post_json(url, data)
return res_json
def get_buy_orders(self, pair='ADK/BTC', url='%s%s' % (base_url, 'getBuyOrders')):
data = {
'api_key': self.api_key,
'api_secret': self.api_secret,
'pair': pair,
}
res_json = self.get_post_json(url, data)
return res_json
def get_sell_orders(self, pair='ADK/BTC', url='%s%s' % (base_url, 'getSellOrders')):
data = {
'api_key': self.api_key,
'api_secret': self.api_secret,
'pair': pair,
}
res_json = self.get_post_json(url, data)
return res_json
# ----------------------------------------------------------------------------------------------------------------------
# Private API
ORDER_ID_KEY = 'orderid'
def create_order(self, amount=0.00000011, price=0.0, order='limit', type='buy', pair='ADK/BTC', url='%s%s' % (base_url, 'createOrder')):
asset = pair.split('/')[0]
pair = pair.replace('/', '_') # this will probably not be needed in the future
data = {
'api_key': self.api_key,
'api_secret': self.api_secret,
'amount': amount,
'price': price,
'order': order,
'type': type,
'pair': pair,
}
res_json = self.get_post_json(url, data)
if self.ORDER_ID_KEY in res_json:
order_id = res_json[self.ORDER_ID_KEY]
print('Created order %s' % order_id)
return res_json # return the whole order object
print(res_json)
raise RuntimeError('Failed to create order to %s %s %s' % (type, amount, asset))
def cancel_order(self, order_id, url='%s%s' % (base_url, 'cancelOrder')):
data = {
'api_key': self.api_key,
'api_secret': self.api_secret,
'orderid': order_id,
}
res_json = self.get_post_json(url, data)
if res_json.get('status') != 'success':
raise RuntimeError('Failed to cancel order %s' % order_id)
print('Successfully cancelled order %s' % order_id)
def get_trade_history(self, pair='ADK/BTC', url='%s%s' % (base_url, 'getTradeHistory')):
data = {
'api_key': self.api_key,
'api_secret': self.api_secret,
'pair': pair,
}
res_json = self.get_post_json(url, data)
return res_json
def get_cancel_history(self, pair='ADK/BTC', url='%s%s' % (base_url, 'getCancelHistory')):
data = {
'api_key': self.api_key,
'api_secret': self.api_secret,
'pair': pair,
}
res_json = self.get_post_json(url, data)
return res_json
def get_stop_orders(self, pair='ADK/BTC', url='%s%s' % (base_url, 'getStopOrders')):
"""These are active stop loss orders"""
data = {
'api_key': self.api_key,
'api_secret': self.api_secret,
'pair': pair,
}
res_json = self.get_post_json(url, data)
return res_json
def get_open_orders(self, pair='ADK/BTC', url='%s%s' % (base_url, 'getOpenOrders')):
data = {
'api_key': self.api_key,
'api_secret': self.api_secret,
'pair': pair,
}
res_json = self.get_post_json(url, data)
return res_json
def get_withdraw_history(self, url='%s%s' % (base_url, 'getWithdrawhistory')):
data = {
'api_key': self.api_key,
'api_secret': self.api_secret,
}
res_json = self.get_post_json(url, data)
return res_json
def get_deposit_history(self, url='%s%s' % (base_url, 'getDeposithistory')):
data = {
'api_key': self.api_key,
'api_secret': self.api_secret,
}
res_json = self.get_post_json(url, data)
return res_json
def get_account_balance(self, url='%s%s' % (base_url, 'getAccountbalance')):
"""Get account balance"""
data = {
'api_key': self.api_key,
'api_secret': self.api_secret,
}
res_json = self.get_post_json(url, data)
return res_json
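# ----------------------------------------------------------------------------------------------------------------------
# Usage sketch (hypothetical credentials; only calls methods defined above):
#
#   fxadk = FxAdkImpl(api_key='YOUR_KEY', api_secret='YOUR_SECRET')
#   ticker = fxadk.get_pair_details(pair='ADK/BTC')
#   order = fxadk.create_order(amount=1.0, price=0.00001, order='limit', type='buy', pair='ADK/BTC')
#   fxadk.cancel_order(order[FxAdkImpl.ORDER_ID_KEY])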
| python |
from spaceNetUtilities import labelTools as lT
import os
import glob
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-imgDir", "--imgDir", type=str,
help="Directory of Raster Images")
parser.add_argument("-geoDir", "--geojsonDir", type=str,
help="Directory of geojson files")
parser.add_argument("-o", "--outputCSV", type=str,
help="Output File Name and Location for CSV")
parser.add_argument("-pixPrecision", "--pixelPrecision", type=int,
help="Number of decimal places to include for pixel, uses round(xPix, pixPrecision)"
"Default = 2",
default=2)
parser.add_argument("--CreateProposalFile", help="Create ProposalsFile",
action="store_true")
parser.add_argument("-strip", "--stripOutFromGeoJson", type=str,
help="string delimited")
parser.add_argument("--DontstripFirstUnderScore", action="store_false")
args = parser.parse_args()
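    # Example invocation (hypothetical paths and script name):
    #   python createCSVFromGeoJson.py -imgDir /data/AOI_1_RIO/3band -geoDir /data/AOI_1_RIO/geojson \
    #       -o AOI_1_RIO_summary.csv --CreateProposalFile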
rasterDirectory = args.imgDir
geoJsonDirectory = args.geojsonDir
outputCSVFileName = args.outputCSV
createProposalFile = args.CreateProposalFile
if args.stripOutFromGeoJson:
stripList = args.stripOutFromGeoJson.split(' ')
else:
stripList =[]
#band3directory = '/usr/local/share/data/AOI_1_RIO/processed2/3band'
#band8directory = '/usr/local/share/data/AOI_1_RIO/processed2/8band'
#geoJsonDirectory = '/usr/local/share/data/AOI_1_RIO/processed2/geojson'
jsonList = []
chipSummaryList = []
#AOI_2_RIO_3Band_img997.tif
#AOI_2_RIO_img635.geojson
# find RasterPrecursor
rasterList = glob.glob(os.path.join(rasterDirectory, '*.tif'))
rasterPrefix = os.path.basename(rasterList[0])
rasterPrefix = rasterPrefix.split("_")[0]
geoJsonList = glob.glob(os.path.join(geoJsonDirectory, '*.geojson'))
for imageId in geoJsonList:
imageId = os.path.basename(imageId)
rasterName = imageId.replace('.geojson','.tif')
for stripItem in stripList:
rasterName = rasterName.replace(stripItem, '')
if args.DontstripFirstUnderScore:
rasterName = rasterPrefix+"_"+rasterName.split('_',1)[1]
else:
rasterName = rasterPrefix+"_"+rasterName
print(imageId)
print(os.path.join(rasterDirectory,rasterName))
chipSummary = {'chipName': os.path.join(rasterDirectory, rasterName),
'geoVectorName': os.path.join(geoJsonDirectory, imageId),
'imageId': os.path.splitext(imageId)[0]}
chipSummaryList.append(chipSummary)
print("starting")
lT.createCSVSummaryFile(chipSummaryList, outputCSVFileName,
replaceImageID=rasterPrefix+"_",
createProposalsFile=createProposalFile,
pixPrecision=args.pixelPrecision)
print("finished") | python |
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
from ..types import PublicChatType
class GetCreatedPublicChats(BaseObject):
"""
Returns a list of public chats of the specified type, owned by the user
:param type_: Type of the public chats to return
:type type_: :class:`PublicChatType`
"""
ID: str = Field("getCreatedPublicChats", alias="@type")
type_: PublicChatType = Field(..., alias='type')
@staticmethod
def read(q: dict) -> GetCreatedPublicChats:
return GetCreatedPublicChats.construct(**q)
| python |
"""Module containing the ShearSplink pipelines."""
import logging
from pathlib import Path
from cutadapt import seqio
import pandas as pd
import pysam
from pyim.external.cutadapt import cutadapt, cutadapt_summary
from pyim.external.bowtie2 import bowtie2
from pyim.external.util import flatten_arguments
from pyim.model import Insertion
from pyim.util.path import shorten_path, extract_suffix
from .base import Pipeline, register_pipeline
from ..util import extract_insertions
DEFAULT_OVERLAP = 3
DEFAULT_ERROR_RATE = 0.1
class ShearSplinkPipeline(Pipeline):
"""ShearSplink pipeline.
Analyzes (single-end) sequencing data that was prepared using the
ShearSplink protocol. Sequence reads are expected to have the following
structure::
[Transposon][Genomic][Linker]
Here, ``transposon`` refers to the flanking part of the transposon
sequence, ``linker`` to the flanking linker sequence and ``genomic``
to the genomic DNA located in between (which varies per insertion).
The linker sequence is optional and may be omitted if the linker is not
included in sequencing.
The pipeline essentially performs the following steps:
- If contaminants are provided, sequence reads are filtered
(using Cutadapt) for the contaminant sequences.
- The remaining reads are trimmed to remove the transposon and
linker sequences, leaving only genomic sequences. Reads without
the transposon/linker sequences are dropped, as we cannot be certain
of their origin. (Note that the linker is optional and is only
trimmed if a linker is given).
- The genomic reads are aligned to the reference genome.
- The resulting alignment is used to identify insertions.
Note that this pipeline does **NOT** support multiplexed datasets (which is
the default output of the ShearSplink protocol). For multiplexed datasets,
use the ``MultiplexedShearSplinkPipeline``.
Parameters
----------
transposon_path : Path
Path to the (flanking) transposon sequence (fasta).
bowtie_index_path : Path
Path to the bowtie index.
linker_path : Path
Path to the linker sequence (fasta).
contaminant_path : Path
Path to file containing contaminant sequences (fasta). If provided,
sequences are filtered for these sequences before extracting genomic
sequences for alignment.
min_length : int
Minimum length for genomic reads to be kept for alignment.
min_support : int
Minimum support for insertions to be kept in the final output.
min_mapq : int
Minimum mapping quality of alignments to be used for
identifying insertions.
merge_distance : int
Maximum distance within which insertions are merged. Used to merge
insertions that occur within close vicinity, which is typically due
to slight variations in alignments.
bowtie_options : Dict[str, Any]
Dictionary of extra options for Bowtie.
min_overlaps : Dict[str, int]
Minimum overlap required to recognize the transposon, linker and
contaminant sequences (see Cutadapts documentation for more
information). Keys of the dictionary indicate to which sequence the
overlap corresponds and should be one of the following: ``linker``,
``transposon`` or ``contaminant``.
error_rates : Dict[str, float]
Maximum error rate to use when recognizing transposon, linker and
contaminant sequences (see Cutadapts documentation for more
information). Keys should be the same as for ``min_overlaps``.
"""
def __init__(self,
transposon_path,
bowtie_index_path,
linker_path=None,
contaminant_path=None,
min_length=15,
min_support=2,
min_mapq=23,
merge_distance=None,
bowtie_options=None,
min_overlaps=None,
error_rates=None):
super().__init__()
self._transposon_path = transposon_path
self._linker_path = linker_path
self._contaminant_path = contaminant_path
self._index_path = bowtie_index_path
self._min_length = min_length
self._min_support = min_support
self._min_mapq = min_mapq
self._merge_distance = merge_distance
self._bowtie_options = bowtie_options or {}
self._min_overlaps = min_overlaps or {}
self._error_rates = error_rates or {}
@classmethod
def configure_args(cls, parser):
cls._setup_base_args(parser, paired=False)
parser.description = 'ShearSplink pipeline'
# Paths to various sequences.
seq_options = parser.add_argument_group('Sequences')
seq_options.add_argument(
'--transposon',
type=Path,
required=True,
help='Fasta file containing the transposon sequence.')
seq_options.add_argument(
'--contaminants',
type=Path,
default=None,
help='Fasta file containing contaminant sequences.')
seq_options.add_argument(
'--linker',
type=Path,
default=None,
help='Fasta file containing the linker sequence.')
# Trimming options (used for cutadapt).
trim_options = parser.add_argument_group('Trimming')
trim_options.add_argument(
'--min_length',
type=int,
default=15,
help='Minimum length for (trimmed) genomic sequences.')
trim_options.add_argument(
'--contaminant_error',
default=0.1,
type=float,
help='Maximum error rate for matching contaminants.')
trim_options.add_argument(
'--contaminant_overlap',
default=3,
type=int,
help='Minimum overlap for matching contaminants.')
trim_options.add_argument(
'--transposon_error',
default=0.1,
type=float,
help='Maximum error rate for matching the transposon.')
trim_options.add_argument(
'--transposon_overlap',
default=3,
type=int,
help='Minimum overlap for matching the transposon.')
trim_options.add_argument(
'--linker_error',
default=0.1,
type=float,
help='Maximum error rate for matching the linker.')
trim_options.add_argument(
'--linker_overlap',
default=3,
type=int,
help='Minimum overlap for matching the linker.')
align_options = parser.add_argument_group('Alignment')
align_options.add_argument(
'--bowtie_index',
type=Path,
required=True,
help='Bowtie2 index to use for alignment.')
align_options.add_argument(
'--local',
default=False,
action='store_true',
help='Use local alignment.')
ins_options = parser.add_argument_group('Insertions')
ins_options.add_argument(
'--min_mapq',
type=int,
default=23,
help=('Minimum mapping quality for reads '
'used to identify insertions.'))
ins_options.add_argument(
'--merge_distance',
type=int,
default=None,
help=('Distance within which insertions (from same '
'sample) are merged.'))
ins_options.add_argument(
'--min_support',
type=int,
default=2,
help='Minimum support for insertions.')
@classmethod
def _extract_args(cls, args):
bowtie_options = {'--local': args.local}
min_overlaps = {
'contaminant': args.contaminant_overlap,
'transposon': args.transposon_overlap,
'linker': args.linker_overlap
}
error_rates = {
'contaminant': args.contaminant_error,
'transposon': args.transposon_error,
'linker': args.linker_error
}
return dict(
transposon_path=args.transposon,
bowtie_index_path=args.bowtie_index,
linker_path=args.linker,
contaminant_path=args.contaminants,
min_length=args.min_length,
min_support=args.min_support,
min_mapq=args.min_mapq,
merge_distance=args.merge_distance,
bowtie_options=bowtie_options,
min_overlaps=min_overlaps,
error_rates=error_rates)
def run(self, read_path, output_dir, read2_path=None):
if read2_path is not None:
raise ValueError('Pipeline does not support paired-end data')
logger = logging.getLogger()
# Ensure output dir exists.
output_dir.mkdir(exist_ok=True, parents=True)
# Extract genomic sequences and align to reference.
genomic_path = self._extract_genomic(read_path, output_dir, logger)
alignment_path = self._align(genomic_path, output_dir, logger)
# Extract insertions from bam file.
bam_file = pysam.AlignmentFile(str(alignment_path))
try:
insertions = extract_insertions(
iter(bam_file),
func=_process_alignment,
merge_dist=self._merge_distance,
min_mapq=self._min_mapq,
min_support=self._min_support,
logger=logger)
finally:
bam_file.close()
# Write insertions to output file.
insertion_path = output_dir / 'insertions.txt'
ins_frame = Insertion.to_frame(insertions)
ins_frame.to_csv(str(insertion_path), sep='\t', index=False)
def _extract_genomic(self, read_path, output_dir, logger):
"""Extracts the genomic part of sequence reads."""
# Log parameters
if logger is not None:
logger.info('Extracting genomic sequences')
logger.info(' %-18s: %s', 'Transposon',
shorten_path(self._transposon_path))
logger.info(' %-18s: %s', 'Linker',
shorten_path(self._linker_path))
logger.info(' %-18s: %s', 'Contaminants',
shorten_path(self._contaminant_path))
logger.info(' %-18s: %s', 'Minimum length', self._min_length)
# Get suffix to use for intermediate/genomic files.
suffix = extract_suffix(read_path)
# Track interim files for cleaning.
interim_files = []
if self._contaminant_path is not None:
# Remove contaminants.
contaminant_out_path = output_dir / (
'trimmed_contaminant' + suffix)
contaminant_opts = {
'-g': 'file:' + str(self._contaminant_path),
'--discard-trimmed': True,
'-O': self._min_overlaps.get('contaminant', DEFAULT_OVERLAP),
'-e': self._error_rates.get('contaminant', DEFAULT_ERROR_RATE)
}
process = cutadapt(read_path, contaminant_out_path,
contaminant_opts)
if logger is not None:
summary = cutadapt_summary(process.stdout, padding=' ')
logger.info('Trimmed contaminant sequences' + summary)
interim_files.append(contaminant_out_path)
else:
contaminant_out_path = read_path
if self._linker_path is not None:
# Remove linker.
linker_out_path = output_dir / ('trimmed_linker' + suffix)
linker_opts = {
'-a': 'file:' + str(self._linker_path),
'--discard-untrimmed': True,
'-O': self._min_overlaps.get('linker', DEFAULT_OVERLAP),
'-e': self._error_rates.get('linker', DEFAULT_ERROR_RATE)
}
process = cutadapt(contaminant_out_path, linker_out_path,
linker_opts)
if logger is not None:
summary = cutadapt_summary(process.stdout, padding=' ')
logger.info('Trimmed linker sequence' + summary)
interim_files.append(linker_out_path)
else:
linker_out_path = contaminant_out_path
# Trim transposon and check minimum length.
transposon_opts = {
'-g': 'file:' + str(self._transposon_path),
'--discard-untrimmed': True,
'-O': self._min_overlaps.get('transposon', DEFAULT_OVERLAP),
'-e': self._error_rates.get('transposon', DEFAULT_ERROR_RATE)
}
if self._min_length is not None:
transposon_opts['--minimum-length'] = self._min_length
genomic_path = output_dir / ('genomic' + suffix)
process = cutadapt(linker_out_path, genomic_path, transposon_opts)
if logger is not None:
summary = cutadapt_summary(process.stdout, padding=' ')
logger.info('Trimmed transposon sequence and filtered '
'for length' + summary)
# Clean-up interim files.
for file_path in interim_files:
file_path.unlink()
return genomic_path
def _align(self, read_path, output_dir, logger):
"""Aligns genomic reads to the reference genome using Bowtie."""
# Log parameters
if logger is not None:
logger.info('Aligning to reference')
logger.info(' %-18s: %s', 'Reference',
shorten_path(self._index_path))
logger.info(' %-18s: %s', 'Bowtie options',
flatten_arguments(self._bowtie_options))
alignment_path = output_dir / 'alignment.bam'
bowtie2(
[read_path],
index_path=self._index_path,
output_path=alignment_path,
options=self._bowtie_options,
verbose=True)
return alignment_path
register_pipeline(name='shearsplink', pipeline=ShearSplinkPipeline)
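# Usage sketch (hypothetical paths; mirrors the constructor parameters documented in the
# class docstring rather than a validated configuration):
#
#   pipeline = ShearSplinkPipeline(
#       transposon_path=Path('transposon.fa'),
#       bowtie_index_path=Path('indices/mm10'),
#       linker_path=Path('linker.fa'),
#       contaminant_path=Path('contaminants.fa'),
#       min_length=15,
#       min_support=2,
#       min_mapq=23)
#   pipeline.run(read_path=Path('reads.fastq.gz'), output_dir=Path('out'))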
def _process_alignment(aln):
"""Analyzes an alignment to determine the tranposon/linker breakpoints."""
ref = aln.reference_name
if aln.is_reverse:
transposon_pos = aln.reference_end
linker_pos = aln.reference_start
strand = -1
else:
transposon_pos = aln.reference_start
linker_pos = aln.reference_end
strand = 1
return (ref, transposon_pos, strand), linker_pos
class MultiplexedShearSplinkPipeline(ShearSplinkPipeline):
"""ShearSplink pipeline supporting multiplexed reads.
Analyzes multiplexed (single-end) sequencing data that was prepared using
the ShearSplink protocol. Sequence reads are expected to have the following
structure::
[Barcode][Transposon][Genomic][Linker]
Here, the ``transposon``, ``genomic`` and ``linker`` sequences are the
same as for the ``ShearSplinkPipeline``. The ``barcode`` sequence is an
    index that indicates which sample the read originated from.
Barcode sequences should be provided using the ``barcode_path`` argument.
The optional ``barcode_mapping`` argument can be used to map barcodes to
sample names.
Parameters
----------
transposon_path : Path
Path to the (flanking) transposon sequence (fasta).
bowtie_index_path : Path
Path to the bowtie index.
    barcode_path : Path
Path to barcode sequences (fasta).
barcode_mapping : Path
Path to a tsv file specifying a mapping from barcodes to sample names.
Should contain ``sample`` and ``barcode`` columns.
linker_path : Path
Path to the linker sequence (fasta).
contaminant_path : Path
        Path to file containing contaminant sequences (fasta). If provided,
        reads containing these sequences are removed before the genomic
        sequences are extracted for alignment.
min_length : int
Minimum length for genomic reads to be kept for alignment.
min_support : int
Minimum support for insertions to be kept in the final output.
min_mapq : int
Minimum mapping quality of alignments to be used for
identifying insertions.
merge_distance : int
Maximum distance within which insertions are merged. Used to merge
insertions that occur within close vicinity, which is typically due
to slight variations in alignments.
bowtie_options : Dict[str, Any]
Dictionary of extra options for Bowtie.
min_overlaps : Dict[str, int]
Minimum overlap required to recognize the transposon, linker and
        contaminant sequences (see Cutadapt's documentation for more
information). Keys of the dictionary indicate to which sequence the
overlap corresponds and should be one of the following: ``linker``,
``transposon`` or ``contaminant``.
error_rates : Dict[str, float]
Maximum error rate to use when recognizing transposon, linker and
        contaminant sequences (see Cutadapt's documentation for more
information). Keys should be the same as for ``min_overlaps``.
"""
def __init__(self,
transposon_path,
bowtie_index_path,
barcode_path,
barcode_mapping=None,
linker_path=None,
contaminant_path=None,
min_length=15,
min_support=2,
min_mapq=23,
merge_distance=0,
bowtie_options=None,
min_overlaps=None,
error_rates=None):
super().__init__(
transposon_path=transposon_path,
bowtie_index_path=bowtie_index_path,
linker_path=linker_path,
contaminant_path=contaminant_path,
min_length=min_length,
min_support=min_support,
min_mapq=min_mapq,
merge_distance=merge_distance,
bowtie_options=bowtie_options,
min_overlaps=min_overlaps,
error_rates=error_rates)
self._barcode_path = barcode_path
self._barcode_mapping = barcode_mapping
@classmethod
def configure_args(cls, parser):
super().configure_args(parser)
parser.add_argument('--barcodes', required=True, type=Path)
parser.add_argument(
'--barcode_mapping', required=False, type=Path, default=None)
@classmethod
def _extract_args(cls, args):
arg_dict = super()._extract_args(args)
if args.barcode_mapping is not None:
map_df = pd.read_csv(args.barcode_mapping, sep='\t')
arg_dict['barcode_mapping'] = dict(
zip(map_df['barcode'], map_df['sample']))
else:
arg_dict['barcode_mapping'] = None
arg_dict['barcode_path'] = args.barcodes
return arg_dict
def run(self, read_path, output_dir, read2_path=None):
if read2_path is not None:
raise ValueError('Pipeline does not support paired-end data')
logger = logging.getLogger()
# Ensure output dir exists.
output_dir.mkdir(exist_ok=True, parents=True)
# Extract genomic sequences and align to reference.
genomic_path = self._extract_genomic(read_path, output_dir, logger)
alignment_path = self._align(genomic_path, output_dir, logger)
# Map reads to specific barcodes/samples.
logger.info('Extracting barcode/sample mapping')
logger.info(' %-18s: %s', 'Barcodes',
shorten_path(self._barcode_path))
read_map = self._get_barcode_mapping(read_path)
# Extract insertions from bam file.
bam_file = pysam.AlignmentFile(str(alignment_path))
try:
insertions = extract_insertions(
iter(bam_file),
func=_process_alignment,
group_func=lambda aln: read_map.get(aln.query_name, None),
merge_dist=self._merge_distance,
min_mapq=self._min_mapq,
min_support=self._min_support,
logger=logger)
finally:
bam_file.close()
# Write insertions to output file.
insertion_path = output_dir / 'insertions.txt'
ins_frame = Insertion.to_frame(insertions)
ins_frame.to_csv(str(insertion_path), sep='\t', index=False)
def _get_barcode_mapping(self, read_path):
# Read barcode sequences.
with seqio.open(str(self._barcode_path)) as barcode_file:
barcodes = list(barcode_file)
# Extract read --> barcode mapping.
with seqio.open(str(read_path)) as reads:
return _extract_barcode_mapping(reads, barcodes,
self._barcode_mapping)
register_pipeline(
name='shearsplink-multiplexed', pipeline=MultiplexedShearSplinkPipeline)
def _extract_barcode_mapping(reads, barcodes, barcode_mapping=None):
# Create barcode/sample dict.
barcode_dict = {bc.name: bc.sequence for bc in barcodes}
if barcode_mapping is not None:
barcode_dict = {sample: barcode_dict[barcode]
for barcode, sample in barcode_mapping.items()}
# Build mapping.
mapping = {}
for read in reads:
# Check each barcode for match in read.
matched = [k for k, v in barcode_dict.items() if v in read.sequence]
if len(matched) == 1:
# Record single matches.
name = read.name.split()[0]
mapping[name] = matched[0]
elif len(matched) > 1:
logging.warning('Skipping %s due to multiple matching barcodes',
read.name.split()[0])
return mapping
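# A minimal usage sketch (hypothetical data, not part of the pipeline): how
# _extract_barcode_mapping assigns reads to samples. Records only need ``name``
# and ``sequence`` attributes, so namedtuples stand in for seqio records here.
if __name__ == '__main__':
    from collections import namedtuple
    Record = namedtuple('Record', ['name', 'sequence'])
    example_barcodes = [Record('BC01', 'ACGTAC'), Record('BC02', 'TTGCAA')]
    example_reads = [Record('read1 extra_info', 'ACGTACGGGTTT'),
                     Record('read2', 'CCCCCCCCCCCC')]
    example_mapping = _extract_barcode_mapping(
        example_reads, example_barcodes,
        barcode_mapping={'BC01': 'sample_a', 'BC02': 'sample_b'})
    print(example_mapping)  # {'read1': 'sample_a'}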
| python |
#This is a basic library for creating the dictionaries that will be used
#in the JSON serialization sent to the application
#Importing the timedelta module from the datetime library
from datetime import timedelta
#CLASSES
class DispositivoEnvio:
def __init__(self, idD = None, noLoc = None, noDisp = None, stLum = ''):
self.idDispositivo = idD
self.localDispositivo = noLoc
self.nomeDispositivo = noDisp
self.statusLuminosidade = stLum
class OcorrenciaEnvio:
def __init__(self,vlTmp = None,dtReg = None, hrReg = None):
self.temperatura = vlTmp
self.dataRegistro = dtReg
self.horaRegistrada = hrReg
class OcorrenciasDict:
def __init__(self,ocs = None):
if ocs is None:
ocs = []
self.ocorrencias = ocs
class DispositivosDict:
def __init__(self,dps = None):
if dps is None:
dps = []
self.dispositivos = dps
class UltTempDict:
def __init__(self,diffTmpHr = None):
self.ultimaTemperatura = diffTmpHr
class DiffTempHoraDict:
def __init__(self,diffTemp = None,diffHr = None):
self.diferencaTemperatura = diffTemp
self.diferencaMin = diffHr
class FreqDispDict:
def __init__(self,freqDisp = None):
self.frequenciaDoDispositivo = freqDisp
#END OF CLASSES
#FUNCTIONS
#This function builds an object containing dictionaries with the data from the occurrences table
# Parameters: the result of a 'SELECT' query on the tb_ocorrencia table
# Returns: a dict containing the occurrence dictionaries
def getOcorrenciaDict(res):
ocDict = OcorrenciasDict()
for row in res:
oc = OcorrenciaEnvio(float(row[2]),str(row[4]),str(row[5]))
ocDict.ocorrencias.append(vars(oc))
return vars(ocDict)
#This function builds an object containing dictionaries with the data from the devices table
# Parameters: the result of a 'SELECT' query over the tb_dispositivo and tb_ocorrencia tables,
# including the luminosity status of each device
# Returns: a dict containing the device dictionaries
def getDispositivosDict(res):
dpDict = DispositivosDict()
for row in res:
dp = DispositivoEnvio(row[0],str(row[2]),str(row[1]), str(row[6]))
dpDict.dispositivos.append(vars(dp))
return vars(dpDict)
#This function builds an object containing a dictionary with the value of the last temperature
# Parameters: the result of a 'SELECT' query on the tb_ocorrencia table returning the temperature of
# the last occurrence
# Returns: a dict containing the dictionary
def getUltTempDict(res):
ultTempDict = UltTempDict()
for row in res:
oc = OcorrenciaEnvio(float(row[0]),str(row[2]),str(row[1]))
ultTempDict.ultimaTemperatura = vars(oc)
return vars(ultTempDict)
#This function builds an object containing a dictionary with the temperature difference and the time
#difference in minutes between the last occurrences of each device
# Parameters: the result of a 'SELECT' with 'UNION ALL' query on the tb_ocorrencia table, returning the
# temperature and time of the last occurrence of each device
# Returns: a dict containing the dictionary
def getDiffTempDict(res):
diffTempDict = DiffTempHoraDict()
i = 0
for i in range(0,len(res),1):
if(i > 0):
diffTempDict.diferencaTemperatura = round(abs(float(res[i][0]) - float(res[i - 1][0])),1)
diffTempDict.diferencaMin = int(round(abs(int(timedelta.total_seconds(res[i][1] - res[i - 1][1])) / 60),0))
return vars(diffTempDict)
#This function builds an object containing a dictionary with the sending frequency of a device
# Parameters: the result of a 'SELECT' query on the tb_dispositivo table returning the sending frequency
# Returns: a dict containing the dictionary
def getFreqDispDict(res):
freqDispDict = FreqDispDict()
for row in res:
freqDispDict.frequenciaDoDispositivo = float(row[0])
return vars(freqDispDict)
#This function merges a list of dictionaries for JSON serialization
# Parameters: a list of object dictionaries
# Returns: the merged dictionaries
def concatDicts(listaDicts):
allDicts = {}
for dicio in listaDicts:
allDicts.update(dicio)
return allDicts
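# A minimal usage sketch (hypothetical values, not part of the original module):
# merging two of the dictionaries above into a single payload for JSON serialization.
if __name__ == '__main__':
    import json
    ultima = UltTempDict(vars(OcorrenciaEnvio(23.5, '2021-06-01', '14:30:00')))
    frequencia = FreqDispDict(5.0)
    payload = concatDicts([vars(ultima), vars(frequencia)])
    print(json.dumps(payload))
    # {"ultimaTemperatura": {"temperatura": 23.5, ...}, "frequenciaDoDispositivo": 5.0}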
#END OF FUNCTIONS
import sys
import getopt
from learning.TruffleShuffle import TruffleShuffle
import os
from shutil import copyfile
import codecs
import shutil
import json
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def cluster(project_name, working_dir_str, copy_to_webapp=False):
#try to get the right directory to get the landmark online tools folder
if copy_to_webapp:
working = os.getcwd()
while not working.endswith('/src'):
(working,other) = os.path.split(working)
web_app_projects_dir = os.path.join(working, 'angular_flask/static/project_folders')
tf = TruffleShuffle(working_dir_str)
clusters = tf.do_truffle_shuffle(algorithm='rule_size')
clusterCount = 1
clusters_dir_str = os.path.join(working_dir_str, '../clusters')
if len(clusters) > 0:
for rule in sorted(clusters, key=lambda x: len(clusters[x]['MEMBERS']), reverse=True):
cluster_str = 'cluster' + format(clusterCount, '03')
clusterCount += 1
            page_count = 0
print cluster_str + " -- " + str(len(clusters[rule]['MEMBERS']))
if len(clusters[rule]['MEMBERS']) > 0:
#copy it into the local angular_flask web directory
markup_file = None
if copy_to_webapp and clusterCount <= 11:
blank = os.path.join(web_app_projects_dir, '_blank')
project_dir = os.path.join(web_app_projects_dir, project_name+"_"+cluster_str)
shutil.copytree(blank, project_dir)
markup_file = os.path.join(project_dir, 'learning', 'markup.json')
with codecs.open(markup_file, "r", "utf-8") as myfile:
json_str = myfile.read().encode('utf-8')
markup = json.loads(json_str)
cluster_dir_str = os.path.join(clusters_dir_str, cluster_str)
if not os.path.exists(cluster_dir_str):
os.makedirs(cluster_dir_str)
for page_id in clusters[rule]['MEMBERS']:
copyfile(os.path.join(working_dir_str, page_id), os.path.join(cluster_dir_str, page_id))
if copy_to_webapp and clusterCount <= 11:
if page_count < 7:
#and copy it to the web_app_dir if we have less than 7 there
copyfile(os.path.join(working_dir_str, page_id), os.path.join(project_dir, page_id))
markup['__URLS__'][page_id] = page_id
markup[page_id] = {}
page_count += 1
if copy_to_webapp and clusterCount <= 11:
with codecs.open(markup_file, "w", "utf-8") as myfile:
myfile.write(json.dumps(markup, sort_keys=True, indent=2, separators=(',', ': ')))
myfile.close()
else:
cluster_str = 'cluster' + format(clusterCount, '03')
#copy it into the local angular_flask web directory
markup_file = None
if copy_to_webapp:
blank = os.path.join(web_app_projects_dir, '_blank')
project_dir = os.path.join(web_app_projects_dir, project_name+"_"+cluster_str)
shutil.copytree(blank, project_dir)
markup_file = os.path.join(project_dir, 'learning', 'markup.json')
with codecs.open(markup_file, "r", "utf-8") as myfile:
json_str = myfile.read().encode('utf-8')
markup = json.loads(json_str)
clusterCount += 1
        page_count = 0
cluster_dir_str = os.path.join(clusters_dir_str, cluster_str)
if not os.path.exists(cluster_dir_str):
os.makedirs(cluster_dir_str)
for page_id in tf.get_page_manager().getPageIds():
copyfile(os.path.join(working_dir_str, page_id), os.path.join(cluster_dir_str, page_id))
if copy_to_webapp:
if page_count < 7:
                    #and copy it to the web_app_dir if we have less than 7 there
copyfile(os.path.join(working_dir_str, page_id), os.path.join(project_dir, page_id))
markup['__URLS__'][page_id] = page_id
markup[page_id] = {}
page_count += 1
if copy_to_webapp:
with codecs.open(markup_file, "w", "utf-8") as myfile:
myfile.write(json.dumps(markup, sort_keys=True, indent=2, separators=(',', ': ')))
myfile.close()
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "h", ["help"])
for opt in opts:
if opt in [('-h', ''), ('--help', '')]:
raise Usage('python extraction/PageClusterer.py [WORKING_DIR]')
if len(args) == 1:
directory = args[0]
#now loop through each and run the clustering
dirs = [f for f in os.listdir(directory) if os.path.isdir(os.path.join(directory, f))]
for sub_dir in dirs:
print '...clustering ' + sub_dir + '...'
                    cluster(sub_dir, os.path.join(directory, sub_dir, 'cdr'))
except getopt.error, msg:
raise Usage(msg)
except Usage, err:
print >>sys.stderr, err.msg
print >>sys.stderr, "for help use --help"
return 2
if __name__ == "__main__":
sys.exit(main()) | python |
import tensorflow as tf
import kerastuner as kt
from sacred import Experiment
from model.training import sharpe_loss, fit
from util.data import load_data, preprocess, split_train_test_validation, make_dataset, create_full_datasets
ex = Experiment()
@ex.config
def config():
data_dir = 'data'
alpha = 0.01
dropout = 0
learning_rate = 1e-4
patience = 10
epochs = 100
batch_size = 32
loss = sharpe_loss
target = 0.15
sequence_length = 60
def compile_lstm_model(loss, target, alpha, dropout, learning_rate) -> tf.keras.Model:
"""
    Creates an LSTM model based on the passed hyperparameters.
:param target: target annual returns
:param loss: target loss function
:param learning_rate: learning rate
:param alpha: l1 regularization constant
:param dropout: dropout rate for lstm
:return:
"""
model = tf.keras.models.Sequential([
tf.keras.layers.LSTM(50, return_sequences=True, dropout=dropout),
tf.keras.layers.Dense(units=1, activation='tanh', kernel_regularizer=tf.keras.regularizers.l1(alpha))
])
model.compile(loss=loss(model, target=target),
optimizer=tf.optimizers.Adam(learning_rate),
metrics=[loss(model, target=target)])
return model
@ex.command
def train_lstm(data_dir, alpha, dropout, loss, patience, epochs, learning_rate, target, batch_size, sequence_length):
train, validation, test = create_full_datasets(data_dir, sequence_length=sequence_length,
return_sequence=True, shift=1, batch_size=batch_size)
model = compile_lstm_model(loss=loss, target=target, alpha=alpha, dropout=dropout, learning_rate=learning_rate)
history = fit(model, train, validation, patience=patience, epochs=epochs)
@ex.automain
def search_params(data_dir, sequence_length, loss, target, batch_size):
print('starting parameter search...')
train, validation, test = create_full_datasets(data_dir, sequence_length=sequence_length,
return_sequence=True, shift=1, batch_size=batch_size)
def build_model(hp: kt.HyperParameters):
model = tf.keras.models.Sequential([
tf.keras.layers.LSTM(hp.Int('units', min_value=32, max_value=256, step=32), return_sequences=True, dropout=hp.Float('dropout', 0, 0.5, step=0.1)),
tf.keras.layers.Dense(units=1, activation='tanh', kernel_regularizer=tf.keras.regularizers.l1(
hp.Float('alpha', 1e-3, 1e+1, sampling='log')))
])
model.compile(loss=loss(model, target=target),
optimizer=tf.optimizers.Adam(hp.Float('learning_rate', 1e-5, 1e-1,
sampling='log')),
metrics=[loss(model, target=target)])
return model
tuner = kt.Hyperband(
build_model,
objective='val_loss',
max_epochs=30,
hyperband_iterations=2)
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
patience=3,
mode='min')
tuner.search(train, epochs=30,
validation_data=validation,
callbacks=[early_stopping])
best_model = tuner.get_best_models(1)[0]
best_hyperparameters = tuner.get_best_hyperparameters(1)[0]
print(best_hyperparameters)
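# Usage sketch (assumed filename, not part of the original module): with sacred,
# the @ex.automain function runs by default and named commands are selected on
# the command line, with "with key=value" overriding values from config(), e.g.
#
#   python experiment.py                              # runs search_params
#   python experiment.py train_lstm                    # runs train_lstm
#   python experiment.py train_lstm with epochs=50 dropout=0.2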
| python |
# Generated by Django 2.1.5 on 2019-11-22 05:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tab', '0012_merge_20191017_0109'),
]
operations = [
migrations.AlterField(
model_name='judge',
name='ballot_code',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='team',
name='team_code',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
]
| python |
from django.core.exceptions import ImproperlyConfigured
import pytest
from tests.factories import AttachmentFactory, AttachmentFileTypeFactory
from unicef_attachments import utils
from unicef_attachments.models import AttachmentFlat, FileType
from unicef_attachments.permissions import AttachmentPermissions
from demo.sample.models import AttachmentFlatOverride
from demo.sample.permissions import AttachmentPermOverride
from demo.sample.utils import denormalize, filepath_prefix
pytestmark = pytest.mark.django_db
def test_get_filepath_prefix_func_default():
assert utils.get_filepath_prefix_func() == utils._filepath_prefix
def test_get_filepath_prefix_func_override(settings):
settings.ATTACHMENT_FILEPATH_PREFIX_FUNC = "demo.sample.utils.filepath_prefix"
assert utils.get_filepath_prefix_func() == filepath_prefix
def test_get_filepath_prefix_func_invalid(settings):
settings.ATTACHMENT_FILEPATH_PREFIX_FUNC = "demo.wrong.filepath_prefix"
with pytest.raises(ImproperlyConfigured):
utils.get_filepath_prefix_func()
def test_get_attachment_flat_model_default():
assert utils.get_attachment_flat_model() == AttachmentFlat
def test_get_attachment_flat_model_override(settings):
settings.ATTACHMENT_FLAT_MODEL = "demo.sample.models.AttachmentFlatOverride"
assert utils.get_attachment_flat_model() == AttachmentFlatOverride
def test_get_attachment_flat_model_invalid(settings):
settings.ATTACHMENT_FLAT_MODEL = "demo.sample.wrong.AttachmentFlatOverride"
with pytest.raises(ImproperlyConfigured):
utils.get_attachment_flat_model()
def test_get_attachment_permissions_default():
assert utils.get_attachment_permissions() == AttachmentPermissions
def test_get_attachment_permissions_override(settings):
settings.ATTACHMENT_PERMISSIONS = "demo.sample.permissions.AttachmentPermOverride"
assert utils.get_attachment_permissions() == AttachmentPermOverride
def test_get_attachment_permissions_invalid(settings):
settings.ATTACHMENT_PERMISSIONS = "demo.sample.wrong.AttachmentPermOverride"
with pytest.raises(ImproperlyConfigured):
utils.get_attachment_permissions()
def test_get_denormalize_func_default():
assert utils.get_denormalize_func() == utils.denormalize_attachment
def test_get_denormalize_func_override(settings):
settings.ATTACHMENT_DENORMALIZE_FUNC = "demo.sample.utils.denormalize"
assert utils.get_denormalize_func() == denormalize
def test_get_denormalize_func_invalid(settings):
settings.ATTACHMENT_DENORMALIZE_FUNC = "demo.sample.wrong.denormalize"
with pytest.raises(ImproperlyConfigured):
utils.get_denormalize_func()
def test_get_matching_key(file_type):
key = (file_type.label.lower(), file_type.name.lower())
# name matches
name_key = ("something", file_type.name.lower())
assert name_key == utils.get_matching_key(file_type, [name_key])
# label matches
label_key = (file_type.label.lower(), "something")
assert label_key == utils.get_matching_key(file_type, [label_key])
# no matches
assert key == utils.get_matching_key(file_type, [("some", "thing")])
def test_cleanup_file_types():
file_type_1 = AttachmentFileTypeFactory(
label="Other",
name="something",
)
file_type_2 = AttachmentFileTypeFactory(
label="Other",
name="different",
group=["ft2"],
)
file_type_3 = AttachmentFileTypeFactory(
label="PD",
name="pd",
group=["ft3"],
)
file_type_4 = AttachmentFileTypeFactory(
label="FT4",
name="something",
group=["ft4"],
)
attachment_1 = AttachmentFactory(file_type=file_type_1)
attachment_2 = AttachmentFactory(file_type=file_type_2)
attachment_3 = AttachmentFactory(file_type=file_type_3)
attachment_4 = AttachmentFactory(file_type=file_type_4)
utils.cleanup_filetypes()
attachment_1.refresh_from_db()
assert attachment_1.file_type == file_type_1
attachment_2.refresh_from_db()
assert attachment_2.file_type == file_type_1
attachment_3.refresh_from_db()
assert attachment_3.file_type == file_type_3
attachment_4.refresh_from_db()
assert attachment_4.file_type == file_type_1
assert not FileType.objects.filter(pk=file_type_2.pk).exists()
assert not FileType.objects.filter(pk=file_type_4.pk).exists()
file_type_1.refresh_from_db()
assert file_type_1.group == ["ft2", "ft4"]
| python |
from ambra_sdk.service.filtering import Filter, FilterCondition
from ambra_sdk.service.sorting import Sorter, SortingOrder
class TestStudy:
"""Test Study."""
def test_study_list(
self,
api,
account,
readonly_study,
):
"""Test study list."""
studies = api \
.Study \
.list() \
.set_rows_in_page(5000) \
.filter_by(
Filter(
'phi_namespace',
FilterCondition.equals,
account.account.namespace_id,
),
) \
.all()
assert len(list(studies)) == 1
assert len(list(studies[:3])) == 1
assert len(list(studies[1:4])) == 0 # NOQA:WPS507
def test_study_list_only(self, api, account, readonly_study):
"""Test study list sorting."""
studies = api \
.Study \
.list() \
.filter_by(
Filter(
'phi_namespace',
FilterCondition.equals,
account.account.namespace_id,
),
) \
.only({'study': ['uuid']}) \
.all()
assert len(list(studies)) == 1
study = studies.first()
assert 'uuid' in study
assert len(study) == 1
def test_study_filtering(
self,
api,
account,
readonly_study,
readonly_study2,
):
"""Test study list filtering."""
        # name in study2
patient_name = 'AAAA'
filt = Filter(
field_name='patient_name',
condition=FilterCondition.equals,
value=patient_name,
)
studies = api \
.Study \
.list() \
.only({'study': ['patient_name']}) \
.filter_by(
Filter(
'phi_namespace',
FilterCondition.equals,
account.account.namespace_id,
),
) \
.filter_by(filt) \
.all()
assert len(list(studies[:3])) == 1
assert studies.first().patient_name == patient_name
def test_study_sorting(
self,
api,
account,
readonly_study,
readonly_study2,
):
"""Test study list sorting."""
sorter = Sorter(
field_name='patient_name',
order=SortingOrder.ascending,
)
studies = api \
.Study \
.list() \
.only({'study': ['uuid']}) \
.filter_by(
Filter(
'phi_namespace',
FilterCondition.equals,
account.account.namespace_id,
),
) \
.sort_by(sorter) \
.all()
studies = [study.uuid for study in studies]
r_sorter = Sorter(
field_name='patient_name',
order=SortingOrder.descending,
)
r_studies = api \
.Study \
.list() \
.only({'study': ['uuid']}) \
.filter_by(
Filter(
'phi_namespace',
FilterCondition.equals,
account.account.namespace_id,
),
) \
.sort_by(r_sorter) \
.all()
r_studies = [study.uuid for study in r_studies]
assert list(reversed(studies)) == r_studies
| python |
import texts
#handles the backgrounds
#GLOBALS
masterfreeskill3 = 0
masterskillBG = []
masterextralang = 0
miscdictBG = {}
mastertools = []
masterfeats = []
masterequip = []
class Background(object):
customskill = 0
customlang = 0
bgskills = []
bgFeat = []
tools = []
equip = []
def __init__(self, name, extra_languages):
self.name = name
self.extra_languages = extra_languages
def start(self):
def start2():
global masterfreeskill3
global masterskillBG
global masterextralang
global mastertools
global masterfeats
global miscdictBG
masterfeats.extend(self.bgFeat)
masterskillBG.extend(self.bgskills)
mastertools.extend(self.tools)
masterextralang += self.extra_languages
masterextralang += self.customlang
masterfreeskill3 += self.customskill
miscdictBG.update({"BACKGROUND" : self.name})
# print(masterskillBG)
##THIS IS FOR CUSTOM BG ONLY
if self.name == "Custom":
print("A custom background is made from the following:\nOne feature among those mentioned in a 5e background (PHB pg. 127-141)\nAny two skill proficiencies\nA total of two tool or language proficiencies from existing D&D backgrounds")
def bgskills_adder(self):
                def tool_adder(self, num):
                    temptools = []
                    while num > 0:
                        newskill = input("Please type a tool:\n")
                        if newskill in temptools:
                            print("Don't add the same tools twice")
                        else:
                            temptools.append(newskill)
                            num -= 1
                    self.tools.extend(temptools)
                    print("You have selected:", self.tools)
num = input("How many languages will you add to your custom background?")
if num == "1":
self.customlang += 1
tool_adder(self, 1)
print("You will be able to select any language. You may select one tool proficiency")
elif num == "2":
self.customlang += 2
print("You will be able to select any two languages. You will gain no new tool profiencies from your background")
elif num == "0":
print("You gain no languages but will be ble to select two tools")
Background.tool_adder(self, 2)
else:
print(texts.invalid)
                    bgskills_adder(self)
def feat_adder(self):
feat = str(input("Please select a Background feat. from D&D 5e. Make sure to talk it over with your DM\n Type your feat. below\n"))
                self.bgFeat.append(feat)
bgskills_adder(self)
feat_adder(self)
start2()
else:
start2()
class Acolyte(Background):
bgskills = ["Insight", "Religion"]
bgFeat = ["Shelter of the Faithful(pg. 127)"]
equip = ["A holy symbol(a gift to you when you entered the priesthood)", "a prayer book or prayer wheel", "5 sticks of incense", "vestments", "an set of common clothes", "a pouch containing 15 gp"]
class Charlatan(Background):
bgskills = ["Deception", "Sleight of Hand"]
bgFeat = ["False Identity(pg. 128)"]
tools = ["Disguise Kit", "Forgery Kit"]
equip = ["a set of fine clothes", "a disguise kit", "tools of the con of your choice: ten stoppered bottles filled with colorful liquid OR a set of weighted dice OR a deck of marked cards OR a signet ring of an imaginary duke", "a pouch containing 15 GP"]
class Criminal(Background):
bgskills = ["Deception", "Stealth"]
bgFeat = ["Criminal Contact(pg. 129)"]
tools = ["One Type of Gaming Set", "Thieves' Tools"]
equip = ["a crowbar", "a set of dark common clothes including a hood", "a pouch containing 15 gp"]
class Entertainer(Background):
bgskills = ["Acrobatics", "Performance"]
bgFeat = ["By Popular Demand(pg. 130)"]
tools = ["Disguise Kit", "One Type of Musical Instrument"]
equip = ["one musical instrument", "a token from an adrmirer", "a costume", "a pouch containing 15 gp"]
class FolkHero(Background):
bgskills = ["Animal Handling", "Survival"]
bgFeat = ["Hospitality(pg. 131)"]
tools = ["One Type of Artisan's Tools", "Vehicles (land)"]
equip = ["a set of artisan's tools", "a shovel", "an iron pot", "a set of common clothes", "a pouch containing 10 gp"]
class GArtisan(Background):
bgskills = ["Insight", "Persuasion"]
bgFeat = ["Guild Membership(pg. 133)"]
tools = ["One Type of Artisan's Tools"]
equip = ["a set of artisan's tools", "aletter of introduction from your guild", "a set of traveler's clothes", "a pouch containing 15 gp"]
class Merchant(Background):
bgskills = ["Insight", "Persuasion"]
bgFeat = ["Guild Membership(pg. 133)"]
tools = ["Navigator's Tools OR An additional Language"]
equip = ["Navigator's Tools OR a mule and cart", "a letter of introduction from your guild", "a set of traveler's clothes", "a pouch containing 15 gp"]
class Hermit(Background):
bgskills = ["Medicine", "Religion"]
bgFeat = ["Discovery(pg. 134)"]
tools = ["Herbalism Kit"]
equip = ["a scroll case stuffed full of notes from your studies or prayers", "a winter blanket", "a set of common clothes", " an herbalism kit", "5 gp"]
class Noble(Background):
bgskills = ["History", "Persuasion"]
bgFeat = ["Position of Privilege(pg. 135)"]
tools = ["One Type of Gaming Set"]
equip = ["a set of fine clothes, a signet ring", "a scroll of pedigree", "a purse containing 25gp"]
class NobleKnight(Background):
bgskills = ["History", "Persuasion"]
bgFeat = ["Retainers(pg. 136)"]
tools = ["One Type of Gaming Set"]
equip = ["a set of fine clothes, a signet ring", "a scroll of pedigree", "a purse containing 25gp", "option: a banner or token from the noble you have sworn fealty or devotion to"]
class Outlander(Background):
bgskills = ["Athletics", "Survival"]
bgFeat = ["Wanderer(pg. 136)"]
tools = ["One Type of Musical Instrument"]
equip = ["a staff", "a hunting trap", "a trophy from an animal you killed", "a set of traveler's clothes", "a pouch containing 10 gp"]
class Sage(Background):
bgskills = ["Arcana", "History"]
bgFeat = ["Researcher(pg. 138)"]
equip = ["a bottle of black ink", "a quill", "a small knife", "a letter from a dead colleagu posing a question you have not yet been able to answer", "a set of common clothes", "a pouch containing 10 gp"]
class Sailor(Background):
bgskills = ["Athletics", "Perception"]
bgFeat = ["Ship's Passage(pg. 139)"]
tools = ["Navigator's Tools", "Vehicles(water)"]
equip = ["a belaying pin(club)", "50 feet of silk rope", "a lucky charm such as a rabbit's foot or small stone with a hole in the center(or you may roll for a random Trinket on page 160-161)", "a set of common clothes", "a pouch containing 10 gp"]
class Pirate(Background):
bgskills = ["Athletics", "Perception"]
bgFeat = ["Bad Reputation(pg. 139)"]
tools = ["Navigator's Tools", "Vehicles(water)"]
equip = ["a belaying pin(club)", "50 feet of silk rope", "a lucky charm such as a rabbit's foot or small stone with a hole in the center(or you may roll for a random Trinket on page 160-161)", "a set of common clothes", "a pouch containing 10 gp"]
class Soldier(Background):
bgskills = ["Athletics", "Intimidation"]
bgFeat = ["Military Rank(pg. 140)"]
tools = ["One Type of Gaming Set", "Vehicles(land)"]
equip = ["an insignia of rank", "a trophy taken from a fallen enemy(a dagger, broken blade, or piece of a hammer)", "a set of bone dice OR deck of cards", "a set of common clothes", "a pouch containing 10 gp"]
class Urchin(Background):
bgskills = ["Sleight of Hand", "Stealth"]
bgFeat = ["City Secrets(pg. 141)"]
tools = ["Disguise Kit", "Stealth"]
equip = ["a small knife", "a map of the city you grew up in", "a pet mouse", "a token to remember your parents by", "a set of common clothes", "a pouch containing 10 gp"]
class Custom(Background):
bgskills = []
bgFeat = []
tools = []
acolyte = Acolyte("Acolyte", 2)
charlatan = Charlatan("Charlatan", 0)
criminal = Criminal("Criminal", 0)
entertainer = Entertainer("Entertainer", 0)
folkhero = FolkHero("Folk Hero", 0)
gArtisan = GArtisan("Guild Artisan", 1)
merchant = Merchant("Merchant", 1)
hermit = Hermit("Hermit", 1)
noble = Noble("Noble", 1)
nobleknight = NobleKnight("Knight", 1)
outlander = Outlander("Outlander", 1)
sage = Sage("Sage", 2)
sailor = Sailor("Sailor", 0)
pirate = Pirate("Pirate", 0)
soldier = Soldier("Soldier", 0)
urchin = Urchin("Urchin", 0)
custom = Custom("Custom", 0)
| python |
#!/usr/bin/env python3
def sum_of_fibonacci_numbers_under(n):
total = 0
a = 1
b = 2
while b < n:
if b % 2 == 0:
total += b
a, b = b, a + b
return total
def solve():
return sum_of_fibonacci_numbers_under(4000000)
if __name__ == '__main__':
result = solve()
print(result)
| python |
from numpy import array, testing
from luga import languages
def test_sentences(text_examples):
responses = languages(text_examples["text"])
pred_langs = [response.name for response in responses]
pred_scores = [response.score > 0.5 for response in responses]
assert pred_langs == text_examples["lang"], "language detection failed"
assert all(pred_scores), "score went boom!"
def test_languages(text_examples):
responses = languages(
texts=text_examples["text"], threshold=0.7, only_language=True
)
assert responses == text_examples["lang"], "language detection failed"
def test_array_response(text_examples):
responses = languages(
texts=text_examples["text"], threshold=0.7, only_language=True, to_array=True
)
testing.assert_array_equal(
responses, array(text_examples["lang"]), err_msg="language detection failed"
)
| python |