| code | repo_path | parsed_code | quality_prob | learning_prob |
| --- | --- | --- | --- | --- |
| string (lengths 2k to 1.04M) | string (lengths 5 to 517) | string (lengths 0 to 1.04M) | float64 (0.02 to 0.95) | float64 (0.02 to 0.93) |
import pytest
from django.urls import reverse
from pytest_django.asserts import assertContains
from ...folders.tests.factories import FolderFactory
from ..models import Note
from .factories import NoteFactory, UserFactory
@pytest.fixture
def user():
return UserFactory()
@pytest.fixture
def note():
return NoteFactory()
@pytest.fixture
def folder():
return FolderFactory()
pytestmark = pytest.mark.django_db
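# The module-level pytestmark above applies the django_db marker to every test in this file,
# so each test gets database access (wrapped in a transaction) without per-test decoration.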
def test_good_notes_list_view(client, user):
"""
Tests if /notes/ returns the correct view
:param client:
:param user:
:return:
"""
client.force_login(user)
response = client.get(reverse("notes:list"))
assertContains(response, "Note List")
def test_good_note_detail_view(client, user, note):
"""
Tests if /notes/{note.slug}/ returns the correct view
:param client:
:param user:
:param note: A note object created by the NoteFactory fixture
:return:
"""
client.force_login(user)
url = reverse("notes:detail", kwargs={"slug": note.slug})
response = client.get(url)
assertContains(response, note.title)
def test_good_note_create_view(client, user):
"""
Tests if /notes/add returns the correct view
:param client:
:param user:
:return:
"""
client.force_login(user)
url = reverse("notes:add")
response = client.get(url)
assert response.status_code == 200
def test_note_list_contains_2_notes(client, user):
"""
Creates two note objects and tests if both are listed
in /notes/
:param client:
:param user:
:return:
"""
note1 = NoteFactory()
note2 = NoteFactory()
client.force_login(user)
response = client.get(reverse("notes:list"))
assertContains(response, note1.title)
assertContains(response, note2.title)
def test_note_detail_contains_note_data(client, user, note):
"""
Tests if NoteDetailView displays the correct Note object
:param client:
:param user:
:param note:
:return:
"""
client.force_login(user)
response = client.get(reverse("notes:detail", kwargs={"slug": note.slug}))
assertContains(response, note.title)
assertContains(response, note.text)
assertContains(response, note.creator)
def test_note_create_form_valid(client, user, folder):
"""
Tests creating a note with valid input
:param client:
:param user:
:param folder: A Folder object created by the FolderFactory fixture
:return:
"""
client.force_login(user)
form_data = {
"title": "Test Note",
"text": "Here's a note for testing.",
"parent_folder": folder.id,
}
url = reverse("notes:add")
response = client.post(url, form_data)
assert response.status_code == 302
note = Note.objects.get(title="Test Note")
assert note.text == form_data["text"]
assert note.creator == user
assert note.parent_folder == folder
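# 'creator' is not part of the posted form data, so the assertion above implies the view
# assigns request.user as the creator before saving (a common CreateView.form_valid override).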
def test_note_create_correct_title(client, user):
"""
Tests if requesting the NoteCreateView returns the correct template,
as NoteCreateView and NoteUpdateView use the same template file
:param client:
:param user:
:return:
"""
client.force_login(user)
url = reverse("notes:add")
response = client.get(url)
assertContains(response, "Add Note")
def test_good_note_update_view(client, user, note):
"""
Tests if requesting the NoteUpdateView returns the correct template,
as NoteCreateView and NoteUpdateView use the same template file
:param client:
:param user:
:param note: A Note object created by the NoteFactory fixture
:return:
"""
client.force_login(user)
url = reverse("notes:update", kwargs={"slug": note.slug})
response = client.get(url)
assertContains(response, "Update Note")
def test_note_update(client, user, note, folder):
"""
Tests updating a note given valid input
:param client:
:param user:
:param note: A Note object created by the NoteFactory fixture
:param folder: A Folder object created by the FolderFactory fixture
:return:
"""
client.force_login(user)
url = reverse("notes:update", kwargs={"slug": note.slug})
form_data = {
"title": note.title,
"text": "something new",
"parent_folder": folder.id,
}
response = client.post(url, form_data)
assert response.status_code == 302
note.refresh_from_db()
assert note.text == "something new"
assert note.parent_folder == folder

repo_path: sib_notes/notes/tests/test_views.py | quality_prob: 0.574156 | learning_prob: 0.643385

import pyttsx3
import datetime
import wikipedia
import webbrowser as wb
import os
import random
import speech_recognition as sr
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
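# voices[1] assumes at least two SAPI5 voices are installed; on a default Windows
# install index 1 is usually the second (often female) voice.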
def speak(audio):
engine.say(audio)
print("Speaking....")
engine.runAndWait()
def wishme():
hour = int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
speak("Good Morning sir")
elif hour>=12 and hour<18:
speak("Good Afternoon sir")
else:
speak("Good Evening sir")
speak("I am jarvis, here to help you, tell me what to do")
def takeCommand():
#It takes microphone input from the user and returns string output
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
r.energy_threshold = 2
audio = r.listen(source)
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
print(f"User said: {query}\n")
except Exception:
# print(e)
print("I haven't got it, say that again please")
return "None"
return query
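# recognize_google sends the recorded audio to Google's free Web Speech API, so an internet
# connection is required; on failure the sentinel string "None" is returned and the main loop
# below simply matches none of the commands.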
if __name__ == "__main__":
wishme()
while True:
query = takeCommand().lower()
#logic for AI
if "wikipedia" in query:
speak("searching wikipedia......")
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=2)
print(results)
speak("According to wikipedia")
speak(results)
elif "youtube" in query:
speak("opening youtube.....")
wb.open("youtube.com")
elif "open github" in query:
speak("opening github.....")
wb.open("github.com")
elif "open google" in query:
wb.open("google.com")
speak("opening Google")
elif "play random video" in query:
vid_dir = "E:\\Videos"
video = os.listdir(vid_dir)
random_file = random.choice(video)
os.startfile(os.path.join(vid_dir, random_file))
elif ".com" in query:
wb.open(".com"+query)
speak(f"opening {query}")
elif "what is the time now" in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"the time now is{strTime}")
elif "open stackoverflow" in query:
wb.open("stackoverflow.com")
elif "who are you" in query:
speak("I am EESHAAN's personal assistant")
elif "what is your name" in query:
speak("I am Jaarvis sir")
elif "play video" in query:
speak("playing you favourite video, sir")
os.startfile("E:\\Don Diablo - We Are Love.mp4")
elif "open amazon" in query:
wb.open("www.amazon.in")
elif "open twitter" in query:
wb.open("www.twitter.com")
elif "speak alphabets" in query:
speak("Speaking alphabets")
for alpha in range(65, 91):
speak(chr(alpha))

repo_path: Aspire The AI.py | quality_prob: 0.094224 | learning_prob: 0.062674

import argparse
import os
import re
import unicodedata
import Levenshtein
import pandas
import pandas as pd
import regex
from bz2file import BZ2File
from tqdm import tqdm
from wiktionary_de_parser import Parser
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--wiktionary', type=str, default='dewiktionary-20210701-pages-articles.xml.bz2')
parser.add_argument('--output', type=argparse.FileType('w'), default='wiktionary_relations.tsv')
args = parser.parse_args()
bz = BZ2File(args.wiktionary)
substantive = []
adjektive = []
verben = []
derivs = []
adj_targets = ["bar", "en", "erig", "ern", "fach", "frei", "haft", "ig", "isch",
"lich", "los", "mäßig", "sam", "sch"]
subst_targets = ["chen", "e", "ei", "el", "en", "er", "heit", "ien", "iker", "in", "keit", "lein", "ler", "ling",
"mut",
"nis", "rich", "sal", "schaft", "sel", "tum", "ung"]
with tqdm(unit='B', unit_scale=True, smoothing=0.05, total=os.path.getsize(args.wiktionary)) as pbar:
for record in Parser(bz):
pbar.update(bz._fp.tell() - pbar.n)
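# advance the byte-based progress bar using the position of BZ2File's underlying raw
# file object (bz._fp is a private attribute, so this may break with other bz2 wrappers)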
if not regex.fullmatch(r'\p{L}+', record['title']): continue
if 'langCode' not in record or record['langCode'] != 'de':
continue
if re.search(r'\|Adjektiv\|', record['wikitext']):
try:
target, lemma, base = process_deriv(record, adj_targets)
derivs.append(['adj_' + target, lemma, base])
except Exception:
pass  # process_deriv found nothing usable for this entry; skip it
if re.search(r'\|Substantiv\|', record['wikitext']):
try:
target, lemma, base = process_deriv(record, subst_targets)
derivs.append(['subst_' + target, lemma, base])
except Exception:
pass  # process_deriv found nothing usable for this entry; skip it
if 'flexion' in record.keys():
flexion = record["flexion"]
wortart = list(record["pos"].keys())[0]
if wortart == "Substantiv":
substantive.append(flexion)
if wortart == "Adjektiv":
adjektive.append(flexion)
if wortart == "Verb":
verben.append(flexion)
flexion["Infinitiv"] = record["title"]
print_verb_infl(verben, args.output)
print_adj_infl(adjektive, args.output)
print_subst_infl(substantive, args.output)
print_deriv(derivs, args.output)
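# Each print_* helper writes three tab-separated columns to the output file:
# a relation label (e.g. infl_verb_..., derivations_adj_...), the source form, and the related form.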
def process_deriv(record, targets):
for t in targets:
lemma = record['title']
if not lemma.endswith(t): continue
herkunft = re.search(r'{{(Herkunft|Ableitung)}}[^{]*(\[\[Ableitung]][^{]*){{', record['wikitext'], re.MULTILINE)
if herkunft is None: continue
herkunft = herkunft.group(2).replace('\n', ' ')
if not re.search(r"''\[\[-" + t + "]]", herkunft): continue
base = [b[0] for b in regex.findall(r"''\[\[(\p{L}+)]]([.,;])?''", herkunft)]
def check_prefix(a, b):
return unicodedata.normalize('NFD', a[0]).lower() != unicodedata.normalize('NFD', b[0]).lower()
if len(base) == 0: continue
if len(base) == 1:
candidate = base[0]
if not check_prefix(candidate, lemma): continue
return t, lemma, candidate
else:
# heuristic by closest levenshtein distance
distances = [(b, Levenshtein.distance(lemma.lower(), b.lower() + t)) for b in base if check_prefix(lemma, b)]
candidate, dist = min(distances, key=lambda x: x[1])
if dist <= 3:
return t, lemma, candidate
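# process_deriv returns a (suffix, lemma, base) triple when a plausible base word is found,
# and falls through to an implicit None otherwise; the tuple unpacking in main() then raises,
# which is caught and ignored there.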
def print_subst_infl(substantive, out):
substantive = pd.DataFrame(substantive)
labels = dict([('Nominativ Singular', 'nom_sg'), ('Nominativ Plural', 'nom_pl'), ('Dativ Plural', 'dat_pl'),
('Genitiv Singular', 'gen_sg')])
substantive = substantive[labels.keys()].dropna().rename(columns=labels)
substantive.drop_duplicates(subset='nom_sg', keep=False, inplace=True)
substantive = substantive[
substantive.applymap(lambda x: len(x) >= 2 and regex.fullmatch(r'\w+', x) is not None).all(axis=1)]
for col in labels.values():
if col == 'nom_sg': continue
if col == 'nom_pl' or col == 'dat_pl':
selection = substantive[substantive['dat_pl'] != substantive['nom_pl']][['nom_sg', col]]
else:
selection = substantive[['nom_sg', col]]
selection = selection[selection.apply(lambda x: x == selection[col]).sum(axis=1) == 1].drop_duplicates()
for i, row in selection.iterrows():
print('infl_subst_' + col, row['nom_sg'], row[col], sep='\t', file=out)
def print_adj_infl(adjektive, out):
adjektive = pd.DataFrame(adjektive)
adjektive.drop_duplicates(subset='Positiv', keep=False, inplace=True)
for col in ['Komparativ', 'Superlativ']:
selection = adjektive[adjektive.apply(lambda x: x == adjektive[col]).sum(axis=1) == 1][
['Positiv', col]].drop_duplicates()
for i, row in selection.iterrows():
print('infl_adj_' + col.lower(), row['Positiv'], row[col], sep='\t', file=out)
def print_verb_infl(verben, out):
verben = pd.DataFrame(verben)
verben = verben.drop(verben[verben.Präsens_ich.isna()].index)
labels = dict([('Infinitiv', 'inf'), ('Präsens_ich', 'sg_1p_präsens'), ('Präsens_du', 'sg_2p_präsens'),
('Präteritum_ich', 'sg_1p_prät_indikativ'), ('Partizip II', 'partizip_perfekt'),
('Konjunktiv II_ich', 'sg_1p_prät_konjunktiv')])
verben = verben[labels.keys()].dropna().rename(columns=labels)
# verben.drop_duplicates(subset='inf', inplace=True)
verben = verben[verben.applymap(lambda x: len(x) >= 2 and regex.fullmatch(r'\w+', x) is not None).all(axis=1)]
for col in labels.values():
if col == 'inf': continue
selection = verben[verben.apply(lambda x: x == verben[col]).sum(axis=1) == 1][['inf', col]].drop_duplicates()
for i, row in selection.iterrows():
print('infl_verb_' + col, row['inf'], row[col], sep='\t', file=out)
def print_deriv(derivs, out):
df = pandas.DataFrame(derivs, columns=['derivation', 'base', 'lemma'])
df.drop_duplicates(subset=['derivation', 'base'], keep=False, inplace=True)
for row in df.sort_values('derivation').itertuples():
print('derivations_' + row.derivation, row.lemma, row.base, sep='\t', file=out)
if __name__ == "__main__":
main()

repo_path: testsets/wiktionary_extraction.py | quality_prob: 0.212886 | learning_prob: 0.219129

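# core/common.py (below) is a set of lookup tables mapping human-readable form answers to
# numeric concept codes; the IDs resemble OpenMRS/UgandaEMR concept identifiers, though that
# is an inference from the values rather than something stated in the file.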
no_yes = {'NO': '90004', 'YES': '90003'}
no_yes_1 = {'No': '1066', 'Yes': '1065'}
no_yes_pregnant = {'No': '90081', 'Yes': '90082'}
relationship = {
'AUNT/UNCLE': '90281', 'BROTHER/SISTER': '90285', 'CHILD': '90280',
'COUSIN': '90286', 'FRIEND': '5618', 'GRANDCHILD': '90284', 'GRANDPARENT': '90283',
'NIECE/NEPHEW': '90282', 'NOTFAMILY': '90287', 'PARENT/FARTHER/MOTHER': '90279',
'SPOUSE/PARTNER': '90288'
}
mode_of_delivery = {
'BREECH': '1172',
'CESAREAN SECTION': '1171',
'Forceps or Vacuum Extractor Delivery': '163005',
'Other': '5622',
'SPONTANEOUS VAGINAL DELIVERY/NORMAL': '1170'
}
mothers_arvs_for_pmtct = {
'ART': '163012',
'Life long ART': '99786',
'NONE': '1107',
'No ART': '99782',
'Unknown': '1067',
'sd NVP': '163009',
'sd NVP + AZT': '163010',
'sd NVP + AZT/3TC': '163011'
}
infant_arvs = {
'Daily NVP from birth through b/feeding': '163013',
'Daily NVP from birth to 6 weeks': '162966',
'NVP taken after 72 hours of birth': '99789',
'No ARVs taken at birth': '99790',
'Received NVP within 72 hours of birth': '99788',
'Unknown': '1067',
'sd NVP': '163009',
'sd NVP + AZT': '163010'
}
result = {
'NEGATIVE/NEG': '664',
'POSITIVE/POS': '703'
}
feeding_status = {
'BREASTFED EXCLUSIVELY/EBF': '5526',
'Complementary Feeding/CF': '99791',
'MIXED FEEDING/MF': '6046',
'No Longer Breastfeeding/NLB': '99793',
'REPLACEMENT FEEDING/RF': '99089',
'Weaning/W': '99792'
}
final_outcome = {
'Discharged Negative': '99427',
'Referred to ART Clinic': '99430',
'Lost': '5240',
'Transferred': '90306',
'Died': '99112'
}
entry_point = {
'PMTCT': '90012',
'TB': '90016',
'YCC': '99593',
'Outreach': '90019',
'Out Patient': '90013',
'STI': '90015',
'Inpatient': '90018',
'Other': '90002'
}
test_type = {
'PCR Test': '163006',
'Antibody Tes/AB': '163007'
}
muac = {
'Red': '99028',
'Yellow': '99029',
'Green': '99027'
}
clinical_assessment_codes = {
'WELL': '162848',
'LN': '162847',
'WL': '832',
'F': '90103',
'OT': '90130',
'EI': '162849',
'G': '162850',
'C': '107',
'ADR': '162851',
'RDR': '162852',
'PNEU': '42',
'RASH': '512',
'Others': '90002'
}
development_milestone = {
'Smilling': '162855',
'Sitting': '162857',
'Walking': '162856',
'Controling the head': '162858',
'Transfer Objects from one hand to hand': '162859',
'Cognition': '162860',
'Rolling Over': '162861',
'Crawl': '162862',
'Stand': '162863'
}
immunization_codes = {
'BCG': '886',
'OPV-0': '162831',
'OPV-1': '162832',
'OPV-2': '162833',
'OPV-3': '162834',
'DPT': '781',
'DPT-Hep+Hib1': '162835',
'DPT-HepB + Hib2': '162836',
'DPT-HepB + Hib3': '162837',
'MEASLES': '63',
'De-Worming': '162838',
'Vitamin A': '99356',
'PCV-1': '163014',
'PCV-2': '163015',
'PCV-3': '163016',
'Not Done': '90002'
}
drugs = {
'Amitriptilline': '931',
'Amoxicillin': '265',
'Cetrizine': '779',
'Ciprofloxacine': '740',
'Cloxacillin': '922',
'Coartem': '99266',
'Dapsone': '90171',
'Doxycycline': '95',
'EH': '1108',
'Erythromicin': '272',
'Fluconazole': '747',
'Ibuprofen': '912',
'Ketoconazole': '926',
'Mebendazole': '244',
'Metronidazole': '237',
'Multivitamins': '461',
'Nutrition Support': '99054',
'Nystatin': '919',
'Paracetamol': '89',
'RH': '1194',
'RHE': '99127',
'RHZ': '768',
'RHZE': '1131',
'RHZES': '99128',
'Septrin': '916',
'Vit B Complex': '329',
'Other specify': '90002',
}
tb_status = {
'No signs/1': '90079',
'Suspect/2': '90073',
'Diagnosed with Tb/3': '90078',
'Tb Rx/4': '90071'
}
side_effects = {
'AB/ABDOMINAL PAIN': '90100', 'Anaemia/ANEMIA': '90099',
'BN/BURNING': '90095',
'CNS/DIZZINESS': '90117',
'Diarrhoea/DIARRHOEA': '90092',
'FAT/FAT DISTRIBUTION CHANGES': '90116',
'Fatigue/FATIGUE': '90093',
'Headache/HEADACHE': '90094',
'Jaundice/JAUNDICE': '90115',
'Nausea/NAUSEA': '90091',
'Rash/RASH': '90098', 'Other specify:/OTHER SPECIFY': '90002',
}
symptoms = {
'Cough/COUGH': '90132',
'DB/BREATHING DIFFICULTY': '90133',
'Dementia/DEMENTIA': '90128',
'Enceph/ENCEPHALITIS': '90129',
'Fever/FEVER': '90131',
'GUD/GENITAL ULCER DISEASE': '90138',
'Headache/HEADACHE': '90094',
'IRIS/IMMUNE RECONSTITUTION INFLAMMATORY SYNDROME': '90134',
'Malaria/PRESUMED': '123',
'Pneumonia/PNEUMONIA': '90127',
'PID/PELVIC INFLAMMATORY DISEASE': '90137',
'RTI/RESPIRATORY TRACT INFECTION, NOS': '999',
'Thrush/THRUSH': '90130',
'UD/URETHRAL DISCHARGE': '90136',
'Ulcers/ULCERS': '90139',
'URTI/RESPIRATORY TRACT INFECTION, UPPER': '106',
'Weight loss/WEIGHT LOSS': '90135',
'Zoster/ZOSTER': '90126',
'KS/KAPOSIS SARCOMA': '507',
'CCM/MENINGITIS, CRYPTOCOCCAL': '1294',
'Other specify:/OTHER SPECIFY': '90002'
}
malnutrition = {
'MAM/MODERATE ACUTE MALNUTRITION': '99271',
'SAM/SEVERE ACUTE MALNUTRITION': '99272',
'SAMO/SEVERE ACUTE MALNUTRITION WITH OEDEMA': '99273',
'PWG/PA/POOR WEIGHT GAIN / POOR APPETITE': '99274',
}
clinical_stage = {
'1/HIV WHO CLINICAL STAGE 1': '90033',
'2/HIV WHO CLINICAL STAGE 2': '90034',
'3/HIV WHO CLINICAL STAGE 3': '90035',
'4/HIV WHO CLINICAL STAGE 4': '90036',
'T1/HIV WHO CLINICAL STAGE T1': '90293',
'T2/HIV WHO CLINICAL STAGE T2': '90294',
'T3/HIV WHO CLINICAL STAGE T3': '90295',
'T4/HIV WHO CLINICAL STAGE T4': '90296'
}
functional_status = {
'Amb/AMBULATORY': '90037',
'Work/WORKING': '90038',
'Bed/BEDRIDDEN': '90039'
}
adherence = {
'Good/GOOD ADHERENCE': '90156',
'Fair/FAIR ADHERENCE': '90157',
'Poor/POOR ADHERENCE': '90158'
}

repo_path: core/common.py | quality_prob: 0.335024 | learning_prob: 0.115486

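A minimal sketch of how lookup tables like the ones above are typically consumed; the observation payload shape and field names here are hypothetical and not taken from the repository:

# Hypothetical usage: translate labels captured on a form into concept codes.
from core.common import no_yes, tb_status  # import path assumed from the repo_path above

observation = {
    "concept": tb_status["No signs/1"],   # -> '90079'
    "value_coded": no_yes["NO"],          # -> '90004'
}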
from .interval_limit import IntervalLimit
def local_compare_to(a, b):
return -1 if a < b else 1 if a > b else 0
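# cmp-style three-way comparison helper: returns -1, 0 or 1 depending on whether a is
# less than, equal to or greater than b.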
class Interval:
@staticmethod
def closed(lower, upper):
return Interval(lower, True, upper, True)
@staticmethod
def open(lower, upper):
return Interval(lower, False, upper, False)
@staticmethod
def over(lower, lower_included, upper, upper_included):
return Interval(lower, lower_included, upper, upper_included)
def __init__(self, *args):
assert len(args) == 2 or len(args) == 4
if len(args) == 2:
self._create_with_intervals(*args)
elif len(args) == 4:
self._create_with_boundaries(*args)
def _create_with_intervals(self, lower, upper):
assert isinstance(lower, IntervalLimit)
assert isinstance(upper, IntervalLimit)
assert lower.is_lower()
assert upper.is_upper()
assert lower.compare_to(upper) <= 0
self._lower_limit_object = lower
self._upper_limit_object = upper
def _create_with_boundaries(self, lower, is_lower_closed, upper, is_upper_closed):
self._create_with_intervals(IntervalLimit.lower(is_lower_closed, lower),
IntervalLimit.upper(is_upper_closed, upper))
def __hash__(self):
return hash((hash(self._lower_limit_object), hash(self._upper_limit_object)))
def __eq__(self, other):
return self.equals(other)
def equals(self, other):
if other is None:
return False
this_empty = self.is_empty()
other_empty = other.is_empty()
if this_empty and other_empty:
return True
if this_empty != other_empty:
return False
this_single = self.is_single_element()
other_single = other.is_single_element()
if this_single and other_single:
return self.lower_limit() == other.lower_limit()
if this_single != other_single:
return False
return self.compare_to(other) == 0
def new_of_same_type(self, lower, is_lower_closed, upper, is_upper_closed):
return Interval(lower, is_lower_closed, upper, is_upper_closed)
def empty_of_same_type(self):
return self.new_of_same_type(self.lower_limit(), False, self.lower_limit(), False)
def compare_to(self, other):
if not self.upper_limit() == other.upper_limit():
return local_compare_to(self.upper_limit(), other.upper_limit())
if self.includes_lower_limit() and not other.includes_lower_limit():
return -1
if not self.includes_lower_limit() and other.includes_lower_limit():
return 1
return local_compare_to(self.lower_limit(), other.lower_limit())
def is_open(self):
return not self.includes_lower_limit() and not self.includes_upper_limit()
def is_closed(self):
return self.includes_lower_limit() and self.includes_upper_limit()
def is_empty(self):
return self.is_open() and self.upper_limit() == self.lower_limit()
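# e.g. an open interval whose limits coincide, such as (3..3), is empty,
# while a closed one like [3..3] is a single element instead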
def upper_limit(self):
return self._upper_limit_object.get_value()
def includes_upper_limit(self):
return self._upper_limit_object.is_closed()
def has_upper_limit(self):
return self._upper_limit_object is not None
def lower_limit(self):
return self._lower_limit_object.get_value()
def includes_lower_limit(self):
return self._lower_limit_object.is_closed()
def has_lower_limit(self):
return self._lower_limit_object is not None
def is_single_element(self):
return self.upper_limit() == self.lower_limit() and not self.is_empty()
def is_below(self, value):
if not self.has_upper_limit():
return False
comparison = local_compare_to(self.upper_limit(), value)
return comparison < 0 or (comparison == 0 and not self.includes_upper_limit())
def is_above(self, value):
if not self.has_lower_limit():
return False
comparison = local_compare_to(self.lower_limit(), value)
return comparison > 0 or (comparison == 0 and not self.includes_lower_limit())
def includes(self, value):
return not self.is_below(value) and not self.is_above(value)
def covers(self, other):
lower_comparison = local_compare_to(self.lower_limit(), other.lower_limit())
lower_pass = self.includes(other.lower_limit()) or (lower_comparison == 0 and not other.includes_lower_limit())
upper_comparison = local_compare_to(self.upper_limit(), other.upper_limit())
upper_pass = self.includes(other.upper_limit()) or (upper_comparison == 0 and not other.includes_upper_limit())
return lower_pass and upper_pass
def intersects(self, other):
comparison = local_compare_to(self.greater_of_lower_limits(other), self.lesser_of_upper_limits(other))
if comparison < 0:
return True
if comparison > 0:
return False
return self._greater_of_lower_included_in_intersection(other) and \
self._lesser_of_upper_included_in_intersection(other)
def lesser_of_lower_limits(self, other):
if self.lower_limit() is None:
return None
lower_comparison = local_compare_to(self.lower_limit(), other.lower_limit())
if lower_comparison <= 0:
return self.lower_limit()
return other.lower_limit()
def greater_of_lower_limits(self, other):
if self.lower_limit() is None:
return other.lower_limit()
lower_comparison = local_compare_to(self.lower_limit(), other.lower_limit())
if lower_comparison >= 0:
return self.lower_limit()
return other.lower_limit()
def lesser_of_upper_limits(self, other):
if self.upper_limit() is None:
return other.upper_limit()
upper_comparison = local_compare_to(self.upper_limit(), other.upper_limit())
if upper_comparison <= 0:
return self.upper_limit()
return other.upper_limit()
def greater_of_upper_limits(self, other):
if self.upper_limit() is None:
return None
upper_comparison = local_compare_to(self.upper_limit(), other.upper_limit())
if upper_comparison >= 0:
return self.upper_limit()
return other.upper_limit()
def _greater_of_lower_included_in_intersection(self, other):
limit = self.greater_of_lower_limits(other)
return self.includes(limit) and other.includes(limit)
def _lesser_of_upper_included_in_intersection(self, other):
limit = self.lesser_of_upper_limits(other)
return self.includes(limit) and other.includes(limit)
def _greater_of_lower_included_in_union(self, other):
limit = self.greater_of_lower_limits(other)
return self.includes(limit) or other.includes(limit)
def _lesser_of_upper_included_in_union(self, other):
limit = self.lesser_of_upper_limits(other)
return self.includes(limit) or other.includes(limit)
def intersect(self, other):
intersect_lower_bound = self.greater_of_lower_limits(other)
intersect_upper_bound = self.lesser_of_upper_limits(other)
if local_compare_to(intersect_lower_bound, intersect_upper_bound) > 0:
return self.empty_of_same_type()
return self.new_of_same_type(intersect_lower_bound, self._greater_of_lower_included_in_intersection(other),
intersect_upper_bound, self._lesser_of_upper_included_in_intersection(other))
def gap(self, other):
if self.intersects(other):
return self.empty_of_same_type()
return self.new_of_same_type(self.lesser_of_upper_limits(other),
not self._lesser_of_upper_included_in_union(other),
self.greater_of_lower_limits(other),
not self._greater_of_lower_included_in_union(other))
# see: http://en.wikipedia.org/wiki/Set_theoretic_complement
def complement_relative_to(self, other):
interval_sequence = []
if not self.intersects(other):
interval_sequence.append(other)
return interval_sequence
left = self._left_complement_relative_to(other)
if left is not None:
interval_sequence.append(left)
right = self._right_complement_relative_to(other)
if right is not None:
interval_sequence.append(right)
return interval_sequence
def _left_complement_relative_to(self, other):
if self.includes(self.lesser_of_lower_limits(other)):
return None
if self.lower_limit() == other.lower_limit() and not other.includes_lower_limit():
return None
return self.new_of_same_type(other.lower_limit(), other.includes_lower_limit(), self.lower_limit(), not self.includes_lower_limit())
def _right_complement_relative_to(self, other):
if self.includes(self.greater_of_upper_limits(other)):
return None
if self.upper_limit() == other.upper_limit() and not other.includes_upper_limit():
return None
return self.new_of_same_type(self.upper_limit(), not self.includes_upper_limit(), other.upper_limit(), other.includes_upper_limit())
def __str__(self):
return "{}{}..{}{}".format("(" if self._lower_limit_object.is_opened() else "[",
self.lower_limit(),
self.upper_limit(),
")" if self._upper_limit_object.is_opened() else "]",) | timeandmoneypy/intervals/interval.py | from .interval_limit import IntervalLimit
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nsxt_policy_ip_block
short_description: Create or Delete a Policy IP Block
description:
Creates or deletes a Policy IP Block.
Required attributes include id and display_name.
version_added: "2.8"
author: <NAME>
extends_documentation_fragment:
- vmware.ansible_for_nsxt.vmware_nsxt
options:
id:
description: The id of the Policy IP Block.
required: false
type: str
description:
description: IP Block description.
type: str
cidr:
description:
- A contiguous IP address space represented by network address
and prefix length
- Represents a network address and the prefix length which will
be associated with a layer-2 broadcast domain. Support only IPv4
CIDR.
required: true
type: str
'''
EXAMPLES = '''
- name: create IP Block
nsxt_policy_ip_block:
hostname: "10.10.10.10"
nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt
nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key
validate_certs: False
id: test-ip-blk
display_name: test-ip-blk
state: "present"
cidr: "192.168.0.0/16"
'''
RETURN = '''# '''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_base_resource import NSXTBaseRealizableResource
from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_resource_urls import IP_BLOCK_URL
from ansible.module_utils._text import to_native
class NSXTIpBlock(NSXTBaseRealizableResource):
@staticmethod
def get_resource_spec():
ip_block_arg_spec = {}
ip_block_arg_spec.update(
cidr=dict(
required=True,
type='str'
)
)
return ip_block_arg_spec
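# Connection and common arguments (hostname, credentials/certificates, state, display_name,
# description, ...) are presumably contributed by NSXTBaseRealizableResource's base argument
# spec, which is why only 'cidr' is declared here.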
@staticmethod
def get_resource_base_url(baseline_args=None):
return IP_BLOCK_URL
if __name__ == '__main__':
ip_block = NSXTIpBlock()
ip_block.realize()

repo_path: plugins/modules/nsxt_policy_ip_block.py | quality_prob: 0.736211 | learning_prob: 0.123234

import pickle
import numpy as np
import theano
import theano.tensor as tt
from theano.tests import unittest_tools as utt
from exoplanet.theano_ops.starry import (
GetCl,
GetClRev,
LimbDark,
RadiusFromOccArea,
)
from exoplanet.theano_ops.driver import SimpleLimbDark
class TestGetCl(utt.InferShapeTester):
def setUp(self):
super(TestGetCl, self).setUp()
self.op_class = GetCl
self.op = GetCl()
def test_basic(self):
x = tt.dvector()
f = theano.function([x], self.op(x))
inp = np.array([-1, 0.3, 0.2, 0.5])
out = f(inp)
utt.assert_allclose(np.array([-0.85, 2.5, -0.425, 0.1]), out)
def test_infer_shape(self):
x = tt.dvector()
self._compile_and_check(
[x], [self.op(x)], [np.asarray(np.random.rand(5))], self.op_class
)
def test_grad(self):
utt.verify_grad(self.op, [np.array([-1, 0.3, 0.2, 0.5])])
class TestGetClRev(utt.InferShapeTester):
def setUp(self):
super(TestGetClRev, self).setUp()
self.op_class = GetClRev
self.op = GetClRev()
def test_basic(self):
x = tt.dvector()
f = theano.function([x], self.op(x))
inp = np.array([-1, 0.3, 0.2, 0.5])
out = f(inp)
utt.assert_allclose(np.array([0, 1.3, 2.05, 3.53]), out)
def test_infer_shape(self):
x = tt.dvector()
self._compile_and_check(
[x], [self.op(x)], [np.asarray(np.random.rand(5))], self.op_class
)
class TestLimbDark(utt.InferShapeTester):
def setUp(self):
super(TestLimbDark, self).setUp()
self.op_class = LimbDark
self.op = LimbDark()
def get_args(self):
c = tt.vector()
b = tt.vector()
r = tt.vector()
los = tt.vector()
f = theano.function([c, b, r, los], self.op(c, b, r, los)[0])
c_val = np.array([-0.85, 2.5, -0.425, 0.1])
b_val = np.linspace(-1.5, 1.5, 100)
r_val = 0.1 + np.zeros_like(b_val)
los_val = np.ones_like(b_val)
return f, [c, b, r, los], [c_val, b_val, r_val, los_val]
def test_basic(self):
f, _, in_args = self.get_args()
out = f(*in_args)
utt.assert_allclose(0.0, out[0])
utt.assert_allclose(0.0, out[-1])
def test_los(self):
f, _, in_args = self.get_args()
in_args[-1] = -np.ones_like(in_args[-1])
out = f(*in_args)
utt.assert_allclose(0.0, out)
def test_infer_shape(self):
f, args, arg_vals = self.get_args()
self._compile_and_check(args, self.op(*args), arg_vals, self.op_class)
def test_grad(self):
_, _, in_args = self.get_args()
func = lambda *args: self.op(*args)[0] # NOQA
utt.verify_grad(func, in_args)
class TestRadiusFromOccArea(utt.InferShapeTester):
def setUp(self):
super(TestRadiusFromOccArea, self).setUp()
self.op_class = RadiusFromOccArea
self.op = RadiusFromOccArea()
def get_args(self):
delta = tt.vector()
b = tt.vector()
f = theano.function([delta, b], self.op(delta, b))
delta_val = np.linspace(0.01, 0.99, 100)
b_val = np.linspace(0.01, 1.5, len(delta_val))
return f, [delta, b], [delta_val, b_val]
def test_basic(self):
f, t_args, v_args = self.get_args()
r = f(*v_args)
expect = -LimbDark()(
np.array([1.0 / np.pi, 0.0]), v_args[1], r, np.ones_like(r)
)[0].eval()
utt.assert_allclose(expect, v_args[0])
def test_infer_shape(self):
f, args, arg_vals = self.get_args()
self._compile_and_check(
args, [self.op(*args)], arg_vals, self.op_class
)
def test_grad(self):
func, t_args, v_args = self.get_args()
g = theano.function(
t_args, theano.grad(tt.sum(self.op(*t_args)), t_args)
)(*v_args)
eps = 1.234e-6
for n in range(len(t_args)):
v_args[n] += eps
plus = func(*v_args)
v_args[n] -= 2 * eps
minus = func(*v_args)
v_args[n] += eps
est = 0.5 * (plus - minus) / eps
utt.assert_allclose(est, g[n], atol=2 * eps)
def test_simple():
np.random.seed(4502934)
r = np.random.uniform(0, 1.0, 50)
b = np.random.uniform(-1.2, 1.2, len(r))
u = np.array([0.5, 0.3])
cl = GetCl()(tt.as_tensor_variable(u)).eval()
cl /= np.pi * (cl[0] + 2 * cl[1] / 3)
f0 = LimbDark()(cl, b, r, np.ones_like(b))[0].eval()
ld = SimpleLimbDark()
assert np.allclose(ld.apply(b, r), 0.0)
ld.set_u(u)
assert np.allclose(ld.apply(b, r), f0)
def test_simple_pickle():
np.random.seed(4502934)
r = np.random.uniform(0, 1.0, 50)
b = np.random.uniform(-1.2, 1.2, len(r))
u = np.array([0.5, 0.3])
ld = SimpleLimbDark()
ld.set_u(u)
f0 = ld.apply(b, r)
data = pickle.dumps(ld, -1)
ld2 = pickle.loads(data)
assert np.allclose(ld2.apply(b, r), f0) | tests/theano_ops/starry_test.py |
import pickle
import numpy as np
import theano
import theano.tensor as tt
from theano.tests import unittest_tools as utt
from exoplanet.theano_ops.starry import (
GetCl,
GetClRev,
LimbDark,
RadiusFromOccArea,
)
from exoplanet.theano_ops.driver import SimpleLimbDark
class TestGetCl(utt.InferShapeTester):
def setUp(self):
super(TestGetCl, self).setUp()
self.op_class = GetCl
self.op = GetCl()
def test_basic(self):
x = tt.dvector()
f = theano.function([x], self.op(x))
inp = np.array([-1, 0.3, 0.2, 0.5])
out = f(inp)
utt.assert_allclose(np.array([-0.85, 2.5, -0.425, 0.1]), out)
def test_infer_shape(self):
x = tt.dvector()
self._compile_and_check(
[x], [self.op(x)], [np.asarray(np.random.rand(5))], self.op_class
)
def test_grad(self):
utt.verify_grad(self.op, [np.array([-1, 0.3, 0.2, 0.5])])
class TestGetClRev(utt.InferShapeTester):
def setUp(self):
super(TestGetClRev, self).setUp()
self.op_class = GetClRev
self.op = GetClRev()
def test_basic(self):
x = tt.dvector()
f = theano.function([x], self.op(x))
inp = np.array([-1, 0.3, 0.2, 0.5])
out = f(inp)
utt.assert_allclose(np.array([0, 1.3, 2.05, 3.53]), out)
def test_infer_shape(self):
x = tt.dvector()
self._compile_and_check(
[x], [self.op(x)], [np.asarray(np.random.rand(5))], self.op_class
)
class TestLimbDark(utt.InferShapeTester):
def setUp(self):
super(TestLimbDark, self).setUp()
self.op_class = LimbDark
self.op = LimbDark()
def get_args(self):
c = tt.vector()
b = tt.vector()
r = tt.vector()
los = tt.vector()
f = theano.function([c, b, r, los], self.op(c, b, r, los)[0])
c_val = np.array([-0.85, 2.5, -0.425, 0.1])
b_val = np.linspace(-1.5, 1.5, 100)
r_val = 0.1 + np.zeros_like(b_val)
los_val = np.ones_like(b_val)
return f, [c, b, r, los], [c_val, b_val, r_val, los_val]
def test_basic(self):
f, _, in_args = self.get_args()
out = f(*in_args)
utt.assert_allclose(0.0, out[0])
utt.assert_allclose(0.0, out[-1])
def test_los(self):
f, _, in_args = self.get_args()
in_args[-1] = -np.ones_like(in_args[-1])
out = f(*in_args)
utt.assert_allclose(0.0, out)
def test_infer_shape(self):
f, args, arg_vals = self.get_args()
self._compile_and_check(args, self.op(*args), arg_vals, self.op_class)
def test_grad(self):
_, _, in_args = self.get_args()
func = lambda *args: self.op(*args)[0] # NOQA
utt.verify_grad(func, in_args)
class TestRadiusFromOccArea(utt.InferShapeTester):
def setUp(self):
super(TestRadiusFromOccArea, self).setUp()
self.op_class = RadiusFromOccArea
self.op = RadiusFromOccArea()
def get_args(self):
delta = tt.vector()
b = tt.vector()
f = theano.function([delta, b], self.op(delta, b))
delta_val = np.linspace(0.01, 0.99, 100)
b_val = np.linspace(0.01, 1.5, len(delta_val))
return f, [delta, b], [delta_val, b_val]
def test_basic(self):
f, t_args, v_args = self.get_args()
r = f(*v_args)
expect = -LimbDark()(
np.array([1.0 / np.pi, 0.0]), v_args[1], r, np.ones_like(r)
)[0].eval()
utt.assert_allclose(expect, v_args[0])
def test_infer_shape(self):
f, args, arg_vals = self.get_args()
self._compile_and_check(
args, [self.op(*args)], arg_vals, self.op_class
)
def test_grad(self):
func, t_args, v_args = self.get_args()
g = theano.function(
t_args, theano.grad(tt.sum(self.op(*t_args)), t_args)
)(*v_args)
eps = 1.234e-6
for n in range(len(t_args)):
v_args[n] += eps
plus = func(*v_args)
v_args[n] -= 2 * eps
minus = func(*v_args)
v_args[n] += eps
est = 0.5 * (plus - minus) / eps
utt.assert_allclose(est, g[n], atol=2 * eps)
def test_simple():
np.random.seed(4502934)
r = np.random.uniform(0, 1.0, 50)
b = np.random.uniform(-1.2, 1.2, len(r))
u = np.array([0.5, 0.3])
cl = GetCl()(tt.as_tensor_variable(u)).eval()
cl /= np.pi * (cl[0] + 2 * cl[1] / 3)
f0 = LimbDark()(cl, b, r, np.ones_like(b))[0].eval()
ld = SimpleLimbDark()
assert np.allclose(ld.apply(b, r), 0.0)
ld.set_u(u)
assert np.allclose(ld.apply(b, r), f0)
def test_simple_pickle():
np.random.seed(4502934)
r = np.random.uniform(0, 1.0, 50)
b = np.random.uniform(-1.2, 1.2, len(r))
u = np.array([0.5, 0.3])
ld = SimpleLimbDark()
ld.set_u(u)
f0 = ld.apply(b, r)
data = pickle.dumps(ld, -1)
ld2 = pickle.loads(data)
assert np.allclose(ld2.apply(b, r), f0) | 0.4917 | 0.509886 |
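# --- Editor's illustrative sketch (not part of the dataset record above) ---
# TestRadiusFromOccArea.test_grad above compares an analytic gradient against a
# central finite difference. A minimal numpy-only version of that check on an
# illustrative quadratic function (these names are not from the test suite):
import numpy as np

def central_diff_grad(f, x, eps=1e-6):
    """Estimate df/dx component-wise with a central difference."""
    g = np.zeros_like(x, dtype=float)
    for i in range(x.size):
        xp, xm = x.copy(), x.copy()
        xp[i] += eps
        xm[i] -= eps
        g[i] = (f(xp) - f(xm)) / (2.0 * eps)
    return g

f = lambda v: float(np.sum(v ** 2))
x = np.array([0.3, -1.2, 2.0])
assert np.allclose(central_diff_grad(f, x), 2.0 * x, atol=1e-5)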
import logging
import os
class SkiaGoldProperties(object):
def __init__(self, args):
"""Abstract class to validate and store properties related to Skia Gold.
Args:
args: The parsed arguments from an argparse.ArgumentParser.
"""
self._git_revision = None
self._issue = None
self._patchset = None
self._job_id = None
self._local_pixel_tests = None
self._no_luci_auth = None
self._bypass_skia_gold_functionality = None
self._code_review_system = None
self._continuous_integration_system = None
self._local_png_directory = None
self._InitializeProperties(args)
def IsTryjobRun(self):
return self.issue is not None
@property
def continuous_integration_system(self):
return self._continuous_integration_system or 'buildbucket'
@property
def code_review_system(self):
return self._code_review_system or 'gerrit'
@property
def git_revision(self):
return self._GetGitRevision()
@property
def issue(self):
return self._issue
@property
def job_id(self):
return self._job_id
@property
def local_pixel_tests(self):
return self._IsLocalRun()
@property
def local_png_directory(self):
return self._local_png_directory
@property
def no_luci_auth(self):
return self._no_luci_auth
@property
def patchset(self):
return self._patchset
@property
def bypass_skia_gold_functionality(self):
return self._bypass_skia_gold_functionality
@staticmethod
def _GetGitOriginMasterHeadSha1():
raise NotImplementedError()
def _GetGitRevision(self):
if not self._git_revision:
# Automated tests should always pass the revision, so assume we're on
# a workstation and try to get the local origin/master HEAD.
if not self._IsLocalRun():
raise RuntimeError(
'--git-revision was not passed when running on a bot')
revision = self._GetGitOriginMasterHeadSha1()
if not revision or len(revision) != 40:
raise RuntimeError(
'--git-revision not passed and unable to determine from git')
self._git_revision = revision
return self._git_revision
def _IsLocalRun(self):
if self._local_pixel_tests is None:
# Look for the presence of the SWARMING_SERVER environment variable as a
# heuristic to determine whether we're running on a workstation or a bot.
# This should always be set on swarming, but would be strange to be set on
# a workstation.
self._local_pixel_tests = 'SWARMING_SERVER' not in os.environ
if self._local_pixel_tests:
logging.warning(
'Automatically determined that test is running on a workstation')
else:
logging.warning(
'Automatically determined that test is running on a bot')
return self._local_pixel_tests
def _InitializeProperties(self, args):
if hasattr(args, 'local_pixel_tests'):
# If not set, will be automatically determined later if needed.
self._local_pixel_tests = args.local_pixel_tests
if hasattr(args, 'skia_gold_local_png_write_directory'):
self._local_png_directory = args.skia_gold_local_png_write_directory
if hasattr(args, 'no_luci_auth'):
self._no_luci_auth = args.no_luci_auth
if hasattr(args, 'bypass_skia_gold_functionality'):
self._bypass_skia_gold_functionality = args.bypass_skia_gold_functionality
if hasattr(args, 'code_review_system'):
self._code_review_system = args.code_review_system
if hasattr(args, 'continuous_integration_system'):
self._continuous_integration_system = args.continuous_integration_system
# Will be automatically determined later if needed.
if not hasattr(args, 'git_revision') or not args.git_revision:
return
self._git_revision = args.git_revision
# Only expected on tryjob runs.
if not hasattr(args, 'gerrit_issue') or not args.gerrit_issue:
return
self._issue = args.gerrit_issue
if not hasattr(args, 'gerrit_patchset') or not args.gerrit_patchset:
raise RuntimeError(
'--gerrit-issue passed, but --gerrit-patchset not passed.')
self._patchset = args.gerrit_patchset
if not hasattr(args, 'buildbucket_id') or not args.buildbucket_id:
raise RuntimeError(
'--gerrit-issue passed, but --buildbucket-id not passed.')
self._job_id = args.buildbucket_id | build/skia_gold_common/skia_gold_properties.py | import logging
import os
class SkiaGoldProperties(object):
def __init__(self, args):
"""Abstract class to validate and store properties related to Skia Gold.
Args:
args: The parsed arguments from an argparse.ArgumentParser.
"""
self._git_revision = None
self._issue = None
self._patchset = None
self._job_id = None
self._local_pixel_tests = None
self._no_luci_auth = None
self._bypass_skia_gold_functionality = None
self._code_review_system = None
self._continuous_integration_system = None
self._local_png_directory = None
self._InitializeProperties(args)
def IsTryjobRun(self):
return self.issue is not None
@property
def continuous_integration_system(self):
return self._continuous_integration_system or 'buildbucket'
@property
def code_review_system(self):
return self._code_review_system or 'gerrit'
@property
def git_revision(self):
return self._GetGitRevision()
@property
def issue(self):
return self._issue
@property
def job_id(self):
return self._job_id
@property
def local_pixel_tests(self):
return self._IsLocalRun()
@property
def local_png_directory(self):
return self._local_png_directory
@property
def no_luci_auth(self):
return self._no_luci_auth
@property
def patchset(self):
return self._patchset
@property
def bypass_skia_gold_functionality(self):
return self._bypass_skia_gold_functionality
@staticmethod
def _GetGitOriginMasterHeadSha1():
raise NotImplementedError()
def _GetGitRevision(self):
if not self._git_revision:
# Automated tests should always pass the revision, so assume we're on
# a workstation and try to get the local origin/master HEAD.
if not self._IsLocalRun():
raise RuntimeError(
'--git-revision was not passed when running on a bot')
revision = self._GetGitOriginMasterHeadSha1()
if not revision or len(revision) != 40:
raise RuntimeError(
'--git-revision not passed and unable to determine from git')
self._git_revision = revision
return self._git_revision
def _IsLocalRun(self):
if self._local_pixel_tests is None:
# Look for the presence of the SWARMING_SERVER environment variable as a
# heuristic to determine whether we're running on a workstation or a bot.
# This should always be set on swarming, but would be strange to be set on
# a workstation.
self._local_pixel_tests = 'SWARMING_SERVER' not in os.environ
if self._local_pixel_tests:
logging.warning(
'Automatically determined that test is running on a workstation')
else:
logging.warning(
'Automatically determined that test is running on a bot')
return self._local_pixel_tests
def _InitializeProperties(self, args):
if hasattr(args, 'local_pixel_tests'):
# If not set, will be automatically determined later if needed.
self._local_pixel_tests = args.local_pixel_tests
if hasattr(args, 'skia_gold_local_png_write_directory'):
self._local_png_directory = args.skia_gold_local_png_write_directory
if hasattr(args, 'no_luci_auth'):
self._no_luci_auth = args.no_luci_auth
if hasattr(args, 'bypass_skia_gold_functionality'):
self._bypass_skia_gold_functionality = args.bypass_skia_gold_functionality
if hasattr(args, 'code_review_system'):
self._code_review_system = args.code_review_system
if hasattr(args, 'continuous_integration_system'):
self._continuous_integration_system = args.continuous_integration_system
# Will be automatically determined later if needed.
if not hasattr(args, 'git_revision') or not args.git_revision:
return
self._git_revision = args.git_revision
# Only expected on tryjob runs.
if not hasattr(args, 'gerrit_issue') or not args.gerrit_issue:
return
self._issue = args.gerrit_issue
if not hasattr(args, 'gerrit_patchset') or not args.gerrit_patchset:
raise RuntimeError(
'--gerrit-issue passed, but --gerrit-patchset not passed.')
self._patchset = args.gerrit_patchset
if not hasattr(args, 'buildbucket_id') or not args.buildbucket_id:
raise RuntimeError(
'--gerrit-issue passed, but --buildbucket-id not passed.')
self._job_id = args.buildbucket_id | 0.670824 | 0.118131 |
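# --- Editor's illustrative sketch (not part of the dataset record above) ---
# SkiaGoldProperties above decides "workstation vs. bot" from the SWARMING_SERVER
# environment variable and expects --git-revision to be a 40-character SHA-1.
# A standalone sketch of both heuristics; the helper names are hypothetical and
# the SHA-1 check is slightly stricter (hex-only) than the length-only check above.
import os
import re

def is_local_run(environ=None):
    environ = os.environ if environ is None else environ
    return 'SWARMING_SERVER' not in environ

def looks_like_git_sha1(revision):
    return bool(re.fullmatch(r'[0-9a-f]{40}', revision))

assert is_local_run({})                      # no SWARMING_SERVER -> workstation
assert not is_local_run({'SWARMING_SERVER': 'https://example.test'})
assert looks_like_git_sha1('0' * 40)
assert not looks_like_git_sha1('HEAD')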
import pandas as pd
df = pd.read_csv(r'C:\Users\Ishant\Desktop\BE Project\Combined_99Acres_V4.csv') # reading the csv file to be cleaned
df2 = df.copy() # create a copy of csv file
row = 1 # initialise row counter for reading each tuple of csv
new_column = df['Location'] + str(1)
# add placeholder columns to df2, the copy that is filled in and saved below
df2['Bathrooms'] = new_column
new_column = df2['Bathrooms'] + str(1)
df2['Bedrooms'] = new_column
new_column = df2['Bedrooms'] + str(1)
df2['Balconies'] = new_column
counter = 1
for index in range(1, 32794):
try:
str1 = str(df2.at[row, 'Configuration']).replace('No', str(0)) # replace no with zero
s = map(int, filter(str.isdigit, str1)) # map the numbers from str1 and save the number in var s
for i in s: # storing the no of bathroom, bedroom, balconies into resp columns
if counter == 1:
df2.at[row, 'Bathrooms'] = i
counter = counter + 1
print(i)
elif counter == 2:
df2.at[row, 'Bedrooms'] = i
counter = counter + 1
print(i)
elif counter == 3:
df2.at[row, 'Balconies'] = i
counter = 1
print(i)
print(s)
        df2.at[row, 'Floor Number'] = str(df2.at[row, 'Floor Number']).split('of', 1)[0]  # cleaning floor no column
        str3 = str(df2.at[row, 'Floor Number'])
        df2.at[row, 'Floor Number'] = ''.join(filter(str.isdigit, str3))  # keep only the digits (map() would store an unevaluated iterator)
        if ',' in str(df2.at[row, 'Parking']):  # str.find() returns -1 (truthy) when absent, so test membership instead
            data = str(df2.at[row, 'Parking']).split(',')
            df2.at[row, 'Parking'] = str(float(data[0]) + float(data[2]))
        if 'None' in str(df2.at[row, 'Parking']):
            df2.at[row, 'Parking'] = ''
        try:
            if 'Cr' in str(df2.at[row, 'Price']):  # convert crores to lakhs
                k = str(df2.at[row, 'Price']).replace('Cr', '')
                df2.at[row, 'Price'] = str(float(k) * 100)
        except Exception as e:
            print(e)
        try:
            if ',' in str(df2.at[row, 'Price per sq.Ft']):
                df2.at[row, 'Price per sq.Ft'] = str(df2.at[row, 'Price per sq.Ft']).replace(',', '')
except Exception as e:
print('')
row = row + 1
except Exception as e:
continue
df2.to_csv(r'C:\Users\Ishant\Desktop\Combined_99Acres_V5.csv') # saving the data and formatting of df2 to the specified csv
print(df2.head()) # print first 5 rows and columns of df2 | Data_Cleaning.py | import pandas as pd
df = pd.read_csv(r'C:\Users\Ishant\Desktop\BE Project\Combined_99Acres_V4.csv') # reading the csv file to be cleaned
df2 = df.copy() # create a copy of csv file
row = 1 # initialise row counter for reading each tuple of csv
new_column = df['Location'] + str(1)
# add placeholder columns to df2, the copy that is filled in and saved below
df2['Bathrooms'] = new_column
new_column = df2['Bathrooms'] + str(1)
df2['Bedrooms'] = new_column
new_column = df2['Bedrooms'] + str(1)
df2['Balconies'] = new_column
counter = 1
for index in range(1, 32794):
try:
str1 = str(df2.at[row, 'Configuration']).replace('No', str(0)) # replace no with zero
s = map(int, filter(str.isdigit, str1)) # map the numbers from str1 and save the number in var s
for i in s: # storing the no of bathroom, bedroom, balconies into resp columns
if counter == 1:
df2.at[row, 'Bathrooms'] = i
counter = counter + 1
print(i)
elif counter == 2:
df2.at[row, 'Bedrooms'] = i
counter = counter + 1
print(i)
elif counter == 3:
df2.at[row, 'Balconies'] = i
counter = 1
print(i)
print(s)
        df2.at[row, 'Floor Number'] = str(df2.at[row, 'Floor Number']).split('of', 1)[0]  # cleaning floor no column
        str3 = str(df2.at[row, 'Floor Number'])
        df2.at[row, 'Floor Number'] = ''.join(filter(str.isdigit, str3))  # keep only the digits (map() would store an unevaluated iterator)
        if ',' in str(df2.at[row, 'Parking']):  # str.find() returns -1 (truthy) when absent, so test membership instead
            data = str(df2.at[row, 'Parking']).split(',')
            df2.at[row, 'Parking'] = str(float(data[0]) + float(data[2]))
        if 'None' in str(df2.at[row, 'Parking']):
            df2.at[row, 'Parking'] = ''
        try:
            if 'Cr' in str(df2.at[row, 'Price']):  # convert crores to lakhs
                k = str(df2.at[row, 'Price']).replace('Cr', '')
                df2.at[row, 'Price'] = str(float(k) * 100)
        except Exception as e:
            print(e)
        try:
            if ',' in str(df2.at[row, 'Price per sq.Ft']):
                df2.at[row, 'Price per sq.Ft'] = str(df2.at[row, 'Price per sq.Ft']).replace(',', '')
except Exception as e:
print('')
row = row + 1
except Exception as e:
continue
df2.to_csv(r'C:\Users\Ishant\Desktop\Combined_99Acres_V5.csv') # saving the data and formatting of df2 to the specified csv
print(df2.head()) # print first 5 rows and columns of df2 | 0.113383 | 0.402803 |
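# --- Editor's illustrative sketch (not part of the dataset record above) ---
# The script above converts "Cr" (crore) prices to lakh row by row inside a loop.
# A hedged, vectorized pandas sketch of the same conversion on made-up data; the
# column name is taken from the script, everything else here is illustrative.
import pandas as pd

prices = pd.DataFrame({'Price': ['1.2 Cr', '85', '2 Cr']})
is_crore = prices['Price'].str.contains('Cr')
as_number = (prices['Price']
             .str.replace('Cr', '', regex=False)
             .str.strip()
             .astype(float))
prices['Price'] = as_number.where(~is_crore, as_number * 100)  # 1 crore = 100 lakh
print(prices)  # 120.0, 85.0, 200.0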
import sys
from typing import Any
from PySide2.QtCore import QRectF, QRect
from PySide2.QtWidgets import QMessageBox, QLabel
from PySide2.QtGui import Qt, QColor, QPainter, QPen, QPaintEvent, QFontMetrics, QBrush
__author__ = "<NAME>"
QDIALOG_TYPES = (QMessageBox,)  # single-element tuple
WINDOW_BUTTONS_RIGHT: str = 'window_buttons_right'
WIDTH_PADDING_PX = 3 # add 2 pixel padding to width of titlebar text
DEBUG = False
class WindowTitleLabel(QLabel):
"""Implements a label for window titles"""
def __init__(self,
text: str,
height: int,
color: QColor,
parent: Any,
window_buttons_position: str,
button_bar_width: int,
minimum_width: int,
margin: int) -> None:
"""
Constructor
        text: titlebar text
        height: titlebar height in pixels
        color: titlebar text color
        parent: parent widget
        window_buttons_position: WINDOW_BUTTONS_RIGHT or the corresponding left-side constant
        button_bar_width: combined width of all buttons in pixels
        minimum_width: minimum width of the label in pixels
        margin: margin around the title text in pixels
"""
super().__init__(parent)
self.__original_text: str = text
self.__ofs_y: float = height / 2.0
self.__font_pen: QPen = QPen(color)
self.__br_height: int = 0
self.__window_buttons_position: str = window_buttons_position
self.__margin: int = margin
self.__button_bar_width: int = button_bar_width + margin
self.setText(text)
self.setAttribute(Qt.WA_TransparentForMouseEvents)
self.setMinimumWidth(minimum_width)
def paintEvent(self, _: QPaintEvent) -> None:
"""Qt Paint Event: ensures the title-bar text is elided properly and stays centered"""
painter = QPainter()
painter.begin(self)
painter.setPen(self.__font_pen)
        # bold font for macOS and Linux window titles
font = self.font()
if sys.platform in ["darwin", "linux"]:
font.setBold(True)
font_metrics = QFontMetrics(font)
# calculate text properties
text_width: int = self.width() - self.__button_bar_width - self.__margin
text: str = font_metrics.elidedText(self.__original_text, Qt.ElideRight, text_width)
# calculate height
br: QRect = font_metrics.boundingRect(text)
if br.height() > 0:
self.__br_height = br.height()
py = self.__ofs_y - self.__br_height / 2.0
br_width = br.width()
# calculate width
px = (self.width() - br_width - WIDTH_PADDING_PX) / 2.0
if px < self.__button_bar_width:
if self.__window_buttons_position == WINDOW_BUTTONS_RIGHT:
if text != self.__original_text:
px = self.__margin
else:
px = px - (self.__button_bar_width - px)
else:
px = self.__button_bar_width
# draw title
rect = QRectF(px, py, br_width + WIDTH_PADDING_PX, self.__br_height)
painter.setFont(font)
if DEBUG:
painter.setBrush(QBrush(QColor('#ff0000')))
painter.drawRect(rect)
painter.drawText(rect, Qt.AlignLeft, text)
def setWindowTitle(self, title: str) -> None:
"""Sets the Window title string"""
self.__original_text = title
self.update() | src/qtmodernredux/windowstyle/windowtitlelabel.py | import sys
from typing import Any
from PySide2.QtCore import QRectF, QRect
from PySide2.QtWidgets import QMessageBox, QLabel
from PySide2.QtGui import Qt, QColor, QPainter, QPen, QPaintEvent, QFontMetrics, QBrush
__author__ = "<NAME>"
QDIALOG_TYPES = (QMessageBox,)  # single-element tuple
WINDOW_BUTTONS_RIGHT: str = 'window_buttons_right'
WIDTH_PADDING_PX = 3 # add 2 pixel padding to width of titlebar text
DEBUG = False
class WindowTitleLabel(QLabel):
"""Implements a label for window titles"""
def __init__(self,
text: str,
height: int,
color: QColor,
parent: Any,
window_buttons_position: str,
button_bar_width: int,
minimum_width: int,
margin: int) -> None:
"""
Constructor
        text: titlebar text
        height: titlebar height in pixels
        color: titlebar text color
        parent: parent widget
        window_buttons_position: WINDOW_BUTTONS_RIGHT or the corresponding left-side constant
        button_bar_width: combined width of all buttons in pixels
        minimum_width: minimum width of the label in pixels
        margin: margin around the title text in pixels
"""
super().__init__(parent)
self.__original_text: str = text
self.__ofs_y: float = height / 2.0
self.__font_pen: QPen = QPen(color)
self.__br_height: int = 0
self.__window_buttons_position: str = window_buttons_position
self.__margin: int = margin
self.__button_bar_width: int = button_bar_width + margin
self.setText(text)
self.setAttribute(Qt.WA_TransparentForMouseEvents)
self.setMinimumWidth(minimum_width)
def paintEvent(self, _: QPaintEvent) -> None:
"""Qt Paint Event: ensures the title-bar text is elided properly and stays centered"""
painter = QPainter()
painter.begin(self)
painter.setPen(self.__font_pen)
        # bold font for macOS and Linux window titles
font = self.font()
if sys.platform in ["darwin", "linux"]:
font.setBold(True)
font_metrics = QFontMetrics(font)
# calculate text properties
text_width: int = self.width() - self.__button_bar_width - self.__margin
text: str = font_metrics.elidedText(self.__original_text, Qt.ElideRight, text_width)
# calculate height
br: QRect = font_metrics.boundingRect(text)
if br.height() > 0:
self.__br_height = br.height()
py = self.__ofs_y - self.__br_height / 2.0
br_width = br.width()
# calculate width
px = (self.width() - br_width - WIDTH_PADDING_PX) / 2.0
if px < self.__button_bar_width:
if self.__window_buttons_position == WINDOW_BUTTONS_RIGHT:
if text != self.__original_text:
px = self.__margin
else:
px = px - (self.__button_bar_width - px)
else:
px = self.__button_bar_width
# draw title
rect = QRectF(px, py, br_width + WIDTH_PADDING_PX, self.__br_height)
painter.setFont(font)
if DEBUG:
painter.setBrush(QBrush(QColor('#ff0000')))
painter.drawRect(rect)
painter.drawText(rect, Qt.AlignLeft, text)
def setWindowTitle(self, title: str) -> None:
"""Sets the Window title string"""
self.__original_text = title
self.update() | 0.584508 | 0.164148 |
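# --- Editor's illustrative sketch (not part of the dataset record above) ---
# paintEvent above centres the elided title and then clamps it so the text cannot
# run underneath the window-button bar. A pure-Python sketch of that arithmetic
# with hypothetical names; no Qt objects are involved.
def title_x(label_width, text_width, button_bar_width, margin,
            buttons_on_right, elided):
    px = (label_width - text_width) / 2.0          # centred position
    if px < button_bar_width:                      # would collide with the buttons
        if buttons_on_right:
            px = margin if elided else px - (button_bar_width - px)
        else:
            px = button_bar_width                  # push right of the button bar
    return px

print(title_x(400, 100, 80, 6, buttons_on_right=False, elided=False))  # 150.0 (centred)
print(title_x(200, 160, 80, 6, buttons_on_right=False, elided=False))  # 80 (clamped)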
import inspect
import re
from collections import Counter
from typing import Any, Callable, Dict, List, Optional, Tuple
from urllib.parse import urljoin
from squall import convertors, params
PARAM_REGEX = re.compile("{([a-zA-Z_][a-zA-Z0-9_]*)(:[a-zA-Z_][a-zA-Z0-9_]*)?}")
class Path:
__slots__ = ["path", "handler"]
def __init__(self, path: str, handler: Callable[..., Any]) -> None:
self.path = path
self.handler = handler
def strip_trailing_slash(self) -> None:
"""Strip trailing slash if path differ form '/'
>>> p = Path("/my/route/", lambda: None)
>>> assert p.path == "/my/route/"
>>> p.strip_trailing_slash()
>>> assert p.path == "/my/route"
"""
if self.path != "/":
self.path = self.path.rstrip("/")
def append_left(self, prefix: str) -> None:
"""Prepend provided prefix to the path
:param prefix: Prefix path to add
>>> p = Path("/my/route", lambda: None)
>>> assert p.path == "/my/route"
>>> p.append_left("/api/v1")
>>> assert p.path == "/api/v1/my/route"
"""
if not prefix.endswith("/"):
prefix += "/"
self.path = urljoin(prefix, self.path.strip("/"))
@property
def path_params(self) -> List[Tuple[str, Optional[str]]]:
"""Returns dynamic path parameters and their path-declared convertor aliases
>>> p = Path("/user/{user_id:int}/notes/{note:uuid}/type/{type}", lambda: None)
>>> assert p.path_params == [
>>> ("user_id", "int"),
>>> ("note", "uuid"),
>>> ("type", None)
>>> ]
"""
result, names = [], []
for param in PARAM_REGEX.finditer(self.path):
name, suffix = param.groups("")
names.append(name)
convertor = suffix.lstrip(":") if suffix else None
result.append((name, convertor))
if duplicates := [i for i, cnt in Counter(names).items() if cnt > 1]:
raise ValueError(
f'Path "{self.path}" contains '
f"duplicate params: {', '.join(duplicates)}"
)
return result
@property
def schema_path(self) -> str:
"""Returns simplified path without convertor aliases
>>> p = Path("/user/{user_id:int}/notes/{note:uuid}/type/{type}", lambda: None)
>>> assert p.schema_path == "/user/{user_id}/notes/{note}/type/{type}"
"""
result = self.path
for match in PARAM_REGEX.finditer(self.path):
param_name, convertor_type = match.groups("")
result = result.replace(
"{" f"{param_name}{convertor_type}" "}",
"{" f"{param_name}" "}",
)
return result
@property
def router_path(self) -> str:
"""Returns path with detailed convertors aliases information.
Also uses handler annotations for lookup.
Used for exact pattern registration in squall-router library
>>> async def my_handler(user_id: int, note): pass
>>>
>>> p = Path("/user/{user_id}/notes/{note:uuid}", my_handler)
>>> assert p.router_path == "/user/{user_id:int}/notes/{note:uuid}"
"""
result = self.schema_path
from_handler = self.get_path_params_from_handler()
for param_name, convertor_name in self.path_params:
if convertor_name := convertor_name or from_handler.get(param_name):
result = result.replace(
"{" f"{param_name}" "}",
"{" f"{param_name}:{convertor_name}" "}",
)
return result
def get_path_params_from_handler(self) -> Dict[str, Optional[str]]:
"""Returns handler parameters affiliated with path.
>>> from uuid import UUID
>>>
>>> async def my_handler(user_id: int, note: UUID): pass
>>>
>>> p = Path("/user/{user_id}/notes/{note}", my_handler)
>>> assert p.get_path_params_from_handler() == {
>>> "user_id": "int",
>>> "note": "uuid"
>>> }
"""
results = {}
path_params = dict(self.path_params)
for k, v in inspect.signature(self.handler).parameters.items():
name = k
if isinstance(v.default, params.Path):
if alias := v.default.alias:
name = alias
elif v.default is v.empty:
name = k
if name not in path_params:
continue
validate = path_params[name]
if v.annotation != v.empty:
if convertor := convertors.database.get_by_type(v.annotation):
if validate is None:
validate = convertor.alias
elif convertor.alias != path_params[name]:
raise ValueError(
f"Parameter {name} have different annotation and convertor types: "
f"{convertor.alias} != {path_params[name]}"
)
else:
raise ValueError(
f"Parameter `{name}` have unknown convertor type: {v.annotation}"
)
results[name] = validate
return results | squall/routing/path.py | import inspect
import re
from collections import Counter
from typing import Any, Callable, Dict, List, Optional, Tuple
from urllib.parse import urljoin
from squall import convertors, params
PARAM_REGEX = re.compile("{([a-zA-Z_][a-zA-Z0-9_]*)(:[a-zA-Z_][a-zA-Z0-9_]*)?}")
class Path:
__slots__ = ["path", "handler"]
def __init__(self, path: str, handler: Callable[..., Any]) -> None:
self.path = path
self.handler = handler
def strip_trailing_slash(self) -> None:
"""Strip trailing slash if path differ form '/'
>>> p = Path("/my/route/", lambda: None)
>>> assert p.path == "/my/route/"
>>> p.strip_trailing_slash()
>>> assert p.path == "/my/route"
"""
if self.path != "/":
self.path = self.path.rstrip("/")
def append_left(self, prefix: str) -> None:
"""Prepend provided prefix to the path
:param prefix: Prefix path to add
>>> p = Path("/my/route", lambda: None)
>>> assert p.path == "/my/route"
>>> p.append_left("/api/v1")
>>> assert p.path == "/api/v1/my/route"
"""
if not prefix.endswith("/"):
prefix += "/"
self.path = urljoin(prefix, self.path.strip("/"))
@property
def path_params(self) -> List[Tuple[str, Optional[str]]]:
"""Returns dynamic path parameters and their path-declared convertor aliases
>>> p = Path("/user/{user_id:int}/notes/{note:uuid}/type/{type}", lambda: None)
>>> assert p.path_params == [
>>> ("user_id", "int"),
>>> ("note", "uuid"),
>>> ("type", None)
>>> ]
"""
result, names = [], []
for param in PARAM_REGEX.finditer(self.path):
name, suffix = param.groups("")
names.append(name)
convertor = suffix.lstrip(":") if suffix else None
result.append((name, convertor))
if duplicates := [i for i, cnt in Counter(names).items() if cnt > 1]:
raise ValueError(
f'Path "{self.path}" contains '
f"duplicate params: {', '.join(duplicates)}"
)
return result
@property
def schema_path(self) -> str:
"""Returns simplified path without convertor aliases
>>> p = Path("/user/{user_id:int}/notes/{note:uuid}/type/{type}", lambda: None)
>>> assert p.schema_path == "/user/{user_id}/notes/{note}/type/{type}"
"""
result = self.path
for match in PARAM_REGEX.finditer(self.path):
param_name, convertor_type = match.groups("")
result = result.replace(
"{" f"{param_name}{convertor_type}" "}",
"{" f"{param_name}" "}",
)
return result
@property
def router_path(self) -> str:
"""Returns path with detailed convertors aliases information.
Also uses handler annotations for lookup.
Used for exact pattern registration in squall-router library
>>> async def my_handler(user_id: int, note): pass
>>>
>>> p = Path("/user/{user_id}/notes/{note:uuid}", my_handler)
>>> assert p.router_path == "/user/{user_id:int}/notes/{note:uuid}"
"""
result = self.schema_path
from_handler = self.get_path_params_from_handler()
for param_name, convertor_name in self.path_params:
if convertor_name := convertor_name or from_handler.get(param_name):
result = result.replace(
"{" f"{param_name}" "}",
"{" f"{param_name}:{convertor_name}" "}",
)
return result
def get_path_params_from_handler(self) -> Dict[str, Optional[str]]:
"""Returns handler parameters affiliated with path.
>>> from uuid import UUID
>>>
>>> async def my_handler(user_id: int, note: UUID): pass
>>>
>>> p = Path("/user/{user_id}/notes/{note}", my_handler)
>>> assert p.get_path_params_from_handler() == {
>>> "user_id": "int",
>>> "note": "uuid"
>>> }
"""
results = {}
path_params = dict(self.path_params)
for k, v in inspect.signature(self.handler).parameters.items():
name = k
if isinstance(v.default, params.Path):
if alias := v.default.alias:
name = alias
elif v.default is v.empty:
name = k
if name not in path_params:
continue
validate = path_params[name]
if v.annotation != v.empty:
if convertor := convertors.database.get_by_type(v.annotation):
if validate is None:
validate = convertor.alias
elif convertor.alias != path_params[name]:
raise ValueError(
f"Parameter {name} have different annotation and convertor types: "
f"{convertor.alias} != {path_params[name]}"
)
else:
raise ValueError(
f"Parameter `{name}` have unknown convertor type: {v.annotation}"
)
results[name] = validate
return results | 0.86164 | 0.187579 |
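# --- Editor's illustrative sketch (not part of the dataset record above) ---
# A standalone sketch of the PARAM_REGEX extraction and duplicate detection that
# Path.path_params performs above; the regex is copied from the module, the rest
# is illustrative and does not use the Path class itself.
import re
from collections import Counter

PARAM_REGEX = re.compile("{([a-zA-Z_][a-zA-Z0-9_]*)(:[a-zA-Z_][a-zA-Z0-9_]*)?}")
path = "/user/{user_id:int}/notes/{note:uuid}/type/{type}"

params = [(m.group(1), (m.group(2) or "").lstrip(":") or None)
          for m in PARAM_REGEX.finditer(path)]
print(params)  # [('user_id', 'int'), ('note', 'uuid'), ('type', None)]

duplicates = [name for name, cnt in Counter(n for n, _ in params).items() if cnt > 1]
assert not duplicates  # a repeated parameter name would be rejected with ValueError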
import os
import numpy as np
import argparse
import random
import tensorflow as tf
from data_generators.data_generator import DataGenerator
from models.maml import MAML
parser = argparse.ArgumentParser()
parser.add_argument('data_path', metavar='DATA',
help='path to data')
parser.add_argument('ckpt_name', metavar='CKPT',
help='path to checkpoint')
parser.add_argument('-ds', '--data_source', default='imagenet', type=str,
help='data_source (imagenet or omniglot)')
parser.add_argument('-t', '--test', action='store_true', default=False,
help='set for test, otherwise train')
parser.add_argument('--multi', action='store_true', default=False,
help='set for multi-class problems, otherwise binary classification')
parser.add_argument('-l', '--train_lr', default=1e-4, type=float,
help='train_lr (default=1e-4)')
parser.add_argument('-p', '--pkl_file', default='filelist', type=str,
help='path to pickle file')
parser.add_argument('-cf', '--cluster_folder', default=None, type=str,
help='cluster folder w/o root (default=None)')
parser.add_argument('--kshot', default=1, type=int,
help='# of shots per class (default=1)')
parser.add_argument('--kquery', default=1, type=int,
help='# of queries per class (default=1)')
parser.add_argument('--nway', default=51, type=int,
help='# of classes per problem (default=51)')
parser.add_argument('--metabatch', default=4, type=int,
help='meta batch-size for training (default=4)')
parser.add_argument('--steps', default=5, type=int,
help='# of gradient steps (default=5)')
parser.add_argument('--iter', default=40000, type=int,
help='# of training iterations (default=40,000)')
parser.add_argument('--train_problems', default=100000, type=int,
help='# of training problems (default=100,000)')
parser.add_argument('--test_problems', default=600, type=int,
help='# of test problems (default=600)')
parser.add_argument('-c', '--cuda_id', default="0", type=str,
help='cuda ID (default="0")')
args = parser.parse_args()
data_path = args.data_path
data_source = args.data_source
ckpt_name = args.ckpt_name
cluster_folder = args.cluster_folder
train_lr = args.train_lr
pkl_file = args.pkl_file
kshot = args.kshot
kquery = args.kquery
nway = args.nway
meta_batchsz = args.metabatch
steps = args.steps
n_iterations = args.iter
train_problems = args.train_problems
test_problems = args.test_problems
cuda_id = args.cuda_id
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_id
def train(model, saver, sess):
"""
:param model:
:param saver:
:param sess:
:return:
"""
# write graph to tensorboard
# tb = tf.summary.FileWriter(os.path.join('logs', 'mini'), sess.graph)
prelosses, postlosses, supportaccs, preaccs, postaccs = [], [], [], [], []
best_acc = 0
    # train for n_iterations meta-iterations
for iteration in range(n_iterations):
# this is the main op
ops = [model.meta_op]
# add summary and print op
if iteration % 200 == 0:
ops.extend([model.summ_op,
model.query_losses[0], model.query_losses[-1],
model.query_accs[0], model.query_accs[-1]
])
# run all ops
result = sess.run(ops)
# summary
if iteration % 200 == 0:
# summ_op
# tb.add_summary(result[1], iteration)
# query_losses[0]
prelosses.append(result[2])
# query_losses[-1]
postlosses.append(result[3])
# query_accs[0]
preaccs.append(result[4])
# query_accs[-1]
postaccs.append(result[5])
# support_acc
print(iteration, '\tloss:', np.mean(prelosses), '=>', np.mean(postlosses),
'\t\tacc:', np.mean(preaccs), '=>', np.mean(postaccs))
prelosses, postlosses, preaccs, postaccs, supportaccs = [], [], [], [], []
# evaluation
if iteration % 2000 == 0:
            # DO NOT write a = b = []; both names would then refer to the same list.
            # DO NOT reuse the name `train` for a variable; a train() function already exists.
acc0s, acc1s, acc2s = [], [], []
# sample 20 times to get more accurate statistics.
for _ in range(20):
acc1, acc2 = sess.run([
model.test_query_accs[0],
model.test_query_accs[-1]])
acc1s.append(acc1)
acc2s.append(acc2)
acc = np.mean(acc2s)
print('>>>>\t\tValidation accs::\t ', np.mean(acc1s), acc, 'best:', best_acc, '\t\t<<<<')
if acc - best_acc > 0.0:
saver.save(sess, os.path.join(ckpt_name, 'maml.mdl'))
best_acc = acc
print('saved into ckpt:', acc)
def test(model, sess):
np.random.seed(1)
random.seed(1)
# repeat test accuracy for 600 times
test_accs = []
for i in range(test_problems):
if i % 100 == 1:
print(i)
        # list.extend() returns None, so build the ops list before calling sess.run
ops = [model.query_preds_probs, model.test_support_acc]
ops.extend(model.test_query_accs)
result = sess.run(ops)
test_accs.append(result[1:])
# [600, steps+1]
test_accs = np.array(test_accs)
# [steps+1]
means = np.mean(test_accs, 0)
stds = np.std(test_accs, 0)
ci95 = 1.96 * stds * 100 / np.sqrt(test_problems)
print('[support_t0, query_t0 - \t\t\tsteps] ')
print('mean:', means)
print('stds:', stds)
print('ci95:', ci95)
def main():
training = not args.test
multiclass = args.multi
# kshot + kquery images per category, nway categories, meta_batchsz tasks.
db = DataGenerator(data_source, nway, kshot, kquery, meta_batchsz,
pkl_file, data_path, cluster_folder, multiclass, train_problems, test_problems)
if training: # only construct training model if needed
# get the tensors
image_tensor, label_tensor = db.make_data_tensor(training=True)
support_x = tf.slice(image_tensor, [0, 0, 0], [-1, nway, -1], name='support_x')
query_x = tf.slice(image_tensor, [0, nway, 0], [-1, -1, -1], name='query_x')
support_y = tf.slice(label_tensor, [0, 0, 0], [-1, nway, -1], name='support_y')
query_y = tf.slice(label_tensor, [0, nway, 0], [-1, -1, -1], name='query_y')
# construct test tensors
image_tensor, label_tensor = db.make_data_tensor(training=False)
support_x_test = tf.slice(image_tensor, [0, 0, 0], [-1, nway, -1], name='support_x_test')
query_x_test = tf.slice(image_tensor, [0, nway, 0], [-1, -1, -1], name='query_x_test')
support_y_test = tf.slice(label_tensor, [0, 0, 0], [-1, nway, -1], name='support_y_test')
query_y_test = tf.slice(label_tensor, [0, nway, 0], [-1, -1, -1], name='query_y_test')
# 1. construct MAML model
model = MAML(data_source, 2, kshot, kquery, train_lr=train_lr)
# construct metatrain_ and metaval_
if training:
model.build(support_x, support_y, query_x, query_y,
steps, meta_batchsz, mode='train')
model.build(support_x_test, support_y_test, query_x_test,
query_y_test, steps, meta_batchsz, mode='eval')
else:
model.build(support_x_test, support_y_test, query_x_test,
query_y_test, steps, meta_batchsz, mode='test')
model.summ_op = tf.summary.merge_all()
all_vars = filter(lambda x: 'meta_optim' not in x.name, tf.trainable_variables())
for p in all_vars:
print(p)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)
    # tf.global_variables() is used so the batch-norm moving_mean and moving_variance are saved;
    # tf.trainable_variables() does NOT include moving_mean and moving_variance.
saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)
    # initialize variables under the interactive session
tf.global_variables_initializer().run()
tf.train.start_queue_runners()
if os.path.exists(os.path.join(ckpt_name, 'checkpoint')):
        # always load the checkpoint, for both train and test.
model_file = tf.train.latest_checkpoint(ckpt_name)
print("Restoring model weights from ", model_file)
saver.restore(sess, model_file)
if training:
train(model, saver, sess)
else:
test(model, sess)
if __name__ == "__main__":
main() | main.py | import os
import numpy as np
import argparse
import random
import tensorflow as tf
from data_generators.data_generator import DataGenerator
from models.maml import MAML
parser = argparse.ArgumentParser()
parser.add_argument('data_path', metavar='DATA',
help='path to data')
parser.add_argument('ckpt_name', metavar='CKPT',
help='path to checkpoint')
parser.add_argument('-ds', '--data_source', default='imagenet', type=str,
help='data_source (imagenet or omniglot)')
parser.add_argument('-t', '--test', action='store_true', default=False,
help='set for test, otherwise train')
parser.add_argument('--multi', action='store_true', default=False,
help='set for multi-class problems, otherwise binary classification')
parser.add_argument('-l', '--train_lr', default=1e-4, type=float,
help='train_lr (default=1e-4)')
parser.add_argument('-p', '--pkl_file', default='filelist', type=str,
help='path to pickle file')
parser.add_argument('-cf', '--cluster_folder', default=None, type=str,
help='cluster folder w/o root (default=None)')
parser.add_argument('--kshot', default=1, type=int,
help='# of shots per class (default=1)')
parser.add_argument('--kquery', default=1, type=int,
help='# of queries per class (default=1)')
parser.add_argument('--nway', default=51, type=int,
help='# of classes per problem (default=51)')
parser.add_argument('--metabatch', default=4, type=int,
help='meta batch-size for training (default=4)')
parser.add_argument('--steps', default=5, type=int,
help='# of gradient steps (default=5)')
parser.add_argument('--iter', default=40000, type=int,
help='# of training iterations (default=40,000)')
parser.add_argument('--train_problems', default=100000, type=int,
help='# of training problems (default=100,000)')
parser.add_argument('--test_problems', default=600, type=int,
help='# of test problems (default=600)')
parser.add_argument('-c', '--cuda_id', default="0", type=str,
help='cuda ID (default="0")')
args = parser.parse_args()
data_path = args.data_path
data_source = args.data_source
ckpt_name = args.ckpt_name
cluster_folder = args.cluster_folder
train_lr = args.train_lr
pkl_file = args.pkl_file
kshot = args.kshot
kquery = args.kquery
nway = args.nway
meta_batchsz = args.metabatch
steps = args.steps
n_iterations = args.iter
train_problems = args.train_problems
test_problems = args.test_problems
cuda_id = args.cuda_id
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_id
def train(model, saver, sess):
"""
:param model:
:param saver:
:param sess:
:return:
"""
# write graph to tensorboard
# tb = tf.summary.FileWriter(os.path.join('logs', 'mini'), sess.graph)
prelosses, postlosses, supportaccs, preaccs, postaccs = [], [], [], [], []
best_acc = 0
    # train for n_iterations meta-iterations
for iteration in range(n_iterations):
# this is the main op
ops = [model.meta_op]
# add summary and print op
if iteration % 200 == 0:
ops.extend([model.summ_op,
model.query_losses[0], model.query_losses[-1],
model.query_accs[0], model.query_accs[-1]
])
# run all ops
result = sess.run(ops)
# summary
if iteration % 200 == 0:
# summ_op
# tb.add_summary(result[1], iteration)
# query_losses[0]
prelosses.append(result[2])
# query_losses[-1]
postlosses.append(result[3])
# query_accs[0]
preaccs.append(result[4])
# query_accs[-1]
postaccs.append(result[5])
# support_acc
print(iteration, '\tloss:', np.mean(prelosses), '=>', np.mean(postlosses),
'\t\tacc:', np.mean(preaccs), '=>', np.mean(postaccs))
prelosses, postlosses, preaccs, postaccs, supportaccs = [], [], [], [], []
# evaluation
if iteration % 2000 == 0:
            # DO NOT write a = b = []; both names would then refer to the same list.
            # DO NOT reuse the name `train` for a variable; a train() function already exists.
acc0s, acc1s, acc2s = [], [], []
# sample 20 times to get more accurate statistics.
for _ in range(20):
acc1, acc2 = sess.run([
model.test_query_accs[0],
model.test_query_accs[-1]])
acc1s.append(acc1)
acc2s.append(acc2)
acc = np.mean(acc2s)
print('>>>>\t\tValidation accs::\t ', np.mean(acc1s), acc, 'best:', best_acc, '\t\t<<<<')
if acc - best_acc > 0.0:
saver.save(sess, os.path.join(ckpt_name, 'maml.mdl'))
best_acc = acc
print('saved into ckpt:', acc)
def test(model, sess):
np.random.seed(1)
random.seed(1)
# repeat test accuracy for 600 times
test_accs = []
for i in range(test_problems):
if i % 100 == 1:
print(i)
        # list.extend() returns None, so build the ops list before calling sess.run
ops = [model.query_preds_probs, model.test_support_acc]
ops.extend(model.test_query_accs)
result = sess.run(ops)
test_accs.append(result[1:])
# [600, steps+1]
test_accs = np.array(test_accs)
# [steps+1]
means = np.mean(test_accs, 0)
stds = np.std(test_accs, 0)
ci95 = 1.96 * stds * 100 / np.sqrt(test_problems)
print('[support_t0, query_t0 - \t\t\tsteps] ')
print('mean:', means)
print('stds:', stds)
print('ci95:', ci95)
def main():
training = not args.test
multiclass = args.multi
# kshot + kquery images per category, nway categories, meta_batchsz tasks.
db = DataGenerator(data_source, nway, kshot, kquery, meta_batchsz,
pkl_file, data_path, cluster_folder, multiclass, train_problems, test_problems)
if training: # only construct training model if needed
# get the tensors
image_tensor, label_tensor = db.make_data_tensor(training=True)
support_x = tf.slice(image_tensor, [0, 0, 0], [-1, nway, -1], name='support_x')
query_x = tf.slice(image_tensor, [0, nway, 0], [-1, -1, -1], name='query_x')
support_y = tf.slice(label_tensor, [0, 0, 0], [-1, nway, -1], name='support_y')
query_y = tf.slice(label_tensor, [0, nway, 0], [-1, -1, -1], name='query_y')
# construct test tensors
image_tensor, label_tensor = db.make_data_tensor(training=False)
support_x_test = tf.slice(image_tensor, [0, 0, 0], [-1, nway, -1], name='support_x_test')
query_x_test = tf.slice(image_tensor, [0, nway, 0], [-1, -1, -1], name='query_x_test')
support_y_test = tf.slice(label_tensor, [0, 0, 0], [-1, nway, -1], name='support_y_test')
query_y_test = tf.slice(label_tensor, [0, nway, 0], [-1, -1, -1], name='query_y_test')
# 1. construct MAML model
model = MAML(data_source, 2, kshot, kquery, train_lr=train_lr)
# construct metatrain_ and metaval_
if training:
model.build(support_x, support_y, query_x, query_y,
steps, meta_batchsz, mode='train')
model.build(support_x_test, support_y_test, query_x_test,
query_y_test, steps, meta_batchsz, mode='eval')
else:
model.build(support_x_test, support_y_test, query_x_test,
query_y_test, steps, meta_batchsz, mode='test')
model.summ_op = tf.summary.merge_all()
all_vars = filter(lambda x: 'meta_optim' not in x.name, tf.trainable_variables())
for p in all_vars:
print(p)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)
    # tf.global_variables() is used so the batch-norm moving_mean and moving_variance are saved;
    # tf.trainable_variables() does NOT include moving_mean and moving_variance.
saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)
    # initialize variables under the interactive session
tf.global_variables_initializer().run()
tf.train.start_queue_runners()
if os.path.exists(os.path.join(ckpt_name, 'checkpoint')):
        # always load the checkpoint, for both train and test.
model_file = tf.train.latest_checkpoint(ckpt_name)
print("Restoring model weights from ", model_file)
saver.restore(sess, model_file)
if training:
train(model, saver, sess)
else:
test(model, sess)
if __name__ == "__main__":
main() | 0.516595 | 0.132711 |
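# --- Editor's illustrative sketch (not part of the dataset record above) ---
# test() above reports a 95% confidence interval as 1.96 * std * 100 / sqrt(N).
# A numpy-only sketch of that computation on synthetic per-problem accuracies
# (the accuracies are made up; 600 matches the default --test_problems).
import numpy as np

rng = np.random.default_rng(0)
accs = rng.uniform(0.55, 0.65, size=600)            # stand-in for test accuracies
mean, std = accs.mean(), accs.std()
ci95 = 1.96 * std * 100 / np.sqrt(len(accs))        # in percentage points
print(f"accuracy = {mean:.4f} +/- {ci95:.2f} (95% CI, percentage points)")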
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import nodes_pb2 as nodes__pb2
class NodesStub(object):
"""Nodes make up the strongDM network, and allow your users to connect securely to your resources. There are two types of nodes:
    - **Gateways** are the entry points into the network. They listen for connections from the strongDM client, and provide access to databases and servers.
- **Relays** are used to extend the strongDM network into segmented subnets. They provide access to databases and servers but do not listen for incoming connections.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Create = channel.unary_unary(
'/v1.Nodes/Create',
request_serializer=nodes__pb2.NodeCreateRequest.SerializeToString,
response_deserializer=nodes__pb2.NodeCreateResponse.FromString,
)
self.Get = channel.unary_unary(
'/v1.Nodes/Get',
request_serializer=nodes__pb2.NodeGetRequest.SerializeToString,
response_deserializer=nodes__pb2.NodeGetResponse.FromString,
)
self.Update = channel.unary_unary(
'/v1.Nodes/Update',
request_serializer=nodes__pb2.NodeUpdateRequest.SerializeToString,
response_deserializer=nodes__pb2.NodeUpdateResponse.FromString,
)
self.Delete = channel.unary_unary(
'/v1.Nodes/Delete',
request_serializer=nodes__pb2.NodeDeleteRequest.SerializeToString,
response_deserializer=nodes__pb2.NodeDeleteResponse.FromString,
)
self.List = channel.unary_unary(
'/v1.Nodes/List',
request_serializer=nodes__pb2.NodeListRequest.SerializeToString,
response_deserializer=nodes__pb2.NodeListResponse.FromString,
)
class NodesServicer(object):
"""Nodes make up the strongDM network, and allow your users to connect securely to your resources. There are two types of nodes:
    - **Gateways** are the entry points into the network. They listen for connections from the strongDM client, and provide access to databases and servers.
- **Relays** are used to extend the strongDM network into segmented subnets. They provide access to databases and servers but do not listen for incoming connections.
"""
def Create(self, request, context):
"""Create registers a new Node.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Get(self, request, context):
"""Get reads one Node by ID.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""Update patches a Node by ID.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Delete removes a Node by ID.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""List gets a list of Nodes matching a given set of criteria.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_NodesServicer_to_server(servicer, server):
rpc_method_handlers = {
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=nodes__pb2.NodeCreateRequest.FromString,
response_serializer=nodes__pb2.NodeCreateResponse.SerializeToString,
),
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=nodes__pb2.NodeGetRequest.FromString,
response_serializer=nodes__pb2.NodeGetResponse.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=nodes__pb2.NodeUpdateRequest.FromString,
response_serializer=nodes__pb2.NodeUpdateResponse.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=nodes__pb2.NodeDeleteRequest.FromString,
response_serializer=nodes__pb2.NodeDeleteResponse.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=nodes__pb2.NodeListRequest.FromString,
response_serializer=nodes__pb2.NodeListResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'v1.Nodes', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Nodes(object):
"""Nodes make up the strongDM network, and allow your users to connect securely to your resources. There are two types of nodes:
    - **Gateways** are the entry points into the network. They listen for connections from the strongDM client, and provide access to databases and servers.
- **Relays** are used to extend the strongDM network into segmented subnets. They provide access to databases and servers but do not listen for incoming connections.
"""
@staticmethod
def Create(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Nodes/Create',
nodes__pb2.NodeCreateRequest.SerializeToString,
nodes__pb2.NodeCreateResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Nodes/Get',
nodes__pb2.NodeGetRequest.SerializeToString,
nodes__pb2.NodeGetResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Update(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Nodes/Update',
nodes__pb2.NodeUpdateRequest.SerializeToString,
nodes__pb2.NodeUpdateResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Nodes/Delete',
nodes__pb2.NodeDeleteRequest.SerializeToString,
nodes__pb2.NodeDeleteResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Nodes/List',
nodes__pb2.NodeListRequest.SerializeToString,
nodes__pb2.NodeListResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) | strongdm/nodes_pb2_grpc.py | """Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import nodes_pb2 as nodes__pb2
class NodesStub(object):
"""Nodes make up the strongDM network, and allow your users to connect securely to your resources. There are two types of nodes:
    - **Gateways** are the entry points into the network. They listen for connections from the strongDM client, and provide access to databases and servers.
- **Relays** are used to extend the strongDM network into segmented subnets. They provide access to databases and servers but do not listen for incoming connections.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Create = channel.unary_unary(
'/v1.Nodes/Create',
request_serializer=nodes__pb2.NodeCreateRequest.SerializeToString,
response_deserializer=nodes__pb2.NodeCreateResponse.FromString,
)
self.Get = channel.unary_unary(
'/v1.Nodes/Get',
request_serializer=nodes__pb2.NodeGetRequest.SerializeToString,
response_deserializer=nodes__pb2.NodeGetResponse.FromString,
)
self.Update = channel.unary_unary(
'/v1.Nodes/Update',
request_serializer=nodes__pb2.NodeUpdateRequest.SerializeToString,
response_deserializer=nodes__pb2.NodeUpdateResponse.FromString,
)
self.Delete = channel.unary_unary(
'/v1.Nodes/Delete',
request_serializer=nodes__pb2.NodeDeleteRequest.SerializeToString,
response_deserializer=nodes__pb2.NodeDeleteResponse.FromString,
)
self.List = channel.unary_unary(
'/v1.Nodes/List',
request_serializer=nodes__pb2.NodeListRequest.SerializeToString,
response_deserializer=nodes__pb2.NodeListResponse.FromString,
)
class NodesServicer(object):
"""Nodes make up the strongDM network, and allow your users to connect securely to your resources. There are two types of nodes:
    - **Gateways** are the entry points into the network. They listen for connections from the strongDM client, and provide access to databases and servers.
- **Relays** are used to extend the strongDM network into segmented subnets. They provide access to databases and servers but do not listen for incoming connections.
"""
def Create(self, request, context):
"""Create registers a new Node.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Get(self, request, context):
"""Get reads one Node by ID.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""Update patches a Node by ID.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Delete removes a Node by ID.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""List gets a list of Nodes matching a given set of criteria.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_NodesServicer_to_server(servicer, server):
rpc_method_handlers = {
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=nodes__pb2.NodeCreateRequest.FromString,
response_serializer=nodes__pb2.NodeCreateResponse.SerializeToString,
),
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=nodes__pb2.NodeGetRequest.FromString,
response_serializer=nodes__pb2.NodeGetResponse.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=nodes__pb2.NodeUpdateRequest.FromString,
response_serializer=nodes__pb2.NodeUpdateResponse.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=nodes__pb2.NodeDeleteRequest.FromString,
response_serializer=nodes__pb2.NodeDeleteResponse.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=nodes__pb2.NodeListRequest.FromString,
response_serializer=nodes__pb2.NodeListResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'v1.Nodes', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Nodes(object):
"""Nodes make up the strongDM network, and allow your users to connect securely to your resources. There are two types of nodes:
- **Gateways** are the entry points into network. They listen for connection from the strongDM client, and provide access to databases and servers.
- **Relays** are used to extend the strongDM network into segmented subnets. They provide access to databases and servers but do not listen for incoming connections.
"""
@staticmethod
def Create(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Nodes/Create',
nodes__pb2.NodeCreateRequest.SerializeToString,
nodes__pb2.NodeCreateResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Nodes/Get',
nodes__pb2.NodeGetRequest.SerializeToString,
nodes__pb2.NodeGetResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Update(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Nodes/Update',
nodes__pb2.NodeUpdateRequest.SerializeToString,
nodes__pb2.NodeUpdateResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Nodes/Delete',
nodes__pb2.NodeDeleteRequest.SerializeToString,
nodes__pb2.NodeDeleteResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Nodes/List',
nodes__pb2.NodeListRequest.SerializeToString,
nodes__pb2.NodeListResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) | 0.776538 | 0.288619 |
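# A minimal usage sketch for the generated NodesStub above, assuming the
# module lives in a "strongdm" package and that an insecure local endpoint
# ("localhost:50051") is acceptable for illustration; a real strongDM
# deployment would require proper channel credentials and request fields.
import grpc
from strongdm import nodes_pb2, nodes_pb2_grpc

def list_nodes_example(endpoint="localhost:50051"):
    # Open a channel, build the stub, and issue a single List RPC.
    with grpc.insecure_channel(endpoint) as channel:
        stub = nodes_pb2_grpc.NodesStub(channel)
        request = nodes_pb2.NodeListRequest()  # filter fields are deployment-specific
        return stub.List(request)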
import socket
import argparse
import threading
import signal
import json
import requests
import sys
import time
import traceback
from queue import Queue
from contextlib import contextmanager
CLIENT2SERVER = 1
SERVER2CLIENT = 2
running = True
def log(m):
print(m, file=sys.stderr)
def mitm(buff, direction, shared):
hb = "".join("{:02x}".format(c) for c in buff)
if direction == CLIENT2SERVER:
log("-> %s ->" % hb)
elif direction == SERVER2CLIENT:
log("<- %s <-" % hb)
return buff
@contextmanager
def ignored(*exceptions):
try:
yield
except exceptions:
pass
def kill_p(a, b):
with ignored(Exception):
a.shutdown(socket.SHUT_RDWR)
a.close()
b.shutdown(socket.SHUT_RDWR)
b.close()
return
def worker(client, server, n, shared):
while running:
b = ""
with ignored(Exception):
b = client.recv(4096)
if len(b) == 0:
kill_p(client, server)
return
try:
b = mitm(b, n, shared)
except Exception:
pass
try:
server.send(b)
except Exception:
pass
kill_p(client, server)
return
kill_p(client, server)
return
def signal_handler(sn, sf):
global running
running = False
def do_proxy_main(port, remote_host, remote_port):
signal.signal(signal.SIGTERM, signal_handler)
workers = []
p = None
global bank_address
global bank_port
bank_address = args.s
bank_port = args.q
try:
shared = Queue()
p = threading.Thread(target=send_input, args=(args.c, args.d, shared))
p.start()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("0.0.0.0", port))
s.listen(1)
print("started")
sys.stdout.flush()
while running:
k, a = s.accept()
v = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
v.connect((remote_host, remote_port))
t1 = threading.Thread(target=worker, args=(k, v, CLIENT2SERVER, shared))
t2 = threading.Thread(target=worker, args=(v, k, SERVER2CLIENT, shared))
t2.start()
t1.start()
workers.append((t1, t2, k, v))
except Exception:
pass
signal_handler(None, None)
for t1, t2, k, v in workers:
kill_p(k, v)
t1.join()
t2.join()
p.join()
return
def send_input(host, port, shared):
global running
while running:
try:
d = shared.get(block=True, timeout=1)
time.sleep(1)
r = requests.post("http://" + host + ":" + str(port), data={'REQUEST': json.dumps(d)})
log(r.text)
except Exception:
pass
time.sleep(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Proxy')
parser.add_argument('-p', type=int, default=4000, help="listen port")
parser.add_argument('-s', type=str, default="127.0.0.1", help="server ip address")
parser.add_argument('-q', type=int, default=3000, help="server port")
parser.add_argument('-c', type=str, default="127.0.0.1", help="command server")
parser.add_argument('-d', type=int, default=5000, help="command port")
args = parser.parse_args()
do_proxy_main(args.p, args.s, args.q) | attacks/confi_check/mitm.py | 0.211498 | 0.091707
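# A hedged launch sketch for the proxy script above, run as a subprocess with
# illustrative flag values (listen on 4000, forward to 127.0.0.1:3000, report
# captured traffic to a command server on 127.0.0.1:5000). The file name
# "mitm.py" and all ports are assumptions, not part of the original script.
import subprocess
import sys

proxy = subprocess.Popen([
    sys.executable, "mitm.py",        # assumed file name of the script above
    "-p", "4000",                     # listen port
    "-s", "127.0.0.1", "-q", "3000",  # upstream server address and port
    "-c", "127.0.0.1", "-d", "5000",  # command server address and port
])
# ... exercise the proxied service here ...
proxy.terminate()                     # SIGTERM triggers signal_handler above
proxy.wait()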
from __future__ import absolute_import, unicode_literals
import requests
import json
import os
import sys
import urllib
# Python 2 only: reload(sys) re-exposes setdefaultencoding so the default
# string encoding can be forced to UTF-8 (both were removed in Python 3).
reload(sys)
sys.setdefaultencoding('utf8')
from flask import Flask, request, abort, render_template
from wechatpy import parse_message, create_reply
from wechatpy.utils import check_signature
from wechatpy.exceptions import (
InvalidSignatureException,
InvalidAppIdException,
)
#send data to iot hardware
content=""
url="https://api.weixin.qq.com/hardware/mydevice/platform/ctrl_device?access_token=<KEY>"
headers={'content-type':'application/json'}
datajson={ "device_type": "gh_6064295bfad2",
"device_id":"gh_6064295bfad2_d11fafd815c759ba",
"user": "oYd-ytwz-EYkcXPb1mo4DmCKaUBw",
"services": {
"operation_status": {
"status": 1
}
},
"data": ""
}
# set token or get from environments
TOKEN = os.getenv('WECHAT_TOKEN', '<PASSWORD>')
AES_KEY = os.getenv('WECHAT_AES_KEY', 'a636227739200a71b1ec76be9e5bec81')
APPID = os.getenv('WECHAT_APPID', 'wxc7ffbe44cacb90d0')
app = Flask(__name__)
def fetch_app_access_token(app_id, app_secret):
resp = urllib.urlopen(
'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid='+app_id + '&secret=' + app_secret)
if resp.getcode() == 200:
resp_body=resp.read()
token=json.loads(resp_body.decode('utf-8'))['access_token']
global url
url='https://api.weixin.qq.com/hardware/mydevice/platform/ctrl_device?access_token='+token
print('fetch token update '+url)
return resp_body  # the body was already read above; a second resp.read() would return an empty string
else:
return None
def fixup(adict,k,v):
for key in adict.keys():
if key==k:
adict[key]=v
elif type(adict[key]) is dict:
fixup(adict[key],k,v)
@app.route('/')
def index():
host = request.url_root
return render_template('index.html', host=host)
@app.route('/wechat', methods=['GET', 'POST'])
def wechat():
signature = request.args.get('signature', '')
timestamp = request.args.get('timestamp', '')
nonce = request.args.get('nonce', '')
encrypt_type = request.args.get('encrypt_type', 'raw')
msg_signature = request.args.get('msg_signature', '')
try:
check_signature(TOKEN, signature, timestamp, nonce)
except InvalidSignatureException:
abort(403)
if request.method == 'GET':
echo_str = request.args.get('echostr', '')
return echo_str
# POST request
if encrypt_type == 'raw':
# plaintext mode
print("msg content "+request.data)
msg = parse_message(request.data)
if msg.type == 'text':
print('msg_content:', msg.content )
datacontent=json.loads(json.dumps(datajson))
fixup(datacontent,'data',msg.content.decode('unicode_escape'))
r=requests.post(url,json.dumps(datacontent))
print('push to iot device'+r.text)
if "errmsg" in r.text:
if json.loads(r.text)['errmsg']=='access_token expired' or 'access_token' in json.loads(r.text)['errmsg']:
fetch_app_access_token('<KEY>','a636227739200a71b1ec76be9e5bec81')
r=requests.post(url,json.dumps(datacontent))
print('again push to iot device'+r.text+'url:'+url)
reply = create_reply(msg.content, msg)
elif msg.type=='voice':
print('voice message '+str(msg.media_id)+ 'voice message '+str(msg.format)+'voice recognition '+str(msg.recognition))
data='{\"msg_type\":voice,\"media_id+\":'+msg.media_id+',\"format\":'+msg.format+'}'
datacontent=json.loads(json.dumps(datajson))
fixup(datacontent,'data',data)
r=requests.post(url,json.dumps(datacontent))
print('voice to iot device'+r.text)
if "errmsg" in r.text:
if json.loads(r.text)['errmsg']=='access_token expired':
fetch_app_access_token('<KEY>','a636227739200a71b1ec76be9e5bec81')
r=requests.post(url,json.dumps(datacontent))
print('again push to iot device'+r.text+'url:'+url)
reply=create_reply('voice message',msg)
else:
reply = create_reply('Sorry, can not handle this for now', msg)
return reply.render()
else:
# encryption mode
from wechatpy.crypto import WeChatCrypto
crypto = WeChatCrypto(TOKEN, AES_KEY, APPID)
try:
msg = crypto.decrypt_message(
request.data,
msg_signature,
timestamp,
nonce
)
except (InvalidSignatureException, InvalidAppIdException):
abort(403)
else:
msg = parse_message(msg)
if msg.type == 'text':
reply = create_reply(msg.content, msg)
else:
reply = create_reply('Sorry, can not handle this for now', msg)
return crypto.encrypt_message(reply.render(), nonce, timestamp)
if __name__ == '__main__':
app.run('192.168.127.12',80, debug=True) | examples/echo/app.py | 0.20091 | 0.0584
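# A self-contained sketch of the recursive fixup helper defined above: it
# walks a nested dict and replaces the value of a given key wherever it
# appears. The sample payload below is illustrative only.
def fixup_example():
    def fixup(adict, k, v):  # re-stated so the sketch runs on its own
        for key in adict.keys():
            if key == k:
                adict[key] = v
            elif type(adict[key]) is dict:
                fixup(adict[key], k, v)

    sample = {"device_type": "gh_x", "services": {"operation_status": {"status": 1}}, "data": ""}
    fixup(sample, "data", "turn_on_light")
    return sample  # sample["data"] is now "turn_on_light"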
import logging
import os
import time
from core.results_processor import util
from core.tbmv3 import trace_processor
from tracing.metrics import metric_runner
# Aggregated TBMv2 trace is saved under this name.
HTML_TRACE_NAME = 'trace.html'
# Concatenated proto trace is saved under this name.
CONCATENATED_PROTO_NAME = 'trace.pb'
def _RunMetric(test_result, metrics):
html_trace = test_result['outputArtifacts'][HTML_TRACE_NAME]
html_local_path = html_trace['filePath']
html_remote_url = html_trace.get('viewUrl')
# The timeout needs to be coordinated with the Swarming IO timeout for the
# task that runs this code. If this timeout is longer or close in length
# to the swarming IO timeout then we risk being forcibly killed for not
# producing any output. Note that this could be fixed by periodically
# outputting logs while waiting for metrics to be calculated.
TEN_MINUTES = 60 * 10
mre_result = metric_runner.RunMetricOnSingleTrace(
html_local_path, metrics, canonical_url=html_remote_url,
timeout=TEN_MINUTES,
extra_import_options={'trackDetailedModelStats': True})
if mre_result.failures:
util.SetUnexpectedFailure(test_result)
for f in mre_result.failures:
logging.error('Failure recorded for test %s: %s',
test_result['testPath'], f)
return mre_result.pairs.get('histograms', [])
def ComputeTBMv2Metrics(test_result):
"""Compute metrics on aggregated traces in parallel.
For each test run that has an aggregate trace and some TBMv2 metrics listed
in its tags, compute the metrics and return the list of all resulting
histograms.
"""
artifacts = test_result.get('outputArtifacts', {})
if test_result['status'] == 'SKIP':
return
metrics = [tag['value'] for tag in test_result.get('tags', [])
if tag['key'] == 'tbmv2']
if not metrics:
logging.debug('%s: No TBMv2 metrics specified.', test_result['testPath'])
return
if HTML_TRACE_NAME not in artifacts:
util.SetUnexpectedFailure(test_result)
logging.error('%s: No traces to compute metrics on.',
test_result['testPath'])
return
trace_size_in_mib = (os.path.getsize(artifacts[HTML_TRACE_NAME]['filePath'])
/ (2 ** 20))
# Bails out on traces that are too big. See crbug.com/812631 for more
# details.
if trace_size_in_mib > 400:
util.SetUnexpectedFailure(test_result)
logging.error('%s: Trace size is too big: %s MiB',
test_result['testPath'], trace_size_in_mib)
return
start = time.time()
test_result['_histograms'].ImportDicts(_RunMetric(test_result, metrics))
logging.info('%s: Computing TBMv2 metrics took %.3f seconds.' % (
test_result['testPath'], time.time() - start))
def ComputeTBMv3Metrics(test_result,
trace_processor_path,
fetch_power_profile=False):
artifacts = test_result.get('outputArtifacts', {})
if test_result['status'] == 'SKIP':
return
metrics = [tag['value'] for tag in test_result.get('tags', [])
if tag['key'] == 'tbmv3']
if not metrics:
logging.debug('%s: No TBMv3 metrics specified.', test_result['testPath'])
return
if CONCATENATED_PROTO_NAME not in artifacts:
# TODO(crbug.com/990304): This is only a warning now, because proto trace
# generation is enabled only on selected bots. Make this an error
# when Telemetry is switched over to proto trace generation everywhere.
# Also don't forget to call util.SetUnexpectedFailure(test_result).
logging.warning('%s: No proto traces to compute metrics on.',
test_result['testPath'])
return
start = time.time()
for metric in metrics:
histograms = trace_processor.RunMetric(
trace_processor_path, artifacts[CONCATENATED_PROTO_NAME]['filePath'],
metric, fetch_power_profile)
test_result['_histograms'].Merge(histograms)
logging.info('%s: Computing TBMv3 metrics took %.3f seconds.' % (
test_result['testPath'], time.time() - start)) | tools/perf/core/results_processor/compute_metrics.py | 0.438785 | 0.118947
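# A hedged sketch of the minimal test_result shape ComputeTBMv2Metrics above
# consumes. All values are illustrative; in the real pipeline '_histograms'
# is a tracing HistogramSet, stubbed here with a tiny stand-in class.
class _FakeHistogramSet:
    def ImportDicts(self, dicts):
        self.dicts = list(dicts)

example_test_result = {
    'testPath': 'system_health/browse_news',  # assumed benchmark/story name
    'status': 'PASS',
    'tags': [{'key': 'tbmv2', 'value': 'loadingMetric'}],
    'outputArtifacts': {
        'trace.html': {'filePath': '/tmp/trace.html', 'viewUrl': None},  # HTML_TRACE_NAME
    },
    '_histograms': _FakeHistogramSet(),
}
# ComputeTBMv2Metrics(example_test_result) would run loadingMetric over the
# trace file and import the resulting histogram dicts into '_histograms'.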
import pytest
import smbclient._pool as pool
from smbprotocol.exceptions import (
InvalidParameter,
)
from .conftest import (
DOMAIN_NAME,
DOMAIN_REFERRAL,
)
@pytest.fixture()
def reset_config():
config = pool.ClientConfig()
client_guid = config.client_guid
username = config.username
password = config.password
domain_controller = config.domain_controller
skip_dfs = config.skip_dfs
yield
config.set(client_guid=client_guid, username=username, password=password, skip_dfs=skip_dfs,
domain_controller=domain_controller)
config._domain_cache = []
config._referral_cache = []
def test_client_config_private_key(reset_config):
with pytest.raises(ValueError, match='Cannot set private attribute _domain_controller'):
pool.ClientConfig().set(_domain_controller='test')
def test_set_config_option(reset_config):
pool.ClientConfig(username='test')
assert pool.ClientConfig().username == 'test'
pool.ClientConfig(username=None)
assert not pool.ClientConfig().username
def test_config_domain_cache(reset_config, monkeypatch, mocker):
dfs_mock = mocker.MagicMock()
dfs_mock.return_value = DOMAIN_REFERRAL
monkeypatch.setattr(pool, 'get_smb_tree', mocker.MagicMock())
monkeypatch.setattr(pool, 'dfs_request', dfs_mock)
config = pool.ClientConfig()
domain_referral = config.lookup_domain(DOMAIN_NAME)
assert domain_referral is None
config.domain_controller = DOMAIN_NAME
domain_referral = config.lookup_domain(DOMAIN_NAME)
assert domain_referral.domain_name == '\\%s' % DOMAIN_NAME
assert dfs_mock.call_count == 1
assert dfs_mock.call_args[0][1] == u''
def test_config_domain_cache_not_dfs_endpoint(reset_config, monkeypatch, mocker):
dfs_mock = mocker.MagicMock()
dfs_mock.side_effect = InvalidParameter()
warning_mock = mocker.MagicMock()
monkeypatch.setattr(pool, 'get_smb_tree', mocker.MagicMock())
monkeypatch.setattr(pool, 'dfs_request', dfs_mock)
monkeypatch.setattr(pool.log, 'warning', warning_mock)
config = pool.ClientConfig()
config.domain_controller = DOMAIN_NAME
domain_referral = config.lookup_domain(DOMAIN_NAME)
assert domain_referral is None
assert warning_mock.call_count == 1
assert 'cannot use as DFS domain cache source' in warning_mock.call_args[0][0]
def test_reset_connection_error_fail(mocker):
connection_mock = mocker.MagicMock()
connection_mock.disconnect.side_effect = Exception("exception")
with pytest.raises(Exception, match="exception"):
pool.reset_connection_cache(connection_cache={'conn': connection_mock})
def test_reset_connection_error_warning(monkeypatch, mocker):
connection_mock = mocker.MagicMock()
connection_mock.disconnect.side_effect = Exception("exception")
warning_mock = mocker.MagicMock()
monkeypatch.setattr(pool.warnings, 'warn', warning_mock)
pool.reset_connection_cache(fail_on_error=False, connection_cache={'conn': connection_mock})
assert warning_mock.call_count == 1
assert warning_mock.call_args[0][0] == 'Failed to close connection conn: exception' | tests/test_smbclient_pool.py | 0.533397 | 0.127625
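# A hedged sketch of the process-wide ClientConfig behaviour the tests above
# exercise: keyword arguments passed to the constructor mutate shared state,
# so a later bare ClientConfig() sees the same values. The credentials are
# placeholders only.
import smbclient._pool as pool

pool.ClientConfig(username='user@REALM.EXAMPLE', password='placeholder-secret')
assert pool.ClientConfig().username == 'user@REALM.EXAMPLE'
pool.ClientConfig(username=None, password=None)  # restore, as reset_config does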
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
dummy_val = np.random.randn(2, 3)
in_val = np.full((2, 3), -2)
cpu0_device = flow.device("cpu")
gpu0_device = flow.device("cuda")
class DummyModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("dummy_buf", flow.Tensor(dummy_val))
self.dummy_para = flow.nn.Parameter(flow.Tensor(dummy_val))
def forward(self, x):
return (self.dummy_para * x) + self.dummy_buf
def _test_dummy_module(test_case):
m = DummyModule()
test_case.assertEqual(m.dummy_buf.device, cpu0_device)
test_case.assertEqual(m.dummy_para.device, cpu0_device)
input = flow.Tensor(in_val)
output = m(input)
test_case.assertTrue(np.allclose(output.numpy(), -dummy_val, 1e-4, 1e-4))
test_case.assertEqual(m.dummy_buf.grad, None)
test_case.assertEqual(m.dummy_para.grad, None)
test_case.assertEqual(input.device, cpu0_device)
test_case.assertEqual(output.device, cpu0_device)
def _test_dummy_module_to(test_case):
m = DummyModule()
test_case.assertEqual(m.dummy_buf.device, cpu0_device)
test_case.assertEqual(m.dummy_para.device, cpu0_device)
# test to
m.to(gpu0_device)
test_case.assertEqual(m.dummy_buf.device, gpu0_device)
test_case.assertTrue(m.dummy_buf.is_leaf)
test_case.assertTrue(not m.dummy_buf.requires_grad)
test_case.assertEqual(m.dummy_para.device, gpu0_device)
test_case.assertTrue(m.dummy_para.is_leaf)
test_case.assertTrue(m.dummy_para.requires_grad)
input = flow.Tensor(in_val).to(gpu0_device)
output = m(input)
test_case.assertTrue(np.allclose(output.numpy(), -dummy_val, 1e-4, 1e-4))
test_case.assertEqual(m.dummy_buf.grad, None)
test_case.assertEqual(m.dummy_para.grad, None)
test_case.assertEqual(input.device, gpu0_device)
test_case.assertEqual(output.device, gpu0_device)
# test to with backward
output_grad = flow.ones((2, 3)).to(gpu0_device)
output.backward(output_grad)
test_case.assertEqual(output_grad.device, gpu0_device)
test_case.assertEqual(m.dummy_buf.grad, None)
test_case.assertTrue(np.allclose(m.dummy_para.grad.numpy(), in_val, 1e-4, 1e-4))
test_case.assertEqual(m.dummy_para.grad.device, gpu0_device)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
".numpy() doesn't work in lazy mode",
)
class TestModuleTo(flow.unittest.TestCase):
def test_module_to(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
_test_dummy_module,
_test_dummy_module_to,
]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
if __name__ == "__main__":
unittest.main() | oneflow/python/test/modules/test_module_to.py | 0.661595 | 0.716746
# pylint:disable=redefined-outer-name
import numpy as np
import pytest
from pyei import GoodmansER
@pytest.fixture
def group_and_vote_fractions():
"""Sample group and vote fractions, where every member of the demographic
group votes for the given candidate and every non-member of the
demographic group does not vote for the given candidate.
"""
group_share = np.array([0, 0.2, 0.4, 0.6, 0.8])
vote_share = np.array([0, 0.2, 0.4, 0.6, 0.8])
return group_share, vote_share
@pytest.fixture
def group_and_vote_fractions_with_pop():
"""Sample group and vote fractions, where every member of the demographic
group votes for the given candidate and 10% of the demographic group's
complement supports the given candidate (i.e., slope = 1, intercept = 0.1),
with an exception of one precinct.
All precincts have population 1000, except one precinct that has population
1 and does not follow the above formula.
"""
group_share = np.array([0, 0.2, 0.4, 0.6, 0.8, 0.9])
vote_share = np.array([0.1, 0.3, 0.5, 0.7, 0.9, 0.9])
populations = np.array([1000, 1000, 1000, 1000, 1000, 1])
return group_share, vote_share, populations
def test_fit(group_and_vote_fractions):
model = GoodmansER()
group_share, vote_share = group_and_vote_fractions
model.fit(group_share, vote_share)
np.testing.assert_almost_equal(model.intercept_, 0)
np.testing.assert_almost_equal(model.slope_, 1)
def test_weighted_fit(group_and_vote_fractions_with_pop):
model = GoodmansER(is_weighted_regression=True)
group_share, vote_share, pops = group_and_vote_fractions_with_pop
model.fit(group_share, vote_share, pops)
np.testing.assert_almost_equal(model.intercept_, 0.1, decimal=3)
np.testing.assert_almost_equal(model.slope_, 1, decimal=3)
def test_summary():
model = GoodmansER()
model.demographic_group_name = "Trees"
model.candidate_name = "Lorax"
model.voting_prefs_est_ = 1.0
model.voting_prefs_complement_est_ = 0.0
expected_summary = """Goodmans ER
Est. fraction of Trees
voters who voted for Lorax is
1.000
Est. fraction of non- Trees
voters who voted for Lorax is
0.000
"""
assert model.summary() == expected_summary
model.is_weighted_regression = True
expected_summary = """Goodmans ER, weighted by population
Est. fraction of Trees
voters who voted for Lorax is
1.000
Est. fraction of non- Trees
voters who voted for Lorax is
0.000
"""
assert model.summary() == expected_summary
def test_plot(group_and_vote_fractions):
model = GoodmansER()
group_share, vote_share = group_and_vote_fractions
model.fit(group_share, vote_share)
_, ax = model.plot()
x_plot, y_plot = ax.lines[0].get_xydata().T
np.testing.assert_allclose(x_plot, y_plot, atol=1e-10)
assert (0.0, 1.0) == ax.get_xlim()
assert (0.0, 1.0) == ax.get_ylim() | test/test_goodmans_er.py | 0.726717 | 0.696023
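# A numpy-only sketch of the relationship GoodmansER estimates in the tests
# above: vote_share ~ intercept + slope * group_share, fit by least squares.
# The synthetic data mirrors the fixtures (slope 1, intercept 0.1).
import numpy as np

group_share = np.array([0.0, 0.2, 0.4, 0.6, 0.8])
vote_share = 0.1 + 1.0 * group_share
slope, intercept = np.polyfit(group_share, vote_share, deg=1)
# slope is ~1.0 and intercept ~0.1, matching GoodmansER's slope_/intercept_.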
import os
import json
import boto3
from boto3.dynamodb.conditions import Key, Attr
from policy import MFAuth
application = os.environ['application']
environment = os.environ['environment']
apps_table_name = '{}-{}-apps'.format(application, environment)
schema_table_name = '{}-{}-schema'.format(application, environment)
waves_table_name = '{}-{}-waves'.format(application, environment)
servers_table_name = '{}-{}-servers'.format(application, environment)
apps_table = boto3.resource('dynamodb').Table(apps_table_name)
schema_table = boto3.resource('dynamodb').Table(schema_table_name)
waves_table = boto3.resource('dynamodb').Table(waves_table_name)
servers_table = boto3.resource('dynamodb').Table(servers_table_name)
def lambda_handler(event, context):
if event['httpMethod'] == 'GET':
resp = apps_table.get_item(Key={'app_id': event['pathParameters']['appid']})
if 'Item' in resp:
return {'headers': {'Access-Control-Allow-Origin': '*'},
'body': json.dumps(resp['Item'])}
else:
return {'headers': {'Access-Control-Allow-Origin': '*'},
'statusCode': 400, 'body': 'app Id: ' + str(event['pathParameters']['appid']) + ' does not exist'}
elif event['httpMethod'] == 'PUT':
auth = MFAuth()
authResponse = auth.getUserAttributePolicy(event)
if authResponse['action'] == 'allow':
try:
body = json.loads(event['body'])
app_attributes = []
if "app_id" in body:
return {'headers': {'Access-Control-Allow-Origin': '*'},
'statusCode': 400, 'body': "You cannot modify app_id, this is managed by the system"}
# check if app id exist
existing_attr = apps_table.get_item(Key={'app_id': event['pathParameters']['appid']})
print(existing_attr)
if 'Item' not in existing_attr:
return {'headers': {'Access-Control-Allow-Origin': '*'},
'statusCode': 400, 'body': 'app Id: ' + str(event['pathParameters']['appid']) + ' does not exist'}
# Validate Wave_id
if 'wave_id' in body:
waves = waves_table.scan()
check = False
for wave in waves['Items']:
if wave['wave_id'] == str(body['wave_id']):
check = True
if check == False:
message = 'wave Id: ' + body['wave_id'] + ' does not exist'
return {'headers': {'Access-Control-Allow-Origin': '*'},
'statusCode': 400, 'body': message}
# Check if there is a duplicate app_name
apps = apps_table.scan()
for app in apps['Items']:
if 'app_name' in body:
if app['app_name'].lower() == str(body['app_name']).lower() and app['app_id'] != str(event['pathParameters']['appid']):
return {'headers': {'Access-Control-Allow-Origin': '*'},
'statusCode': 400, 'body': 'app_name: ' + body['app_name'] + ' already exists'}
# Check if attribute is defined in the App schema
for app_schema in schema_table.scan()['Items']:
if app_schema['schema_name'] == "app":
app_attributes = app_schema['attributes']
for key in body.keys():
check = False
for attribute in app_attributes:
if key == attribute['name']:
check = True
if check == False:
message = "App attribute: " + key + " is not defined in the App schema"
return {'headers': {'Access-Control-Allow-Origin': '*'},
'statusCode': 400, 'body': message}
# Check if attribute in the body matches the list value defined in schema
for attribute in app_attributes:
if 'listvalue' in attribute:
listvalue = attribute['listvalue'].split(',')
for key in body.keys():
if key == attribute['name']:
if body[key] not in listvalue:
message = "App attribute " + key + " for app " + body['app_name'] + " is '" + body[key] + "', does not match the list values '" + attribute['listvalue'] + "' defined in the App schema"
return {'headers': {'Access-Control-Allow-Origin': '*'},
'statusCode': 400, 'body': message}
# Merge new attributes with existing one
for key in body.keys():
existing_attr['Item'][key] = body[key]
print(existing_attr)
resp = apps_table.put_item(
Item=existing_attr['Item']
)
return {'headers': {'Access-Control-Allow-Origin': '*'},
'body': json.dumps(resp)}
except Exception as e:
print(e)
return {'headers': {'Access-Control-Allow-Origin': '*'},
'statusCode': 400, 'body': 'malformed json input'}
else:
return {'headers': {'Access-Control-Allow-Origin': '*'},
'statusCode': 401,
'body': json.dumps(authResponse)}
elif event['httpMethod'] == 'DELETE':
auth = MFAuth()
authResponse = auth.getUserResourceCrationPolicy(event)
if authResponse['action'] == 'allow':
resp = apps_table.get_item(Key={'app_id': event['pathParameters']['appid']})
if 'Item' in resp:
respdel = apps_table.delete_item(Key={'app_id': event['pathParameters']['appid']})
if respdel['ResponseMetadata']['HTTPStatusCode'] == 200:
# Remove App Id from servers
servers = servers_table.query(
IndexName='app_id-index',
KeyConditionExpression=Key('app_id').eq(event['pathParameters']['appid'])
)
if servers['Count'] != 0:  # '!=' rather than 'is not': identity checks against int literals are unreliable
serverids = []
for server in servers['Items']:
serverids.append(str(server['server_id']))
for id in serverids:
serverattr = servers_table.get_item(Key={'server_id': id})
del serverattr['Item']['app_id']
serverupdate = servers_table.put_item(
Item=serverattr['Item']
)
return {'headers': {'Access-Control-Allow-Origin': '*'},
'statusCode': 200, 'body': "App " + str(resp['Item']) + " was successfully deleted"}
else:
return {'headers': {'Access-Control-Allow-Origin': '*'},
'statusCode': respdel['ResponseMetadata']['HTTPStatusCode'], 'body': json.dumps(respdel)}
else:
return {'headers': {'Access-Control-Allow-Origin': '*'},
'statusCode': 400, 'body': 'app Id: ' + str(event['pathParameters']['appid']) + ' does not exist'}
else:
return {'headers': {'Access-Control-Allow-Origin': '*'},
'statusCode': 401,
'body': json.dumps(authResponse)} | source/lambda_app_item.py | 0.258607 | 0.054525
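# A hedged sketch of invoking lambda_handler above with an API Gateway-style
# event. The app id and the None context are illustrative; a real invocation
# also needs the DynamoDB tables and the 'application'/'environment' env vars.
example_event = {
    'httpMethod': 'GET',
    'pathParameters': {'appid': '1'},
    'body': None,
}
# response = lambda_handler(example_event, None)
# response['body'] would hold the JSON-serialised app item, or a 400 status
# body if app id '1' does not exist.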
__all__ = ['convert_examples_to_features', 'preprocess_data', 'ALL_TASKS_DIC', 'EXTRA_METRICS',
'RACEHash', 'ReCoRDHash', 'process_MultiRC_answers', 'process_ReCoRD_answers', 'process_RACE_answers']
# Classes being called directly by finetune_bort.py
import os
import re
import json
import logging
import fnmatch
import mxnet as mx
import gluonnlp as nlp
import numpy as np
from collections import OrderedDict
from functools import partial
from mxnet import gluon, nd
from gluonnlp.base import get_home_dir
from data.preprocessing_utils import truncate_seqs_equal, concat_sequences
from data.classification import MRPCTask, QQPTask, RTETask, STSBTask, SSTTask
from data.classification import QNLITask, CoLATask, MNLITask, WNLITask
from data.classification import SuperGLUERTETask, CBTask, WSCTask, WiCTask, COPATask
from data.classification import MultiRCTask, BoolQTask, ReCoRDTask, AXbTask, AXgTask
from data.classification import RACEHTask, RACEMTask
from .metrics import AvgF1, GP
ALL_TASKS_DIC = {
'MRPC': MRPCTask(),
'QQP': QQPTask(),
'QNLI': QNLITask(),
'RTE': RTETask(),
'STS-B': STSBTask(),
'CoLA': CoLATask(),
'MNLI': MNLITask(),
'WNLI': WNLITask(),
'SST': SSTTask(),
'SuperGLUERTE': SuperGLUERTETask(),
'CB': CBTask(),
'WSC': WSCTask(),
"WiC": WiCTask(),
"COPA": COPATask(),
"MultiRC": MultiRCTask(),
"BoolQ": BoolQTask(),
"ReCoRD": ReCoRDTask(),
"AXb": AXbTask(),
"AXg": AXgTask(),
"RACE-H": RACEHTask(),
"RACE-M": RACEMTask()
}
EXTRA_METRICS = {
"CB": [("avg_f1", AvgF1())],
"AXg": [("GP", GP())],
}
def do_log(batch_id, batch_num, metric, step_loss, log_interval, epoch_id=None, learning_rate=None):
"""Generate and print out the log messages. """
metric_nm, metric_val = metric.get()
if not isinstance(metric_nm, list):
metric_nm, metric_val = [metric_nm], [metric_val]
if epoch_id is None:
eval_str = '[Batch %d/%d] loss=%.4f, metrics:' + \
','.join([i + ':%.4f' for i in metric_nm])
logging.info(eval_str, batch_id + 1, batch_num,
step_loss / log_interval, *metric_val)
else:
train_str = '[Epoch %d Batch %d/%d] loss=%.4f, lr=%.10f, metrics:' + \
','.join([i + ':%.4f' for i in metric_nm])
logging.info(train_str, epoch_id + 1, batch_id + 1, batch_num, step_loss / log_interval,
learning_rate, *metric_val)
def convert_examples_to_features(example, tokenizer=None, truncate_length=512, cls_token=None,
sep_token=None, class_labels=None, label_alias=None, vocab=None,
is_test=False):
"""Convert GLUE/SuperGLUE classification and regression examples into
the necessary features"""
if not is_test:
label_dtype = 'int32' if class_labels else 'float32'
example, label = example[:-1], example[-1]
# create label maps if classification task
if class_labels:
label_map = {}
for (i, l) in enumerate(class_labels):
label_map[l] = i
if label_alias:
for key in label_alias:
label_map[key] = label_map[label_alias[key]]
# Fix for BoolQ, WSC, and MultiRC: JSON label values are loaded as booleans
# rather than strings, so normalise them to the string labels used above.
if type(label) == bool:
label = "true" if label else "false"
# Fix for COPA
if type(label) == int:
label = "0" if label == 0 else "1"
label = label_map[label]
label = np.array([label], dtype=label_dtype)
tokens_raw = [tokenizer(l) for l in example]
tokens_trun = truncate_seqs_equal(tokens_raw, truncate_length)
tokens_trun[0] = [cls_token] + tokens_trun[0]
tokens, segment_ids, _ = concat_sequences(
tokens_trun, [[sep_token]] * len(tokens_trun))
input_ids = vocab[tokens]
valid_length = len(input_ids)
if not is_test:
return input_ids, segment_ids, valid_length, label
else:
return input_ids, segment_ids, valid_length
def preprocess_data(tokenizer, task, batch_size, dev_batch_size, max_len, vocab, world_size=None):
"""Train/eval Data preparation function."""
label_dtype = 'int32' if task.class_labels else 'float32'
truncate_length = max_len - 3 if task.is_pair else max_len - 2
trans = partial(convert_examples_to_features, tokenizer=tokenizer,
truncate_length=truncate_length,
cls_token=vocab.bos_token,
sep_token=vocab.eos_token,
class_labels=task.class_labels,
label_alias=task.label_alias, vocab=vocab)
# task.dataset_train returns (segment_name, dataset)
train_tsv = task.dataset_train()[1]
data_train = mx.gluon.data.SimpleDataset(list(map(trans, train_tsv)))
data_train_len = data_train.transform(lambda _, segment_ids, valid_length, label: valid_length,
lazy=False)
# bucket sampler for training
pad_val = vocab[vocab.padding_token]
batchify_fn = nlp.data.batchify.Tuple(
nlp.data.batchify.Pad(axis=0, pad_val=pad_val),
nlp.data.batchify.Pad(axis=0, pad_val=0),
nlp.data.batchify.Stack(), # length
nlp.data.batchify.Stack(label_dtype)) # label
if world_size is not None:
batch_sampler = nlp.data.sampler.FixedBucketSampler(data_train_len, batch_size=batch_size,
num_buckets=15, ratio=0, shuffle=True,
num_shards=world_size)
loader_train = nlp.data.ShardedDataLoader(dataset=data_train, num_workers=4,
batch_sampler=batch_sampler, batchify_fn=batchify_fn)
else:
batch_sampler = nlp.data.sampler.FixedBucketSampler(data_train_len, batch_size=batch_size,
num_buckets=15, ratio=0, shuffle=True)
loader_train = mx.gluon.data.DataLoader(dataset=data_train, num_workers=4,
batch_sampler=batch_sampler, batchify_fn=batchify_fn)
# data dev. For MNLI, more than one dev set is available
dev_tsv = task.dataset_dev()
dev_tsv_list = dev_tsv if isinstance(dev_tsv, list) else [dev_tsv]
loader_dev_list = []
for segment, data in dev_tsv_list:
data_dev = mx.gluon.data.SimpleDataset(list(map(trans, data)))
loader_dev = mx.gluon.data.DataLoader(data_dev, batch_size=dev_batch_size, num_workers=4,
shuffle=False, batchify_fn=batchify_fn)
loader_dev_list.append((segment, loader_dev))
# batchify for data test
test_batchify_fn = nlp.data.batchify.Tuple(
nlp.data.batchify.Pad(axis=0, pad_val=pad_val),
nlp.data.batchify.Pad(axis=0, pad_val=0),
nlp.data.batchify.Stack())
# transform for data test
test_trans = partial(convert_examples_to_features, tokenizer=tokenizer, truncate_length=truncate_length,
cls_token=vocab.bos_token,
sep_token=vocab.eos_token,
class_labels=None, is_test=True, vocab=vocab)
# data test. For MNLI, more than one test set is available
test_tsv = task.dataset_test()
test_tsv_list = test_tsv if isinstance(test_tsv, list) else [test_tsv]
loader_test_list = []
for segment, data in test_tsv_list:
data_test = mx.gluon.data.SimpleDataset(list(map(test_trans, data)))
loader_test = mx.gluon.data.DataLoader(data_test, batch_size=dev_batch_size, num_workers=4,
shuffle=False, batchify_fn=test_batchify_fn)
loader_test_list.append((segment, loader_test))
# Return data_dev for ReCoRD and MultiRC
return loader_train, data_dev, loader_dev_list, loader_test_list, len(data_train)
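# Rough usage sketch (assumption, not executed here): wiring preprocess_data to
# one of the tasks registered above. The `tokenizer`/`vocab` pair is assumed to
# come from the pretrained Bort model loaded elsewhere in the finetuning script.
def _demo_preprocess_sst(tokenizer, vocab):
    task = ALL_TASKS_DIC['SST']
    loaders = preprocess_data(tokenizer, task, batch_size=32, dev_batch_size=8,
                              max_len=128, vocab=vocab)
    loader_train, data_dev, loader_dev_list, loader_test_list, num_train = loaders
    # Each training batch is (padded token ids, padded segment ids, lengths, labels);
    # test batches drop the labels.
    return loader_train, loader_dev_list, num_train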
def MultiRCHash(test_dataset_location):
""" MultiRC has multiple nested points. Return a list of dictionaries with
the predictions to fill out.
"""
dataset = [json.loads(l, object_pairs_hook=OrderedDict)
for l in open(test_dataset_location, 'r', encoding='utf8').readlines()]
line_dict = [(l["idx"], l["passage"]) for l in dataset]
lines = []
for idx, line in line_dict:
questions = line["questions"]
line_hashes = {"idx": idx, "passage": {"questions": []}}
for question in questions:
question_dict = {"idx": question["idx"], "answers": []}
for answer in question["answers"]:
question_dict["answers"].append(
{"idx": answer["idx"], "label": 0})
line_hashes["passage"]["questions"].append(question_dict)
lines.append(line_hashes)
return lines
def ReCoRDHash(dataset_location):
""" Because of the way we've setup ReCoRD, we need to figure out a way to translate
it back into a viable answer.
"""
dataset = [json.loads(l, object_pairs_hook=OrderedDict)
for l in open(dataset_location, 'r', encoding='utf8').readlines()]
is_test = "test" in dataset_location
all_lines = [(l["idx"], l["passage"], l["qas"]) for l in dataset]
lines = {}
for idx, line, qas in all_lines:
entities = sorted(
set([line["text"][e["start"]:e["end"] + 1] for e in line["entities"]]))
for question in qas:
tmp_lines = []
answers = None if is_test else [
ans["text"] for ans in question["answers"]]
for entity in entities:
is_answer = False
if not is_test:
is_answer = entity in answers
tmp_lines.append(
{"idx": question["idx"], "label": entity, "is_answer": is_answer})
lines[question["idx"]] = tmp_lines
return lines
def RACEHash(dataset_location, task_name, segment='test'):
""" Because of the way we've setup RACE-H/RACE-M, we need to figure out a way to
translate it back into a viable answer.
"""
if dataset_location is None:
dataset_location = os.path.join(get_home_dir(), 'datasets', 'race')
task = "high" if task_name[-1] == "H" else "middle"
test_dataset_location = os.path.join(dataset_location, segment, task)
filenames = [os.path.expanduser(f) for f in os.listdir(
test_dataset_location) if fnmatch.fnmatch(f, '*.txt')]
filenames.sort()
dataset = []
for f in filenames:
dataset += [json.loads(l, object_pairs_hook=OrderedDict)
for l in open(os.path.join(test_dataset_location, f), 'r').readlines()]
return dataset
def process_ReCoRD_answers(results, result_data):
# In practice we should get the max confidence over the question space.
# First assign label and confidence to every single point on the set, then
# prune out low-confidence elements.
tmp_results = []
start_index = 0
preds, label = [], []
for i in range(len(result_data)):
candidate_result_array = result_data[i]
results_subarray = results[
start_index:start_index + len(candidate_result_array)]
idx, max_confidence = 0, -np.inf
backup_idx, backup_max_confidence = 0, -np.inf
for j in range(len(results_subarray)):
score, logits = results_subarray[j][0], results_subarray[j][-1]
            if score == 1 and logits[-1] > max_confidence:
                idx = j
                max_confidence = logits[-1]
else:
if logits[-1] > backup_max_confidence:
backup_idx = j
backup_max_confidence = logits[-1]
if max_confidence == -np.inf:
idx = backup_idx
chosen_candidate = candidate_result_array[idx]
preds.append(chosen_candidate["label"])
label.append(chosen_candidate["label"] if candidate_result_array[
idx]["is_answer"] else "glorp")
chosen_candidate.pop("is_answer", None)
tmp_results.append(chosen_candidate)
start_index = start_index + len(results_subarray)
# This number is meaningless in test (all eval to False), and
# might have high false negatives
score = sum([p == l for (p, l) in zip(preds, label)]) / len(preds)
return tmp_results, score
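# Toy example (illustration only) of the selection logic above: two candidate
# entities for one ReCoRD question, where the second was classified positive
# with the higher positive-class logit and is therefore chosen.
def _demo_process_record():
    result_data = [[
        {"idx": 0, "label": "Paris", "is_answer": False},
        {"idx": 0, "label": "London", "is_answer": True},
    ]]
    results = [(0, [1.2, -0.3]), (1, [-0.1, 2.4])]  # (predicted class, logits)
    answers, accuracy = process_ReCoRD_answers(results, result_data)
    # answers -> [{"idx": 0, "label": "London"}], accuracy -> 1.0
    return answers, accuracy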
def process_MultiRC_answers(results, test_dataset_location):
# "Re-roll" the unrolled prediction vector into the required format.
result_data = MultiRCHash(test_dataset_location)
p_idx, q_idx, a_idx = 0, 0, 0
for label in results:
if len(result_data[p_idx]["passage"]["questions"][q_idx]["answers"]) == a_idx:
a_idx = 0
q_idx += 1
if len(result_data[p_idx]["passage"]["questions"]) == q_idx:
q_idx = 0
p_idx += 1
result_data[p_idx]["passage"]["questions"][
q_idx]["answers"][a_idx]["label"] = int(label)
a_idx += 1
return result_data
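# Toy example (illustration only) of the "re-roll" above: one passage with one
# two-answer question, and a flat prediction vector in unrolled answer order.
def _demo_process_multirc():
    import tempfile
    passage = {"idx": 0, "passage": {"questions": [
        {"idx": 0, "answers": [{"idx": 0}, {"idx": 1}]}]}}
    with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False,
                                     encoding="utf8") as tmp:
        tmp.write(json.dumps(passage) + "\n")
    filled = process_MultiRC_answers([1, 0], tmp.name)
    # filled[0]["passage"]["questions"][0]["answers"] ->
    # [{"idx": 0, "label": 1}, {"idx": 1, "label": 0}]
    return filled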
def process_RACE_answers(result_data, results):
# In practice we should get the max confidence over the question space.
# First assign label and confidence to every single point on the set, then
# prune out low-confidence elements.
IDX_TO_ANSWER = {"0": "A", "1": "B", "2": "C", "3": "D"}
tmp_results = []
start_index = 0
total, correct = 0, 0
for line in result_data:
new_line = {k: v for k, v in line.items()}
new_line["answers"] = []
for question, prediction in zip(new_line["questions"], results[start_index:start_index + len(line["answers"])]):
label = IDX_TO_ANSWER[prediction[0]]
new_line["answers"].append(label)
start_index += len(line["answers"])
if "answers" in line:
for pred, label in zip(new_line["answers"], line["answers"]):
if pred == label:
correct += 1
total += 1
tmp_results.append(new_line)
class_accuracy = correct / total if total != 0 else 0
# Class accuracy is bugged, but we only need the actual accuracy anyway
    return tmp_results, class_accuracy
| utils/finetune_utils.py | 0.679179 | 0.317083 |
import gzip
import pickle
import os
from random import shuffle
from gensim.models.doc2vec import TaggedDocument
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.abspath(os.path.join(os.path.join(CURRENT_DIR, '..'), 'data'))
MODEL_DIR = os.path.join(DATA_DIR, 'models')
VOCAB_DIR = os.path.join(DATA_DIR, 'vocabs')
PICKLES_DIR = os.path.join(DATA_DIR, 'pickles')
def read(path):
with open(path, 'rb') as file:
raw = file.read()
return raw
def load(path):
file = gzip.open(path,'rb')
obj = pickle.load(file)
file.close()
return obj
def save(object, path):
file = gzip.open(path,'wb')
pickle.dump(object, file)
file.close()
def load_multi(path):
file = gzip.open(path,'rb')
objs = []
while True:
try: objs.append(pickle.load(file))
except EOFError: break
file.close()
return objs
def load_vocab(path):
file = gzip.open(path,'rb')
objs = {}
while True:
try:
obj = pickle.load(file)
vocab = {word[0]:word[1] for word in obj[1]}
objs.update(vocab)
except EOFError: break
file.close()
return objs
def read_vocab(path):
file = gzip.open(path,'rb')
objs = []
while True:
try:
obj = pickle.load(file)
objs.extend([obj[0]])
except EOFError: break
file.close()
return objs
def save_multi(object, path):
file = gzip.open(path,'ab')
pickle.dump(object, file)
file.close()
def save_subvocab(dir, main_vocab, percents):
more_half = int(100/(100-percents))
less_half = int(100/percents)
name = 'vocab%d.data'%percents
path = os.path.join(dir, name)
new = gzip.open(path,'ab')
main = gzip.open(main_vocab,'rb')
index = 0
while True:
try:
obj = pickle.load(main)
if less_half > 1:
if index % less_half == 0:
pickle.dump(obj, new)
else:
if index % more_half != 0:
pickle.dump(obj, new)
index += 1
except EOFError: break
main.close()
new.close()
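# Worked example of the sampling arithmetic above (illustration only):
# percents=25 gives less_half=4, so indices 0, 4, 8, ... are kept (~25%);
# percents=75 gives less_half=1 and more_half=4, so indices 0, 4, 8, ...
# are dropped and roughly 75% of the records survive.
def subvocab_kept_fraction(percents, n=1000):
    more_half = int(100 / (100 - percents))
    less_half = int(100 / percents)
    if less_half > 1:
        kept = sum(1 for i in range(n) if i % less_half == 0)
    else:
        kept = sum(1 for i in range(n) if i % more_half != 0)
    return kept / n  # roughly percents / 100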
def load_docs(path, random_docs=False, count=-1, full_word=0):
file = gzip.open(path,'rb')
documents = []
index = 0
while True:
try:
obj = pickle.load(file)
name = obj[0]
words = [word[full_word] for word in obj[1]]
documents.append(TaggedDocument(words, [name[:-4].strip()]))
if count >= 0 and index >= count:
break
index += 1
except EOFError:
break
file.close()
if random_docs:
shuffle(documents)
if count >= 0:
documents = documents[:count]
    return documents
| scripts/manager.py | 0.06777 | 0.07024 |
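# Usage sketch for the load_docs helper above (illustrative only: the corpus
# filename and the Doc2Vec hyperparameters are assumptions, and the names
# PICKLES_DIR, MODEL_DIR and load_docs come from the module above).
from gensim.models.doc2vec import Doc2Vec

def train_doc2vec(corpus_path=os.path.join(PICKLES_DIR, 'corpus.data')):
    documents = load_docs(corpus_path, random_docs=True)
    model = Doc2Vec(documents, vector_size=100, window=5, min_count=2, epochs=10)
    model.save(os.path.join(MODEL_DIR, 'doc2vec.model'))
    return model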
"""Define graph entity."""
import abc
from collections import OrderedDict
from typing import List
from mindinsight.mindconverter.common.log import logger as log
from mindinsight.mindconverter.graph_based_converter.constant import InputType
from mindinsight.mindconverter.common.exceptions import NodeInputTypeNotSupportError
class GraphParser(metaclass=abc.ABCMeta):
"""Graph parser."""
@classmethod
@abc.abstractmethod
def parse(cls, model_path: str, **kwargs):
"""Parse graph into readable format."""
class BaseGraph(metaclass=abc.ABCMeta):
"""Define basic graph."""
_REQUIRED_PARAM_OF_MODEL = "model"
@abc.abstractmethod
def build(self):
"""Build graph."""
@abc.abstractmethod
def sub_graph_merging(self):
"""Merge split nodes into one."""
@staticmethod
@abc.abstractmethod
def load_checkpoint(ckpt_path: str) -> dict:
"""Load checkpoint file."""
@staticmethod
@abc.abstractmethod
def load_metadata(**kwargs):
"""Load graph metadata."""
@staticmethod
@abc.abstractmethod
def load_graph(graph_path: str, **kwargs):
"""Load graph file."""
@classmethod
@abc.abstractmethod
def load(cls, model_path: str, **kwargs):
"""Factory method to initialize an graph object."""
def __new__(cls, *args, **kwargs):
"""Control the create action of graph."""
model_param = args[0] if args else kwargs.get(
cls._REQUIRED_PARAM_OF_MODEL)
if not model_param:
error = ValueError(f"`{cls._REQUIRED_PARAM_OF_MODEL}` "
f"can not be None.")
log.error(str(error))
log.exception(error)
raise error
return super(BaseGraph, cls).__new__(cls)
class Graph(BaseGraph, abc.ABC):
"""
Define Factory method to create Graph sub-class.
Args:
model (Union[torch.nn.Module, Any]): Graph file.
checkpoint (dict): Checkpoint path.
"""
sorted = False
def __init__(self, model, **kwargs):
super(Graph, self).__init__()
self.model = model
self._raw_input_nodes = kwargs.get("input_nodes")
self._raw_output_nodes = kwargs.get("output_nodes")
self._nodes_collection = OrderedDict()
self._nodes_record = dict()
self._shape_dict = dict()
self._input_nodes = []
self._output_nodes = []
self._topological_order = []
self._input_shape = dict()
self._is_multi_opt_graph = False
@property
def user_provided_input_nodes(self) -> List[str]:
"""User provided input_nodes in CLI."""
return list(self._raw_input_nodes.keys())
def get_input_shape(self, name):
"""
Get node input shape.
Args:
name (str): Node name.
Returns:
Union[list, int], shape.
"""
return self._input_shape.get(name)
def get_output_shape(self, name):
"""
Get node output shape.
Args:
name (str): Node name.
Returns:
            Union[list, int], shape.
"""
return self._shape_dict.get(name)
def get_input_shape_from_input(self, name):
"""
Get node input shape.
Returns:
list, shape.
"""
return self._input_shape.get(name)
@property
def nodes_in_topological_order(self):
"""
Return nodes in topological order.
Returns:
List[GraphNode], nodes.
"""
if not self.sorted:
self._topological_sort()
return self._topological_order
def _reset_topological_order(self):
"""
Reset topological order queue.
"""
self._topological_order = self._input_nodes[:]
self.sorted = False
def get_node(self, node_name):
"""
Get node reference.
Args:
node_name (str): Node name.
Returns:
GraphNode, node instance.
"""
prefix = node_name.split(":")[0]
if prefix not in self._nodes_collection:
return None
return self._nodes_collection[prefix]
def build(self):
"""Build graph."""
# Collect input nodes and output nodes.
self._collect_ipt_and_opt_nodes()
# Use topological sort to solve nodes order.
self._topological_sort()
def _collect_ipt_and_opt_nodes(self):
"""
Collect input and output nodes in model.
"""
for name, node in self._nodes_collection.items():
if node.in_degree == 0:
self._input_nodes.append(name)
if node.out_degree == 0:
self._output_nodes.append(name)
def _topological_sort(self):
"""Topological sort to arrange nodes order."""
self._reset_topological_order()
def is_connected(src, dst):
"""Judge two node whether are connected."""
for precursor in dst.precursor_nodes:
if src == precursor.split(":")[0]:
return 1
return 0
idx = 0
while idx < len(self._topological_order):
cur_node_name = self._topological_order[idx]
cur_node = self.get_node(cur_node_name)
# `scsr` is abbreviation for `successor`.
for scsr_name in cur_node.successor_nodes:
scsr_node = self.get_node(scsr_name)
scsr_node.cur_in_degree -= is_connected(cur_node_name,
scsr_node)
if scsr_node.cur_in_degree == 0:
self._topological_order.append(scsr_name)
idx += 1
self.sorted = True
def sub_graph_merging(self):
raise NotImplementedError
@staticmethod
def load_checkpoint(ckpt_path: str) -> dict:
raise NotImplementedError
@staticmethod
def load_metadata(**kwargs):
raise NotImplementedError
@staticmethod
def load_graph(graph_path: str, **kwargs):
raise NotImplementedError
@classmethod
def load(cls, model_path: str, **kwargs) -> BaseGraph:
"""
Load third party graph.
Args:
model_path (str): Graph or model file path.
Returns:
cls, graph instance.
"""
src_graph = cls.load_graph(graph_path=model_path, **kwargs)
return cls(src_graph, **kwargs)
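# Illustrative sketch only (not part of MindInsight): a minimal concrete
# subclass showing how the `load` factory above feeds the result of
# `load_graph` into the constructor. The ONNX-style node names are assumptions.
class _ToyGraph(Graph):
    """Toy subclass used purely to demonstrate the factory method."""

    def build(self):
        """No-op build for the demonstration."""

    @staticmethod
    def load_graph(graph_path: str, **kwargs):
        # Stand-in for a parsed third-party (e.g. ONNX) model object.
        return {"graph_path": graph_path}

# _ToyGraph.load("model.onnx", input_nodes={"input.1": [1, 3, 224, 224]},
#                output_nodes=["output.1"]).user_provided_input_nodes
# evaluates to ["input.1"].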
class GraphNode(abc.ABC):
"""
Graph node.
Args:
node (torch._C.Node): PyTorch node.
"""
transformed = False
def __init__(self, node):
# Store the edge from precursor.
self.precursor_nodes = []
# Store the edge to successor.
self.successor_nodes = []
# Control dependency.
self._deleted_in_edge = 0
# Source node in ONNX.
self._src_node = node if node else None
# Original operation name in ONNX.
self._op_name = None
self._op_params = dict()
self._scope_name = None
self._op_shape = None
# Node type of current node, e.g. class, module, operation.
self._node_type = None
# Function, class or operation needed args.
self._args_in_code = dict()
# Unique key of node.
self._hash_key = None
# Input shape of current op.
self._ipt_shape = None
# Output shape of current op.
self._opt_shape = None
# Weight of current op.
self._weight = None
# Input variable names.
self._ipt_var_names = list()
# Output variable names.
self._opt_var_names = list()
# Is in multi output graph.
self._is_in_multi_opt_graph = False
@property
def ir_node_name(self):
"""Getter of ir node's name."""
return self._src_node.name
@property
def ir_node_operation(self):
"""Getter of ir node's operation."""
return self._src_node.op_type
@property
def ir_node_inputs(self):
"""Getter of ir node's inputs."""
return list(self._src_node.input_name_list)
@property
def ir_node_outputs(self):
"""Getter of ir node's outputs."""
return list(self._src_node.output_name_list)
@property
def ir_node_precursor(self):
"""Getter of ir node's precursor."""
return [
v.name for _, v in self._src_node.precursor_onnx_node_dict.items()
]
@property
def ir_node_successor(self):
"""Getter of ir node's successor."""
return [
v.name for _, v in self._src_node.successor_onnx_node_dict.items()
]
@property
def weight(self):
return self._weight
@property
def ipt_var_names(self):
return self._ipt_var_names
@ipt_var_names.setter
def ipt_var_names(self, var_names):
self._ipt_var_names = var_names
@property
def opt_var_names(self):
return self._opt_var_names
@opt_var_names.setter
def opt_var_names(self, var_names):
self._opt_var_names = var_names
@staticmethod
def get_opt_var_name(variable_name):
"""
Output variable name.
Returns:
str, variable name.
"""
return f"{variable_name}_opt"
@property
def args_in_code(self):
"""
Args in code.
Returns:
dict, args.
"""
return self._args_in_code
@args_in_code.setter
def args_in_code(self, args):
"""
Setter for args_in_code.
Args:
args (dict): Args.
"""
self._args_in_code = args
@property
def input_shape(self):
"""
Input tensor shape of current node.
Returns:
tuple, tensor shape of input.
"""
return self._ipt_shape
@property
def output_shape(self):
"""
Output tensor shape.
Returns:
tuple, output tensor shape.
"""
return self._opt_shape
def is_empty(self):
"""
Whether is empty.
Returns:
bool, true or false.
"""
return not self._src_node
@property
def node_type(self):
"""Get node type (ONNX op type)."""
return self._node_type
@node_type.setter
def node_type(self, m):
"""
Setter of node_type.
Args:
m (str): Node type.
"""
self._node_type = m
@property
def scope_name(self):
"""
Scope name.
Returns:
str, scope name.
"""
return self._scope_name
@scope_name.setter
def scope_name(self, name):
"""
Setter of scope name.
Args:
name(str): Scope name.
"""
self._scope_name = name
@property
def node_params(self):
"""Get node params (ONNX op params)."""
return self._op_params
@property
def cur_in_degree(self):
"""
Current in-degree.
Returns:
int, current in-degree.
"""
return self.in_degree - self._deleted_in_edge
@cur_in_degree.setter
def cur_in_degree(self, e):
"""
Setter of cur_in_degree.
Args:
            e (int): New value of the current in-degree.
"""
self._deleted_in_edge += self.cur_in_degree - e
@property
def in_degree(self):
"""
Define in-degree.
Returns:
int, in-degree.
"""
return len(self.precursor_nodes)
@property
def out_degree(self):
"""
Define out-degree.
Returns:
int, out-degree.
"""
return len(self.successor_nodes)
@property
@abc.abstractmethod
def hash_key(self):
"""
Generate unique hash key for each node.
Use topological order as key.
"""
@abc.abstractmethod
def _get_raw_params(self, node):
"""Get params in onnx."""
@property
@abc.abstractmethod
def op_name(self):
"""Return op_name."""
@abc.abstractmethod
def replace_with_arg(self, src_arg, tgt_arg):
"""Replace actual parameter with formal parameter."""
@abc.abstractmethod
def _get_arg_name(self, arg, variable_name):
"""Get arg name for func or class."""
@abc.abstractmethod
def clear_args_of_declaration(self):
"""Clear `_args_in_code`."""
@property
@abc.abstractmethod
def real_name(self):
"""Getter of `real_name`."""
@real_name.setter
@abc.abstractmethod
def real_name(self, **kwargs):
"""Setter of `real_name`."""
@abc.abstractmethod
def to_code(self, ipt_args_in_construct: str, variable_name: str, output_var: str, code_fragment):
"""Graph node to MindSpore code."""
@abc.abstractmethod
def to_ir(self):
"""Graph node to ir node."""
@abc.abstractmethod
def add_input_and_output_shape(self, input_shape, output_shape):
"""Add the node input shape."""
@staticmethod
def _generate_ipt_args_settings_in_construct(ipt_args_in_construct, settings):
"""
Generate input with args and settings in construct.
Args:
ipt_args_in_construct (str): Input args in construct.
settings (Setting): Settings in operator.
Returns:
str, args of each node in generated construct statement.
"""
if settings and settings.op_ipt_type:
input_type = settings.op_ipt_type
if input_type == InputType.TENSOR.value:
ipt_args_settings_in_construct = ipt_args_in_construct
elif input_type == InputType.LIST.value:
ipt_args_settings_in_construct = f"({ipt_args_in_construct},)"
else:
raise NodeInputTypeNotSupportError(f"Input type[{input_type}] is not supported now.")
else:
ipt_args_settings_in_construct = ipt_args_in_construct
if settings and settings.op_extra_input:
settings_value = settings.op_extra_input
if settings_value:
settings_in_construct = ', '.join([f"{setting_val}" for _, setting_val in settings_value.items()])
ipt_args_settings_in_construct = ', '.join((ipt_args_settings_in_construct, settings_in_construct))
        return ipt_args_settings_in_construct
| mindinsight/mindconverter/graph_based_converter/third_party_graph/base.py | 0.940572 | 0.271333 |
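# Standalone sketch of the Kahn-style ordering that Graph._topological_sort
# above implements over GraphNode objects, shown here on a plain adjacency
# dict (illustration only; the node names are assumptions).
def toy_topological_sort(successors):
    """`successors` maps a node name to the list of its successor names."""
    in_degree = {name: 0 for name in successors}
    for node, nexts in successors.items():
        for nxt in nexts:
            in_degree[nxt] = in_degree.get(nxt, 0) + 1
    order = [name for name, degree in in_degree.items() if degree == 0]
    idx = 0
    while idx < len(order):
        for nxt in successors.get(order[idx], []):
            in_degree[nxt] -= 1
            if in_degree[nxt] == 0:
                order.append(nxt)
        idx += 1
    return order

# toy_topological_sort({"conv": ["bn"], "bn": ["relu"], "relu": []})
# returns ["conv", "bn", "relu"].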
__all__ = ['ParticleInterval']
"""
Contains the ParticleInterval class
"""
from panda3d.core import *
from panda3d.direct import *
from direct.directnotify.DirectNotifyGlobal import directNotify
from .Interval import Interval
class ParticleInterval(Interval):
"""
Use this interval when you want to have greater control over a
ParticleEffect. The interval does not register the effect with
the global particle and physics managers, but it does call upon
them to perform its stepping. You should NOT call
particleEffect.start() with an effect that is being controlled
by a ParticleInterval.
"""
# Name counter
particleNum = 1
# create ParticleInterval DirectNotify category
notify = directNotify.newCategory('ParticleInterval')
# Class methods
def __init__(self,
particleEffect,
parent,
worldRelative = 1,
renderParent = None,
duration = 0.0,
softStopT = 0.0,
cleanup = False,
name = None):
"""
particleEffect is a ParticleEffect
parent is a NodePath: this is where the effect will be
parented in the scenegraph
worldRelative is a boolean: this will override 'renderParent'
with render
renderParent is a NodePath: this is where the particles will
be rendered in the scenegraph
duration is a float: for the time
softStopT is a float: no effect if 0.0,
a positive value will count from the
start of the interval,
a negative value will count from the
end of the interval
cleanup is a boolean: if True the effect will be destroyed
and removed from the scenegraph upon
interval completion
set to False if planning on reusing
the interval
name is a string: use this for unique intervals so that
they can be easily found in the taskMgr
"""
# Generate unique name
id = 'Particle-%d' % ParticleInterval.particleNum
ParticleInterval.particleNum += 1
        if name is None:
name = id
# Record instance variables
self.particleEffect = particleEffect
self.cleanup = cleanup
        if parent is not None:
self.particleEffect.reparentTo(parent)
if worldRelative:
renderParent = render
if renderParent:
for particles in self.particleEffect.getParticlesList():
particles.setRenderParent(renderParent.node())
self.__softStopped = False
if softStopT == 0.0:
self.softStopT = duration
elif softStopT < 0.0:
self.softStopT = duration+softStopT
else:
self.softStopT = softStopT
# Initialize superclass
Interval.__init__(self, name, duration)
def __step(self,dt):
if self.particleEffect:
self.particleEffect.accelerate(dt,1,0.05)
def __softStart(self):
if self.particleEffect:
self.particleEffect.softStart()
self.__softStopped = False
def __softStop(self):
if self.particleEffect:
self.particleEffect.softStop()
self.__softStopped = True
def privInitialize(self, t):
if self.state != CInterval.SPaused:
# Restarting from a hard stop or just interrupting the
# current play
self.__softStart()
if self.particleEffect:
self.particleEffect.clearToInitial()
self.currT = 0
if self.particleEffect:
for forceGroup in self.particleEffect.getForceGroupList():
forceGroup.enable()
Interval.privInitialize(self,t)
def privInstant(self):
self.privInitialize(self.getDuration())
self.privFinalize()
def privStep(self, t):
if self.state == CInterval.SPaused or t < self.currT:
# Restarting from a pause.
self.privInitialize(t)
else:
if not self.__softStopped and t > self.softStopT:
self.__step(self.softStopT-self.currT)
self.__softStop()
self.__step(t-self.softStopT)
else:
self.__step(t-self.currT)
Interval.privStep(self,t)
def privFinalize(self):
Interval.privFinalize(self)
if self.cleanup and self.particleEffect:
self.particleEffect.cleanup()
            self.particleEffect = None
| Panda3D-1.10.0/direct/interval/ParticleInterval.py | 0.679817 | 0.331985 |
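# Minimal usage sketch (assumptions: a running ShowBase app providing `render`,
# and a hypothetical 'smoke.ptf' particle config); it mirrors the docstring's
# guidance that the interval, not ParticleEffect.start(), drives playback.
from direct.particles.ParticleEffect import ParticleEffect

def make_smoke_interval(parent):
    effect = ParticleEffect()
    effect.loadConfig('smoke.ptf')  # hypothetical particle config file
    return ParticleInterval(effect, parent, worldRelative=1,
                            duration=3.0, softStopT=-0.5, cleanup=True)

# ival = make_smoke_interval(render); ival.start()  # or embed it in a Sequence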
# In[ ]:
import pandas as pd
data=pd.read_csv("../../../input/merishnasuwal_breast-cancer-prediction-dataset/Breast_cancer_data.csv", delimiter=",")
# In[ ]:
data.head()
# In[ ]:
data.shape
# In[ ]:
data["diagnosis"].value_counts()
# In[ ]:
data.info()
# In[ ]:
Y=data["diagnosis"]
X=data.drop("diagnosis",axis=1)
# In[ ]:
X.shape
# In[ ]:
X.corr()
# In[ ]:
import seaborn as sns
import matplotlib.pyplot as plt
print()
# In[ ]:
print()
# In[ ]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
print()
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from scipy.stats import randint as sp_randint
depths=[1,5,50,100,150,250]
samples=[5, 10,50, 100,250, 500]
clf = DecisionTreeClassifier()
params = {'max_depth' : depths, "min_samples_split":samples}
grid = GridSearchCV(estimator = clf,param_grid=params ,cv = 3,n_jobs = 3,scoring='roc_auc')
grid.fit(X_train, y_train)
print("best depth = ", grid.best_params_)
print("AUC value on train data = ", grid.best_score_*100)
a = grid.best_params_
# In[ ]:
optimal_depth = a.get('max_depth')
optimal_split=a.get("min_samples_split")
# In[ ]:
clf = DecisionTreeClassifier(max_depth=optimal_depth,min_samples_split=optimal_split)
clf.fit(X_train,y_train)
pred = clf.predict(X_test)
# In[ ]:
from sklearn import metrics
fpr, tpr, threshold = metrics.roc_curve(y_test, pred)
roc_auc = metrics.auc(fpr, tpr)
import matplotlib.pyplot as plt
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
print()
# In[ ]:
import numpy as np
scores = grid.cv_results_['mean_test_score'].reshape(len(samples),len(depths))
plt.figure(figsize=(16, 12))
print()
plt.xlabel('min_samples_split')
plt.ylabel('max_depth')
plt.xticks(np.arange(len(samples)), samples)
plt.yticks(np.arange(len(depths)), depths)
plt.title('Grid Search AUC Score')
print()
# In[ ]:
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix
acc1 = accuracy_score(y_test, pred) * 100
pre1 = precision_score(y_test, pred) * 100
rec1 = recall_score(y_test, pred) * 100
f11 = f1_score(y_test, pred) * 100
print('\nAccuracy=%f%%' % (acc1))
print('\nprecision=%f%%' % (pre1))
print('\nrecall=%f%%' % (rec1))
print('\nF1-Score=%f%%' % (f11))
# In[ ]:
cm = confusion_matrix(y_test,pred)
print()
plt.title('Confusion Matrix for BoW')
print()
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
dtree=DecisionTreeClassifier(max_depth=2,min_samples_split=3)
dtree.fit(X_train,y_train)
# In[ ]:
from matplotlib import pyplot
importance =dtree.feature_importances_
# summarize feature importance
for i,v in enumerate(importance):
print('Feature: %0d, Score: %.5f' % (i,v))
# plot feature importance
pyplot.bar([x for x in range(len(importance))], importance)
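# Optional: the same importances keyed by column name, using the dataframe
# columns defined above (illustrative addition).
for name, score in zip(X.columns, importance):
    print('%s: %.5f' % (name, score))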
# In[ ]:
from io import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
dot_data = StringIO()
export_graphviz(dtree, out_file=dot_data, filled=True, rounded=True, special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
# In[ ]: | relancer-exp/original_notebooks/merishnasuwal_breast-cancer-prediction-dataset/breast-cancer-prediction-decision-tree.py |
# In[ ]:
import pandas as pd
data=pd.read_csv("../../../input/merishnasuwal_breast-cancer-prediction-dataset/Breast_cancer_data.csv", delimiter=",")
# In[ ]:
data.head()
# In[ ]:
data.shape
# In[ ]:
data["diagnosis"].value_counts()
# In[ ]:
data.info()
# In[ ]:
Y=data["diagnosis"]
X=data.drop("diagnosis",axis=1)
# In[ ]:
X.shape
# In[ ]:
X.corr()
# In[ ]:
import seaborn as sns
import matplotlib.pyplot as plt
print()
# In[ ]:
print()
# In[ ]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
print()
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from scipy.stats import randint as sp_randint
depths=[1,5,50,100,150,250]
samples=[5, 10,50, 100,250, 500]
clf = DecisionTreeClassifier()
params = {'max_depth' : depths, "min_samples_split":samples}
grid = GridSearchCV(estimator = clf,param_grid=params ,cv = 3,n_jobs = 3,scoring='roc_auc')
grid.fit(X_train, y_train)
print("best depth = ", grid.best_params_)
print("AUC value on train data = ", grid.best_score_*100)
a = grid.best_params_
# In[ ]:
optimal_depth = a.get('max_depth')  # key must match the param_grid key; a.get('depth') silently returned None
optimal_split=a.get("min_samples_split")
# In[ ]:
clf = DecisionTreeClassifier(max_depth=optimal_depth,min_samples_split=optimal_split)
clf.fit(X_train,y_train)
pred = clf.predict(X_test)
# In[ ]:
from sklearn import metrics
fpr, tpr, threshold = metrics.roc_curve(y_test, pred)
roc_auc = metrics.auc(fpr, tpr)
import matplotlib.pyplot as plt
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
print()
# In[ ]:
import numpy as np
scores = grid.cv_results_['mean_test_score'].reshape(len(samples),len(depths))
plt.figure(figsize=(16, 12))
print()
plt.xlabel('min_samples_split')
plt.ylabel('max_depth')
plt.xticks(np.arange(len(samples)), samples)
plt.yticks(np.arange(len(depths)), depths)
plt.title('Grid Search AUC Score')
print()
# In[ ]:
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix
acc1 = accuracy_score(y_test, pred) * 100
pre1 = precision_score(y_test, pred) * 100
rec1 = recall_score(y_test, pred) * 100
f11 = f1_score(y_test, pred) * 100
print('\nAccuracy=%f%%' % (acc1))
print('\nprecision=%f%%' % (pre1))
print('\nrecall=%f%%' % (rec1))
print('\nF1-Score=%f%%' % (f11))
# In[ ]:
cm = confusion_matrix(y_test,pred)
print()
plt.title('Confusion Matrix for BoW')
print()
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
dtree=DecisionTreeClassifier(max_depth=2,min_samples_split=3)
dtree.fit(X_train,y_train)
# In[ ]:
from matplotlib import pyplot
importance =dtree.feature_importances_
# summarize feature importance
for i,v in enumerate(importance):
print('Feature: %0d, Score: %.5f' % (i,v))
# plot feature importance
pyplot.bar([x for x in range(len(importance))], importance)
# In[ ]:
from io import StringIO  # sklearn.externals.six was removed in scikit-learn 0.23+
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
dot_data = StringIO()
export_graphviz(dtree, out_file=dot_data, filled=True, rounded=True, special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
# In[ ]: | 0.484868 | 0.531574 |
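A minimal, self-contained sketch of the tuning flow the notebook above is aiming for, using scikit-learn's bundled Wisconsin breast-cancer data as a stand-in for the Kaggle CSV. The two points it illustrates are that the keys read back from best_params_ must match the param_grid keys, and that AUC is normally computed from predicted probabilities rather than hard labels.
from sklearn.datasets import load_breast_cancer
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.tree import DecisionTreeClassifier
X, y = load_breast_cancer(return_X_y=True)   # stand-in for Breast_cancer_data.csv
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
params = {"max_depth": [1, 5, 50], "min_samples_split": [5, 10, 50]}
grid = GridSearchCV(DecisionTreeClassifier(), params, cv=3, scoring="roc_auc")
grid.fit(X_tr, y_tr)
best = grid.best_params_                      # e.g. {'max_depth': 5, 'min_samples_split': 10}
clf = DecisionTreeClassifier(**best).fit(X_tr, y_tr)
proba = clf.predict_proba(X_te)[:, 1]         # probabilities, not hard labels, for AUC
print("test AUC:", roc_auc_score(y_te, proba))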
import serial
import time
import os
import xmlformatter
import signal
def handler(signum, frame):
print("Forever is over!")
raise Exception("end of time")
def xml_formatter(xml_filename):
#Formatting the xml output
formatter = xmlformatter.Formatter(indent="1", indent_char="\t", encoding_output="UTF-8", preserve=["literal"])
xmldata = formatter.format_file(xml_filename)
f3 = open(xml_filename, 'wb+')
f3.write(xmldata)
f3.close()
def main():
#Variables
start_tag = ""
end_tag = ""
testcases = []
testsuite_st_tag = ""
testsuite_ed_tag = ""
testrun = ""
    testcount = 0
    # the <failure> fragments are optional in the stream; initialise them so the
    # unconditional writes further down cannot raise NameError
    failure_start_tag = ""
    failure_message = ""
    failure_end_tag = ""
#initialising timeout of 10 secs
signal.signal(signal.SIGALRM, handler)
signal.alarm(10)
#print ("timeout starts.......")
#opening the serial port
ser = open('output.txt', 'rb')
f1 = open('test-results.xml', 'w+')
f2 = open('test.log','w+')
while True:
data = ser.readline().decode('ascii')
if (data):
signal.alarm(10)
#print ("timeout reset.......")
print (data.rstrip("\r\n"))
f2.write(data)
if(data[0:10] == "<test-case"):
testcases.append(data)
testcount = testcount + 1
elif(data[0:5] == "<?xml"):
start_tag = data
elif(data[0:9] == "<test-run"):
testrun = data
elif(data[0:11] == "<test-suite"):
testsuite_st_tag = data
elif(data[0:13] == '</test-suite>'):
testsuite_ed_tag = data
elif(data[0:9] == '<failure>'):
failure_start_tag = data
elif(data[0:9] == '<message>'):
failure_message = data
elif(data[0:10] =="</failure>"):
failure_end_tag = data
if(data[0:11] == '</test-run>'):
end_tag = data
break
else:
print ("waiting for input on serial port.......")
#writing to the test-results.xml file in xml format
f1.write(start_tag)
f1.write(testrun)
f1.write(testsuite_st_tag)
f1.write(failure_start_tag)
f1.write(failure_message)
f1.write(failure_end_tag)
for i in range(0,testcount):
f1.write(testcases[i])
f1.write(testsuite_ed_tag)
f1.write(end_tag)
f1.close()
f2.close()
xml_formatter('test-results.xml')
ser.close()
exit()
if __name__ == "__main__":
main() | test/SerialRead.py | import serial
import time
import os
import xmlformatter
import signal
def handler(signum, frame):
print("Forever is over!")
raise Exception("end of time")
def xml_formatter(xml_filename):
#Formatting the xml output
formatter = xmlformatter.Formatter(indent="1", indent_char="\t", encoding_output="UTF-8", preserve=["literal"])
xmldata = formatter.format_file(xml_filename)
f3 = open(xml_filename, 'wb+')
f3.write(xmldata)
f3.close()
def main():
#Variables
start_tag = ""
end_tag = ""
testcases = []
testsuite_st_tag = ""
testsuite_ed_tag = ""
testrun = ""
    testcount = 0
    # the <failure> fragments are optional in the stream; initialise them so the
    # unconditional writes further down cannot raise NameError
    failure_start_tag = ""
    failure_message = ""
    failure_end_tag = ""
#initialising timeout of 10 secs
signal.signal(signal.SIGALRM, handler)
signal.alarm(10)
#print ("timeout starts.......")
#opening the serial port
ser = open('output.txt', 'rb')
f1 = open('test-results.xml', 'w+')
f2 = open('test.log','w+')
while True:
data = ser.readline().decode('ascii')
if (data):
signal.alarm(10)
#print ("timeout reset.......")
print (data.rstrip("\r\n"))
f2.write(data)
if(data[0:10] == "<test-case"):
testcases.append(data)
testcount = testcount + 1
elif(data[0:5] == "<?xml"):
start_tag = data
elif(data[0:9] == "<test-run"):
testrun = data
elif(data[0:11] == "<test-suite"):
testsuite_st_tag = data
elif(data[0:13] == '</test-suite>'):
testsuite_ed_tag = data
elif(data[0:9] == '<failure>'):
failure_start_tag = data
elif(data[0:9] == '<message>'):
failure_message = data
elif(data[0:10] =="</failure>"):
failure_end_tag = data
if(data[0:11] == '</test-run>'):
end_tag = data
break
else:
print ("waiting for input on serial port.......")
#writing to the test-results.xml file in xml format
f1.write(start_tag)
f1.write(testrun)
f1.write(testsuite_st_tag)
f1.write(failure_start_tag)
f1.write(failure_message)
f1.write(failure_end_tag)
for i in range(0,testcount):
f1.write(testcases[i])
f1.write(testsuite_ed_tag)
f1.write(end_tag)
f1.close()
f2.close()
xml_formatter('test-results.xml')
ser.close()
exit()
if __name__ == "__main__":
main() | 0.038394 | 0.198666 |
from functools import partial
from typing import Optional, Tuple
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.compass import compass
from gdsfactory.tech import LAYER
from gdsfactory.types import ComponentOrFactory, Layer
@cell
def pad(
size: Tuple[float, float] = (100.0, 100.0),
layer: Layer = LAYER.M3,
layers_cladding: Optional[Tuple[Layer, ...]] = None,
cladding_offsets: Optional[Tuple[float, ...]] = None,
port_inclusion: float = 0,
) -> Component:
"""Rectangular pad with 4 ports (1, 2, 3, 4)
Args:
size:
layer: pad layer
layers_cladding:
cladding_offsets:
port_inclusion: from edge
"""
c = Component()
rect = compass(size=size, layer=layer, port_inclusion=port_inclusion)
c_ref = c.add_ref(rect)
c.add_ports(c_ref.ports)
c.info["size"] = (float(size[0]), float(size[1]))
c.info["layer"] = layer
if layers_cladding and cladding_offsets:
for layer, cladding_offset in zip(layers_cladding, cladding_offsets):
c.add_ref(
compass(
size=(size[0] + 2 * cladding_offset, size[1] + 2 * cladding_offset),
layer=layer,
)
)
c.add_port(name="pad", port_type="vertical_dc", layer=layer, orientation=0)
return c
@cell
def pad_array(
pad: ComponentOrFactory = pad,
spacing: Tuple[float, float] = (150.0, 150.0),
columns: int = 6,
rows: int = 1,
orientation: float = 270,
) -> Component:
"""Returns 2D array of pads
Args:
pad: pad element
spacing: x, y pitch
columns:
rows:
orientation: port orientation in deg
"""
c = Component()
pad = pad() if callable(pad) else pad
size = pad.settings.full["size"]
c.info["size"] = size
c.add_array(pad, columns=columns, rows=rows, spacing=spacing)
width = size[0] if orientation in [90, 270] else size[1]
for col in range(columns):
for row in range(rows):
c.add_port(
name=f"e{row+1}{col+1}",
midpoint=(col * spacing[0], row * spacing[1]),
width=width,
orientation=orientation,
port_type="electrical",
layer=pad.info["layer"],
)
return c
pad_array90 = partial(pad_array, orientation=90)
pad_array270 = partial(pad_array, orientation=270)
pad_array0 = partial(pad_array, orientation=0, columns=1, rows=3)
pad_array180 = partial(pad_array, orientation=180, columns=1, rows=3)
if __name__ == "__main__":
# c = pad()
# c = pad(layer_to_inclusion={(3, 0): 10})
# print(c.ports)
# c = pad(width=10, height=10)
# print(c.ports.keys())
# c = pad_array90()
c = pad_array0()
# c = pad_array270()
# c.pprint_ports()
# c = pad_array_2d(cols=2, rows=3, port_names=("e2",))
# c = pad_array(columns=2, rows=2, orientation=270)
# c.auto_rename_ports()
c.show() | gdsfactory/components/pad.py | from functools import partial
from typing import Optional, Tuple
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.compass import compass
from gdsfactory.tech import LAYER
from gdsfactory.types import ComponentOrFactory, Layer
@cell
def pad(
size: Tuple[float, float] = (100.0, 100.0),
layer: Layer = LAYER.M3,
layers_cladding: Optional[Tuple[Layer, ...]] = None,
cladding_offsets: Optional[Tuple[float, ...]] = None,
port_inclusion: float = 0,
) -> Component:
"""Rectangular pad with 4 ports (1, 2, 3, 4)
Args:
size:
layer: pad layer
layers_cladding:
cladding_offsets:
port_inclusion: from edge
"""
c = Component()
rect = compass(size=size, layer=layer, port_inclusion=port_inclusion)
c_ref = c.add_ref(rect)
c.add_ports(c_ref.ports)
c.info["size"] = (float(size[0]), float(size[1]))
c.info["layer"] = layer
if layers_cladding and cladding_offsets:
for layer, cladding_offset in zip(layers_cladding, cladding_offsets):
c.add_ref(
compass(
size=(size[0] + 2 * cladding_offset, size[1] + 2 * cladding_offset),
layer=layer,
)
)
c.add_port(name="pad", port_type="vertical_dc", layer=layer, orientation=0)
return c
@cell
def pad_array(
pad: ComponentOrFactory = pad,
spacing: Tuple[float, float] = (150.0, 150.0),
columns: int = 6,
rows: int = 1,
orientation: float = 270,
) -> Component:
"""Returns 2D array of pads
Args:
pad: pad element
spacing: x, y pitch
columns:
rows:
orientation: port orientation in deg
"""
c = Component()
pad = pad() if callable(pad) else pad
size = pad.settings.full["size"]
c.info["size"] = size
c.add_array(pad, columns=columns, rows=rows, spacing=spacing)
width = size[0] if orientation in [90, 270] else size[1]
for col in range(columns):
for row in range(rows):
c.add_port(
name=f"e{row+1}{col+1}",
midpoint=(col * spacing[0], row * spacing[1]),
width=width,
orientation=orientation,
port_type="electrical",
layer=pad.info["layer"],
)
return c
pad_array90 = partial(pad_array, orientation=90)
pad_array270 = partial(pad_array, orientation=270)
pad_array0 = partial(pad_array, orientation=0, columns=1, rows=3)
pad_array180 = partial(pad_array, orientation=180, columns=1, rows=3)
if __name__ == "__main__":
# c = pad()
# c = pad(layer_to_inclusion={(3, 0): 10})
# print(c.ports)
# c = pad(width=10, height=10)
# print(c.ports.keys())
# c = pad_array90()
c = pad_array0()
# c = pad_array270()
# c.pprint_ports()
# c = pad_array_2d(cols=2, rows=3, port_names=("e2",))
# c = pad_array(columns=2, rows=2, orientation=270)
# c.auto_rename_ports()
c.show() | 0.92391 | 0.39036 |
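A hedged usage sketch for the pad factories above. It assumes gdsfactory is installed and that the factories are reachable through the usual gdsfactory.components namespace; the sizes and spacing are illustrative values, not ones taken from the repository.
import gdsfactory as gf
c = gf.components.pad(size=(80.0, 80.0))             # single rectangular pad
arr = gf.components.pad_array(
    columns=3, rows=2, spacing=(200.0, 200.0), orientation=90
)                                                     # 3 x 2 grid with e11..e23 electrical ports
arr.show()                                            # view in KLayout if klive is running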
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.encoding import force_bytes, force_str
from django.contrib.auth.tokens import default_token_generator
from django.contrib.auth import get_user_model as django_get_user_model
from rest_framework.authtoken.models import Token
from drf_registration.settings import drfr_settings
from drf_registration.utils.common import import_string
def get_user_model():
"""
Get user model base on AUTH_USER_MODEL
"""
return django_get_user_model()
def get_all_users():
"""
Get all users queryset
"""
return get_user_model().objects.all()
def get_user_serializer():
"""
Get user serializer from settings
"""
return import_string(drfr_settings.USER_SERIALIZER)
def get_user_token(user):
"""
Get or create token for user
Args:
user (dict): The user instance
Returns:
[string]: The user token
"""
token, created = Token.objects.get_or_create(user=user)
return token
def remove_user_token(user):
"""
Remove user token
Args:
user (dict): The user instance
"""
# Remove old token
Token.objects.filter(user=user).delete()
def get_user_profile_data(user):
"""
Get user refresh token and access token
Args:
user (dict): The user instance
Returns:
[dict]: The data response include the token
"""
serializer = import_string(drfr_settings.USER_SERIALIZER)
data = serializer(user).data
# Add tokens to data
if has_user_verified(user):
data['token'] = get_user_token(user).key
return data
def has_user_activate_token():
"""
Check to has user verify token from settings
Returns:
[boolean]: True if USER_ACTIVATE_TOKEN_ENABLED is True, else is False
"""
return drfr_settings.USER_ACTIVATE_TOKEN_ENABLED
def has_user_verify_code():
"""
Check to has user verify code from settings
Returns:
[boolean]: True if USER_VERIFY_CODE_ENABLED is True, else is False
"""
return drfr_settings.USER_VERIFY_CODE_ENABLED
def has_user_verified(user):
"""
Check user verify or not
Args:
user (object): The user instance
Returns:
[boolean]: The verified value
"""
return get_user_verified(user)
def get_user_verified(user):
"""
Get user verify value
Args:
user (object): The user instance
Returns:
[boolean]: The verified value
"""
return getattr(user, drfr_settings.USER_VERIFY_FIELD)
def set_user_verified(user, verified=True):
"""
Set user verified
Args:
user (object): The user instance
"""
setattr(user, drfr_settings.USER_VERIFY_FIELD, verified)
user.save()
def generate_user_uid(user):
"""
Generate user UID from user pk
Args:
user (object): The user object
Returns:
[string]: The UID
"""
return urlsafe_base64_encode(force_bytes(user.pk))
def generate_uid_and_token(user, token_generator=None):
"""
Generate UID and token from user information
Args:
user (object): The user object
token_generator (optional): The token generator class. Defaults to None.
Returns:
[object]: The object of uid and token
"""
token_generator = token_generator or default_token_generator
return {
'uidb64': generate_user_uid(user),
'token': token_generator.make_token(user)
}
def get_user_from_uid(uidb64):
"""
Get user from uidb64
Args:
uidb64 (string): The uidb64
Returns:
[optional]: The user object or None
"""
try:
uid = force_str(urlsafe_base64_decode(uidb64))
user = get_user_model().objects.get(pk=uid)
return user
except:
return None | drf_registration/utils/users.py | from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.encoding import force_bytes, force_str
from django.contrib.auth.tokens import default_token_generator
from django.contrib.auth import get_user_model as django_get_user_model
from rest_framework.authtoken.models import Token
from drf_registration.settings import drfr_settings
from drf_registration.utils.common import import_string
def get_user_model():
"""
Get user model base on AUTH_USER_MODEL
"""
return django_get_user_model()
def get_all_users():
"""
Get all users queryset
"""
return get_user_model().objects.all()
def get_user_serializer():
"""
Get user serializer from settings
"""
return import_string(drfr_settings.USER_SERIALIZER)
def get_user_token(user):
"""
Get or create token for user
Args:
user (dict): The user instance
Returns:
[string]: The user token
"""
token, created = Token.objects.get_or_create(user=user)
return token
def remove_user_token(user):
"""
Remove user token
Args:
user (dict): The user instance
"""
# Remove old token
Token.objects.filter(user=user).delete()
def get_user_profile_data(user):
"""
Get user refresh token and access token
Args:
user (dict): The user instance
Returns:
[dict]: The data response include the token
"""
serializer = import_string(drfr_settings.USER_SERIALIZER)
data = serializer(user).data
# Add tokens to data
if has_user_verified(user):
data['token'] = get_user_token(user).key
return data
def has_user_activate_token():
"""
Check to has user verify token from settings
Returns:
[boolean]: True if USER_ACTIVATE_TOKEN_ENABLED is True, else is False
"""
return drfr_settings.USER_ACTIVATE_TOKEN_ENABLED
def has_user_verify_code():
"""
Check to has user verify code from settings
Returns:
[boolean]: True if USER_VERIFY_CODE_ENABLED is True, else is False
"""
return drfr_settings.USER_VERIFY_CODE_ENABLED
def has_user_verified(user):
"""
Check user verify or not
Args:
user (object): The user instance
Returns:
[boolean]: The verified value
"""
return get_user_verified(user)
def get_user_verified(user):
"""
Get user verify value
Args:
user (object): The user instance
Returns:
[boolean]: The verified value
"""
return getattr(user, drfr_settings.USER_VERIFY_FIELD)
def set_user_verified(user, verified=True):
"""
Set user verified
Args:
user (object): The user instance
"""
setattr(user, drfr_settings.USER_VERIFY_FIELD, verified)
user.save()
def generate_user_uid(user):
"""
Generate user UID from user pk
Args:
user (object): The user object
Returns:
[string]: The UID
"""
return urlsafe_base64_encode(force_bytes(user.pk))
def generate_uid_and_token(user, token_generator=None):
"""
Generate UID and token from user information
Args:
user (object): The user object
token_generator (optional): The token generator class. Defaults to None.
Returns:
[object]: The object of uid and token
"""
token_generator = token_generator or default_token_generator
return {
'uidb64': generate_user_uid(user),
'token': token_generator.make_token(user)
}
def get_user_from_uid(uidb64):
"""
Get user from uidb64
Args:
uidb64 (string): The uidb64
Returns:
[optional]: The user object or None
"""
try:
uid = force_str(urlsafe_base64_decode(uidb64))
user = get_user_model().objects.get(pk=uid)
return user
except:
return None | 0.778986 | 0.129678 |
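A hedged sketch of the activation round trip the uid/token helpers above are built for. It assumes a configured Django project with a saved user; build_activation_link and activate are illustrative names, not part of drf_registration.
from django.contrib.auth.tokens import default_token_generator
from drf_registration.utils.users import (
    generate_uid_and_token, get_user_from_uid, set_user_verified,
)
def build_activation_link(user, base_url="https://example.com/activate"):
    pair = generate_uid_and_token(user)               # {'uidb64': ..., 'token': ...}
    return f"{base_url}/{pair['uidb64']}/{pair['token']}/"
def activate(uidb64, token):
    user = get_user_from_uid(uidb64)                  # None when the uid cannot be decoded
    if user and default_token_generator.check_token(user, token):
        set_user_verified(user)                       # flips USER_VERIFY_FIELD and saves
        return True
    return False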
import numpy as np
import sys
import tensorflow as tf
import time
import os
import pickle as pkl
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
syspath = os.path.dirname(os.path.realpath(__file__)) + '/../..'
sys.path.insert(0, syspath)
from vbsw_module.functions.df import dataset_weighting
from vbsw_module.algorithms.training import training
def vbsw(training_set, test_set, ratio, N_stat, N_layers, N_units, activation_hidden,
activation_output, N_seeds, batch_size, epochs, optimizer, learning_rate, loss_function, test_losses,
keep_best=True, criterion_for_best=None, saving_period=1, verbose=1, case_name=None, dataset=None):
x_train, y_train = training_set
training_set = dataset_weighting(x_train, y_train, ratio, N_stat, dataset)
if case_name is not None:
save_dir = syspath + "/results/" + case_name
res_name = "res_" + str(os.getpid()) + str(time.time())[:10]
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
save_file = os.path.join(save_dir, res_name)
else:
save_file = None
model = training(training_set=training_set,
test_set=test_set,
N_layers=N_layers,
N_units=N_units,
activation_hidden=activation_hidden,
activation_output=activation_output,
N_seeds=N_seeds,
batch_size=batch_size,
epochs=epochs,
optimizer=optimizer,
learning_rate=learning_rate,
loss_function=loss_function + "_w",
test_losses=test_losses,
keep_best=keep_best,
criterion_for_best=criterion_for_best,
saving_period=saving_period,
verbose=verbose,
save_file=save_file)
if case_name is not None:
with open(save_file, "rb") as f:
results = pkl.load(f)
results["hyperparams"]["ratio"] = ratio
results["hyperparams"]["N_stat"] = N_stat
results["hyperparams"]["vbsw"] = 1
with open(save_file, "wb") as f:
pkl.dump(results, f)
if keep_best:
return model
def vbsw_for_dl(model_init, training_set, test_set, ratio, N_stat, N_seeds,
activation_output, batch_size, epochs, optimizer, learning_rate, loss_function, test_losses,
keep_best=True, criterion_for_best=None, saving_period=1, verbose=1, case_name=None, dataset=None):
x_train_init, y_train = training_set
x_test_init, y_test = test_set
loss_init = model_init.evaluate(x_test_init, y_test)[1]
inputs = model_init.input
output_latent = model_init.layers[-2].output
model_trunc = tf.keras.Model(inputs, output_latent, name="model_trunc")
x_train = model_trunc.predict(x_train_init)
x_test = model_trunc.predict(x_test_init)
training_set = dataset_weighting(x_train, y_train, ratio, N_stat, dataset)
test_set = (x_test, y_test)
if case_name is not None:
save_dir = syspath + "/results/" + case_name
res_name = "res_" + str(os.getpid()) + str(time.time())[:10]
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
save_file = os.path.join(save_dir, res_name)
else:
save_file = None
model = training(training_set=training_set,
test_set=test_set,
N_layers=0,
N_units=0,
activation_hidden="",
activation_output=activation_output,
N_seeds=N_seeds,
batch_size=batch_size,
epochs=epochs,
optimizer=optimizer,
learning_rate=learning_rate,
loss_function=loss_function + "_w",
test_losses=test_losses,
keep_best=keep_best,
criterion_for_best=criterion_for_best,
saving_period=saving_period,
verbose=verbose,
save_file=save_file)
if case_name is not None:
with open(save_file, "rb") as f:
results = pkl.load(f)
results["misc"] = {}
results["misc"]["loss_init"] = loss_init
results["hyperparams"]["ratio"] = ratio
results["hyperparams"]["N_stat"] = N_stat
results["hyperparams"]["vbsw"] = 1
with open(save_file, "wb") as f:
pkl.dump(results, f)
if keep_best:
input = model_trunc.input
x = model_trunc(input)
output = model(x)
final_model = tf.keras.models.Model(input, output)
return final_model | vbsw_module/algorithms/vbsw.py | import numpy as np
import sys
import tensorflow as tf
import time
import os
import pickle as pkl
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
syspath = os.path.dirname(os.path.realpath(__file__)) + '/../..'
sys.path.insert(0, syspath)
from vbsw_module.functions.df import dataset_weighting
from vbsw_module.algorithms.training import training
def vbsw(training_set, test_set, ratio, N_stat, N_layers, N_units, activation_hidden,
activation_output, N_seeds, batch_size, epochs, optimizer, learning_rate, loss_function, test_losses,
keep_best=True, criterion_for_best=None, saving_period=1, verbose=1, case_name=None, dataset=None):
x_train, y_train = training_set
training_set = dataset_weighting(x_train, y_train, ratio, N_stat, dataset)
if case_name is not None:
save_dir = syspath + "/results/" + case_name
res_name = "res_" + str(os.getpid()) + str(time.time())[:10]
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
save_file = os.path.join(save_dir, res_name)
else:
save_file = None
model = training(training_set=training_set,
test_set=test_set,
N_layers=N_layers,
N_units=N_units,
activation_hidden=activation_hidden,
activation_output=activation_output,
N_seeds=N_seeds,
batch_size=batch_size,
epochs=epochs,
optimizer=optimizer,
learning_rate=learning_rate,
loss_function=loss_function + "_w",
test_losses=test_losses,
keep_best=keep_best,
criterion_for_best=criterion_for_best,
saving_period=saving_period,
verbose=verbose,
save_file=save_file)
if case_name is not None:
with open(save_file, "rb") as f:
results = pkl.load(f)
results["hyperparams"]["ratio"] = ratio
results["hyperparams"]["N_stat"] = N_stat
results["hyperparams"]["vbsw"] = 1
with open(save_file, "wb") as f:
pkl.dump(results, f)
if keep_best:
return model
def vbsw_for_dl(model_init, training_set, test_set, ratio, N_stat, N_seeds,
activation_output, batch_size, epochs, optimizer, learning_rate, loss_function, test_losses,
keep_best=True, criterion_for_best=None, saving_period=1, verbose=1, case_name=None, dataset=None):
x_train_init, y_train = training_set
x_test_init, y_test = test_set
loss_init = model_init.evaluate(x_test_init, y_test)[1]
inputs = model_init.input
output_latent = model_init.layers[-2].output
model_trunc = tf.keras.Model(inputs, output_latent, name="model_trunc")
x_train = model_trunc.predict(x_train_init)
x_test = model_trunc.predict(x_test_init)
training_set = dataset_weighting(x_train, y_train, ratio, N_stat, dataset)
test_set = (x_test, y_test)
if case_name is not None:
save_dir = syspath + "/results/" + case_name
res_name = "res_" + str(os.getpid()) + str(time.time())[:10]
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
save_file = os.path.join(save_dir, res_name)
else:
save_file = None
model = training(training_set=training_set,
test_set=test_set,
N_layers=0,
N_units=0,
activation_hidden="",
activation_output=activation_output,
N_seeds=N_seeds,
batch_size=batch_size,
epochs=epochs,
optimizer=optimizer,
learning_rate=learning_rate,
loss_function=loss_function + "_w",
test_losses=test_losses,
keep_best=keep_best,
criterion_for_best=criterion_for_best,
saving_period=saving_period,
verbose=verbose,
save_file=save_file)
if case_name is not None:
with open(save_file, "rb") as f:
results = pkl.load(f)
results["misc"] = {}
results["misc"]["loss_init"] = loss_init
results["hyperparams"]["ratio"] = ratio
results["hyperparams"]["N_stat"] = N_stat
results["hyperparams"]["vbsw"] = 1
with open(save_file, "wb") as f:
pkl.dump(results, f)
if keep_best:
input = model_trunc.input
x = model_trunc(input)
output = model(x)
final_model = tf.keras.models.Model(input, output)
return final_model | 0.412767 | 0.17614 |
from flask import Blueprint, request, jsonify
from os import path
from sense2vec import Sense2Vec
from lib.AutocompleteIndex import AutocompleteIndex
from lib.VectorSearchIndex import VectorSearchIndex
api = Blueprint("api", __name__)
s2v = Sense2Vec().from_disk(path.join(path.dirname(__file__), "../data"))
search_index = VectorSearchIndex(s2v)
autocomplete_index = AutocompleteIndex(s2v)
@api.get("/")
def index():
return "OK", 200
@api.get("/autocomplete")
def autocomplete():
query = request.args.get("query")
if query is None:
return "Must provide a query", 400
results = autocomplete_index.search(query)
enhanced_results = []
for result in results:
best_sense = s2v.get_best_sense(result, ignore_case=False)
other_senses = s2v.get_other_senses(best_sense, ignore_case=False)
senses = [best_sense] + other_senses
pos_with_freqs = [
{"pos": s2v.split_key(sense)[1], "freq": s2v.get_freq(sense)}
for sense in senses
]
enhanced_results.append(
{
"term": result,
"senses": sorted(
pos_with_freqs, key=lambda obj: obj["freq"], reverse=True
),
"freq": s2v.get_freq(best_sense),
}
)
return jsonify(enhanced_results)
@api.get("/bias-rank")
def bias_rank():
"""
requires query params for leftSense, rightSense
this will return to top 100 positive and negative extreme words matching this bias vector by dot product
"""
left_sense = request.args.get("leftSense")
right_sense = request.args.get("rightSense")
if left_sense is None:
return "Must provide a query param: leftSense", 400
if right_sense is None:
return "Must provide a query param: rightSense", 400
print(f"bias: {left_sense} - {right_sense}")
left_sense_vec = s2v[left_sense]
right_sense_vec = s2v[right_sense]
if left_sense_vec is None:
return f"{left_sense} not found", 400
if right_sense_vec is None:
return f"{right_sense} not found", 400
bias_vec = right_sense_vec - left_sense_vec
top_right_bias_senses = search_index.search(bias_vec, 100)
top_left_bias_senses = search_index.search(-1 * bias_vec, 100)
return jsonify(
{
"topLeftSenses": top_left_bias_senses,
"topRightSenses": top_right_bias_senses,
}
) | server/api/routes.py | from flask import Blueprint, request, jsonify
from os import path
from sense2vec import Sense2Vec
from lib.AutocompleteIndex import AutocompleteIndex
from lib.VectorSearchIndex import VectorSearchIndex
api = Blueprint("api", __name__)
s2v = Sense2Vec().from_disk(path.join(path.dirname(__file__), "../data"))
search_index = VectorSearchIndex(s2v)
autocomplete_index = AutocompleteIndex(s2v)
@api.get("/")
def index():
return "OK", 200
@api.get("/autocomplete")
def autocomplete():
query = request.args.get("query")
if query is None:
return "Must provide a query", 400
results = autocomplete_index.search(query)
enhanced_results = []
for result in results:
best_sense = s2v.get_best_sense(result, ignore_case=False)
other_senses = s2v.get_other_senses(best_sense, ignore_case=False)
senses = [best_sense] + other_senses
pos_with_freqs = [
{"pos": s2v.split_key(sense)[1], "freq": s2v.get_freq(sense)}
for sense in senses
]
enhanced_results.append(
{
"term": result,
"senses": sorted(
pos_with_freqs, key=lambda obj: obj["freq"], reverse=True
),
"freq": s2v.get_freq(best_sense),
}
)
return jsonify(enhanced_results)
@api.get("/bias-rank")
def bias_rank():
"""
requires query params for leftSense, rightSense
this will return to top 100 positive and negative extreme words matching this bias vector by dot product
"""
left_sense = request.args.get("leftSense")
right_sense = request.args.get("rightSense")
if left_sense is None:
return "Must provide a query param: leftSense", 400
if right_sense is None:
return "Must provide a query param: rightSense", 400
print(f"bias: {left_sense} - {right_sense}")
left_sense_vec = s2v[left_sense]
right_sense_vec = s2v[right_sense]
if left_sense_vec is None:
return f"{left_sense} not found", 400
if right_sense_vec is None:
return f"{right_sense} not found", 400
bias_vec = right_sense_vec - left_sense_vec
top_right_bias_senses = search_index.search(bias_vec, 100)
top_left_bias_senses = search_index.search(-1 * bias_vec, 100)
return jsonify(
{
"topLeftSenses": top_left_bias_senses,
"topRightSenses": top_right_bias_senses,
}
) | 0.641085 | 0.218253 |
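A hedged client-side sketch for the two endpoints above. The host, port, blueprint mount point and the example sense keys are assumptions (sense2vec keys normally look like "token|POS"), not values taken from the repository.
import requests
BASE = "http://localhost:5000"                        # adjust to wherever the blueprint is mounted
hits = requests.get(f"{BASE}/autocomplete", params={"query": "new yo"}).json()
print(hits[0]["term"], hits[0]["senses"][:2])
rank = requests.get(
    f"{BASE}/bias-rank",
    params={"leftSense": "woman|NOUN", "rightSense": "man|NOUN"},
).json()
print(rank["topLeftSenses"][:5])
print(rank["topRightSenses"][:5])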
import math
import cv2
import numpy as np
def is_inside_polygon(polygon, point):
# ref:https://stackoverflow.com/a/2922778/1266873
# int pnpoly(int nvert, float *vertx, float *verty, float testx, float testy)
# {
# int i, j, c = 0;
# for (i = 0, j = nvert-1; i < nvert; j = i++) {
# if ( ((verty[i]>testy) != (verty[j]>testy)) &&
# (testx < (vertx[j]-vertx[i]) * (testy-verty[i]) / (verty[j]-verty[i]) + vertx[i]) )
# c = !c;
# }
# return c;
# }
def pnpoly(nvert, vertx, verty, testx, testy):
i = 0
j = nvert - 1
c = False
while True:
j = i
i += 1
if i >= nvert:
break
if (verty[i] > testy) != (verty[j] > testy) and testx < (vertx[j] - vertx[i]) * (testy - verty[i]) / (
verty[j] - verty[i]) + vertx[i]:
c = not c
return c
vertx = []
verty = []
for p in polygon:
vertx.append(p[0])
verty.append(p[1])
if polygon[-1] is not polygon[0]:
p = polygon[0]
vertx.append(p[0])
verty.append(p[1])
return pnpoly(len(vertx), vertx, verty, point[0], point[1])
def is_inside_rect(rect, point):
e = 0.001
x = point[0]
if x < rect[1] - e:
return False
elif x > rect[3] + e:
return False
y = point[1]
if y < rect[0] - e:
return False
elif y > rect[2] + e:
return False
return True
def rects_intersect(rect1, rect2):
class Rectangle:
def intersects(self, other):
a, b = self, other
x1 = max(min(a.x1, a.x2), min(b.x1, b.x2))
y1 = max(min(a.y1, a.y2), min(b.y1, b.y2))
x2 = min(max(a.x1, a.x2), max(b.x1, b.x2))
y2 = min(max(a.y1, a.y2), max(b.y1, b.y2))
return x1 < x2 and y1 < y2
def _set(self, x1, y1, x2, y2):
if x1 > x2 or y1 > y2:
raise ValueError("Coordinates are invalid")
self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2
def __init__(self, bbox):
self._set(bbox[0], bbox[1], bbox[2], bbox[3])
return Rectangle(rect1).intersects(Rectangle(rect2))
def rects_overlap(rect1, rect2, min_ratio):
w1 = rect1[2] - rect1[0]
w2 = rect2[2] - rect2[0]
w_min = min(rect1[2], rect2[2]) - max(rect1[0], rect2[0])
if (w_min / w1) >= min_ratio or (w_min / w2) >= min_ratio:
h1 = rect1[3] - rect1[1]
h2 = rect2[3] - rect2[1]
h_min = min(rect1[3], rect2[3]) - max(rect1[1], rect2[1])
return (h_min / h1) >= min_ratio or (h_min / h2) >= min_ratio
return False
def add_padding_rect(rect, padding):
x1, y1, x2, y2 = rect[0], rect[1], rect[2], rect[3]
dw = (x2 - x1) * padding
dh = (y2 - y1) * padding
return [x1 - dw, y1 - dh, x2 + dw, y2 + dh]
def distance(p1, p2):
return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
def get_rect_values(rect):
# y1, x1, y2, x2
return rect[0], rect[1], rect[2], rect[3]
def get_rect_pnts(rect):
# p1, p2
return (rect[1], rect[0]), (rect[3], rect[2])
def get_rect(p1, p2):
return [p1[1], p1[0], p2[1], p2[0]]
def get_rect_bottom_center(rect):
r1 = rect[1]
r2 = rect[2]
r3 = rect[3]
return int(r1 + (r3 - r1) * 0.5), r2 # bottom center
def get_dist_sq(p1, p2):
d1 = p2[0] - p1[0]
d2 = p2[1] - p1[1]
return d1 * d1 + d2 * d2
def get_center_int(pnts):
x = 0
y = 0
for pnt in pnts:
x += pnt[0]
y += pnt[1]
length = float(len(pnts))
return [int(x / length), int(y / length)] | ndu_gate_camera/utility/geometry_helper.py | import math
import cv2
import numpy as np
def is_inside_polygon(polygon, point):
# ref:https://stackoverflow.com/a/2922778/1266873
# int pnpoly(int nvert, float *vertx, float *verty, float testx, float testy)
# {
# int i, j, c = 0;
# for (i = 0, j = nvert-1; i < nvert; j = i++) {
# if ( ((verty[i]>testy) != (verty[j]>testy)) &&
# (testx < (vertx[j]-vertx[i]) * (testy-verty[i]) / (verty[j]-verty[i]) + vertx[i]) )
# c = !c;
# }
# return c;
# }
def pnpoly(nvert, vertx, verty, testx, testy):
i = 0
j = nvert - 1
c = False
while True:
j = i
i += 1
if i >= nvert:
break
if (verty[i] > testy) != (verty[j] > testy) and testx < (vertx[j] - vertx[i]) * (testy - verty[i]) / (
verty[j] - verty[i]) + vertx[i]:
c = not c
return c
vertx = []
verty = []
for p in polygon:
vertx.append(p[0])
verty.append(p[1])
if polygon[-1] is not polygon[0]:
p = polygon[0]
vertx.append(p[0])
verty.append(p[1])
return pnpoly(len(vertx), vertx, verty, point[0], point[1])
def is_inside_rect(rect, point):
e = 0.001
x = point[0]
if x < rect[1] - e:
return False
elif x > rect[3] + e:
return False
y = point[1]
if y < rect[0] - e:
return False
elif y > rect[2] + e:
return False
return True
def rects_intersect(rect1, rect2):
class Rectangle:
def intersects(self, other):
a, b = self, other
x1 = max(min(a.x1, a.x2), min(b.x1, b.x2))
y1 = max(min(a.y1, a.y2), min(b.y1, b.y2))
x2 = min(max(a.x1, a.x2), max(b.x1, b.x2))
y2 = min(max(a.y1, a.y2), max(b.y1, b.y2))
return x1 < x2 and y1 < y2
def _set(self, x1, y1, x2, y2):
if x1 > x2 or y1 > y2:
raise ValueError("Coordinates are invalid")
self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2
def __init__(self, bbox):
self._set(bbox[0], bbox[1], bbox[2], bbox[3])
return Rectangle(rect1).intersects(Rectangle(rect2))
def rects_overlap(rect1, rect2, min_ratio):
w1 = rect1[2] - rect1[0]
w2 = rect2[2] - rect2[0]
w_min = min(rect1[2], rect2[2]) - max(rect1[0], rect2[0])
if (w_min / w1) >= min_ratio or (w_min / w2) >= min_ratio:
h1 = rect1[3] - rect1[1]
h2 = rect2[3] - rect2[1]
h_min = min(rect1[3], rect2[3]) - max(rect1[1], rect2[1])
return (h_min / h1) >= min_ratio or (h_min / h2) >= min_ratio
return False
def add_padding_rect(rect, padding):
x1, y1, x2, y2 = rect[0], rect[1], rect[2], rect[3]
dw = (x2 - x1) * padding
dh = (y2 - y1) * padding
return [x1 - dw, y1 - dh, x2 + dw, y2 + dh]
def distance(p1, p2):
return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
def get_rect_values(rect):
# y1, x1, y2, x2
return rect[0], rect[1], rect[2], rect[3]
def get_rect_pnts(rect):
# p1, p2
return (rect[1], rect[0]), (rect[3], rect[2])
def get_rect(p1, p2):
return [p1[1], p1[0], p2[1], p2[0]]
def get_rect_bottom_center(rect):
r1 = rect[1]
r2 = rect[2]
r3 = rect[3]
return int(r1 + (r3 - r1) * 0.5), r2 # bottom center
def get_dist_sq(p1, p2):
d1 = p2[0] - p1[0]
d2 = p2[1] - p1[1]
return d1 * d1 + d2 * d2
def get_center_int(pnts):
x = 0
y = 0
for pnt in pnts:
x += pnt[0]
y += pnt[1]
length = float(len(pnts))
return [int(x / length), int(y / length)] | 0.467818 | 0.501404 |
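A small usage sketch for the geometry helpers above; points are (x, y) tuples. Note the module is not entirely consistent about rectangle ordering ([y1, x1, y2, x2] in is_inside_rect/get_rect_values versus [x1, y1, x2, y2] inside rects_intersect/rects_overlap), so the example sticks to values that are symmetric in x and y.
# run alongside the module above, or import the helpers from
# ndu_gate_camera.utility.geometry_helper if the package is installed
square = [(0, 0), (10, 0), (10, 10), (0, 10)]
print(is_inside_polygon(square, (5, 5)))      # True
print(is_inside_polygon(square, (15, 5)))     # False
r1 = [0, 0, 10, 10]
r2 = [5, 5, 20, 20]
print(rects_intersect(r1, r2))                # True
print(rects_overlap(r1, r2, min_ratio=0.5))   # True: the overlap is exactly half of r1 on each axis
print(distance((0, 0), (3, 4)))               # 5.0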
import os
import time
import numpy as np
import pandas as pd
import librosa
import configparser
import soundfile as sf
from keras.models import model_from_json
from keras import backend as K
from sklearn.cluster import AgglomerativeClustering, KMeans, DBSCAN
import sklearn
import tensorflow as tf
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn import manifold
class UnloadedException(Exception):
    """Raised when analyze() is called before load() has loaded the model."""
class SpeakerClusterAnalyzer:
def __init__(self):
self.loaded = False
self.model = None
def refFun(self, S):
return np.log10(1 + 10000 * S)
def load(self):
if not self.loaded:
# We load the JSON file and create the model
json_file = open(os.path.join(os.sep, 'data/sound/clustering/model.json'), 'r')
loaded_model_json = json_file.read()
json_file.close()
self.model = model_from_json(loaded_model_json)
# We load the weights into our model
self.model.load_weights(
os.path.join(os.sep, "data/sound/clustering/weights.h5"))
cfg = configparser.ConfigParser()
cfg.read(os.path.join(os.path.dirname(__file__), 'config.cfg'))
self.SOUND_FORMAT = cfg.get("IO", "sound_format")
self.PARAM_FRAME_LENGTH = int(cfg.get("MEL", "frame_length"))
self.PARAM_NUMBER_MELS = int(cfg.get("MEL", "n_mels"))
self.loaded = True
print("* Loaded model from disk")
def analyze(self, inputFile, count=2):
if not(self.loaded):
raise UnloadedException()
timeS = time.time()
try:
(signal, samplerate) = sf.read(inputFile)
except:
print(
"Error with chunk file. Unable to perform features extraction on the file.")
raise Exception()
# The number of columns in the dataset (except for index)
dataset_shape = (self.PARAM_FRAME_LENGTH / 10) * self.PARAM_NUMBER_MELS
X_test_vectors = [ np.repeat(0, dataset_shape) ]
signal = librosa.to_mono(np.transpose(signal))
signal, _ = librosa.effects.trim(signal, top_db=50)
#spectrogram = librosa.feature.melspectrogram(signal, sr=samplerate, n_fft=1024, hop_length=160, fmin=240, fmax=3000)
spectrogram = librosa.feature.melspectrogram(signal, sr=samplerate, n_fft=1024, hop_length=160)
logSpectrogram = self.refFun(spectrogram)
signalLength = float(len(signal) / samplerate) * 1000
indexPosition = 0
while indexPosition < signalLength - self.PARAM_FRAME_LENGTH:
row = np.asarray(logSpectrogram[:, int(indexPosition / 10):int((indexPosition + self.PARAM_FRAME_LENGTH) / 10)]).ravel()
X_test_vectors.append(row)
indexPosition += self.PARAM_FRAME_LENGTH
X_test_vectors = X_test_vectors[1:] # We remove first row which is only 0
X_test = []
for i in range(len(X_test_vectors)):
matrix = np.zeros((self.PARAM_NUMBER_MELS, int(self.PARAM_FRAME_LENGTH / 10)))
for l in range(self.PARAM_NUMBER_MELS):
for m in range(int(self.PARAM_FRAME_LENGTH / 10)):
matrix[l, m] = X_test_vectors[i][l * int(self.PARAM_FRAME_LENGTH / 10) + m]
X_test.append([matrix])
# Creating vector into clustering space
cluster_space_layer = K.function([self.model.layers[0].input], [self.model.layers[7].output])
layer_output = cluster_space_layer([X_test])[0]
cosinus_dist = 1. - sklearn.metrics.pairwise.cosine_similarity(layer_output)
cosinus_dist[cosinus_dist < 0] = 0
cosine_tsne = manifold.TSNE(n_components=2, metric='precomputed').fit_transform(cosinus_dist)
Z = linkage(layer_output, metric='cosine', method='complete')
minDist = max([row[2] for row in Z])
nb_clusters = len(Z)
for i in range(len(Z)-1):
if (minDist > Z[i+1][2] - Z[i][2]):
minDist = Z[i+1][2] - Z[i][2]
nb_clusters = i
if count is None:
count = 2
        count = int(count)  # keep the coercion's result; the original bare int(count) discarded it
clustering = AgglomerativeClustering(affinity='cosine', linkage="complete", n_clusters=count).fit_predict(layer_output)
# Now we need to find indexes when current speaker changes
flags = []
currentSpeaker = clustering[0]
for i in range(1, len(clustering)):
if clustering[i] != currentSpeaker:
currentSpeaker = clustering[i]
flags.append(i)
finalClustering = []
for flag in flags:
fragment = signal[(flag-1)*samplerate:(flag+1)*samplerate]
chroma = librosa.feature.chroma_cens(y=fragment, sr=samplerate)
#librosa.output.write_wav("output/test_fragment.wav", test_fragment, samplerate)
bounds = librosa.segment.agglomerative(chroma, 3)
speakerStartPos = (flag-1) + librosa.frames_to_time(bounds, sr=samplerate)[1]
finalClustering.append(float("{0:.3f}".format(speakerStartPos)))
flags.insert(0, 0)
finalClustering.insert(0, 0)
result = [[] for i in range(count)]
for i in range(1, len(flags)):
print(flags[i] - 1)
n = clustering[flags[i] - 1]
result[n].append((finalClustering[i-1], finalClustering[i] - 0.001))
result[clustering[-1]].append((finalClustering[-1], "EOF"))
#clustering = KMeans(n_clusters=4).fit_predict(layer_output)
return {'res': result, 'exec_time': time.time() - timeS} | app/src/aipcloud/sound/clustering/clustering.py |
import os
import time
import numpy as np
import pandas as pd
import librosa
import configparser
import soundfile as sf
from keras.models import model_from_json
from keras import backend as K
from sklearn.cluster import AgglomerativeClustering, KMeans, DBSCAN
import sklearn
import tensorflow as tf
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn import manifold
class UnloadedException(Exception):
    """Raised when analyze() is called before load() has loaded the model."""
class SpeakerClusterAnalyzer:
def __init__(self):
self.loaded = False
self.model = None
def refFun(self, S):
return np.log10(1 + 10000 * S)
def load(self):
if not self.loaded:
# We load the JSON file and create the model
json_file = open(os.path.join(os.sep, 'data/sound/clustering/model.json'), 'r')
loaded_model_json = json_file.read()
json_file.close()
self.model = model_from_json(loaded_model_json)
# We load the weights into our model
self.model.load_weights(
os.path.join(os.sep, "data/sound/clustering/weights.h5"))
cfg = configparser.ConfigParser()
cfg.read(os.path.join(os.path.dirname(__file__), 'config.cfg'))
self.SOUND_FORMAT = cfg.get("IO", "sound_format")
self.PARAM_FRAME_LENGTH = int(cfg.get("MEL", "frame_length"))
self.PARAM_NUMBER_MELS = int(cfg.get("MEL", "n_mels"))
self.loaded = True
print("* Loaded model from disk")
def analyze(self, inputFile, count=2):
if not(self.loaded):
raise UnloadedException()
timeS = time.time()
try:
(signal, samplerate) = sf.read(inputFile)
except:
print(
"Error with chunk file. Unable to perform features extraction on the file.")
raise Exception()
# The number of columns in the dataset (except for index)
dataset_shape = (self.PARAM_FRAME_LENGTH / 10) * self.PARAM_NUMBER_MELS
X_test_vectors = [ np.repeat(0, dataset_shape) ]
signal = librosa.to_mono(np.transpose(signal))
signal, _ = librosa.effects.trim(signal, top_db=50)
#spectrogram = librosa.feature.melspectrogram(signal, sr=samplerate, n_fft=1024, hop_length=160, fmin=240, fmax=3000)
spectrogram = librosa.feature.melspectrogram(signal, sr=samplerate, n_fft=1024, hop_length=160)
logSpectrogram = self.refFun(spectrogram)
signalLength = float(len(signal) / samplerate) * 1000
indexPosition = 0
while indexPosition < signalLength - self.PARAM_FRAME_LENGTH:
row = np.asarray(logSpectrogram[:, int(indexPosition / 10):int((indexPosition + self.PARAM_FRAME_LENGTH) / 10)]).ravel()
X_test_vectors.append(row)
indexPosition += self.PARAM_FRAME_LENGTH
X_test_vectors = X_test_vectors[1:] # We remove first row which is only 0
X_test = []
for i in range(len(X_test_vectors)):
matrix = np.zeros((self.PARAM_NUMBER_MELS, int(self.PARAM_FRAME_LENGTH / 10)))
for l in range(self.PARAM_NUMBER_MELS):
for m in range(int(self.PARAM_FRAME_LENGTH / 10)):
matrix[l, m] = X_test_vectors[i][l * int(self.PARAM_FRAME_LENGTH / 10) + m]
X_test.append([matrix])
# Creating vector into clustering space
cluster_space_layer = K.function([self.model.layers[0].input], [self.model.layers[7].output])
layer_output = cluster_space_layer([X_test])[0]
cosinus_dist = 1. - sklearn.metrics.pairwise.cosine_similarity(layer_output)
cosinus_dist[cosinus_dist < 0] = 0
cosine_tsne = manifold.TSNE(n_components=2, metric='precomputed').fit_transform(cosinus_dist)
Z = linkage(layer_output, metric='cosine', method='complete')
minDist = max([row[2] for row in Z])
nb_clusters = len(Z)
for i in range(len(Z)-1):
if (minDist > Z[i+1][2] - Z[i][2]):
minDist = Z[i+1][2] - Z[i][2]
nb_clusters = i
if count is None:
count = 2
        count = int(count)  # keep the coercion's result; the original bare int(count) discarded it
clustering = AgglomerativeClustering(affinity='cosine', linkage="complete", n_clusters=count).fit_predict(layer_output)
# Now we need to find indexes when current speaker changes
flags = []
currentSpeaker = clustering[0]
for i in range(1, len(clustering)):
if clustering[i] != currentSpeaker:
currentSpeaker = clustering[i]
flags.append(i)
finalClustering = []
for flag in flags:
fragment = signal[(flag-1)*samplerate:(flag+1)*samplerate]
chroma = librosa.feature.chroma_cens(y=fragment, sr=samplerate)
#librosa.output.write_wav("output/test_fragment.wav", test_fragment, samplerate)
bounds = librosa.segment.agglomerative(chroma, 3)
speakerStartPos = (flag-1) + librosa.frames_to_time(bounds, sr=samplerate)[1]
finalClustering.append(float("{0:.3f}".format(speakerStartPos)))
flags.insert(0, 0)
finalClustering.insert(0, 0)
result = [[] for i in range(count)]
for i in range(1, len(flags)):
print(flags[i] - 1)
n = clustering[flags[i] - 1]
result[n].append((finalClustering[i-1], finalClustering[i] - 0.001))
result[clustering[-1]].append((finalClustering[-1], "EOF"))
#clustering = KMeans(n_clusters=4).fit_predict(layer_output)
return {'res': result, 'exec_time': time.time() - timeS} | 0.529507 | 0.221414 |
from typing import List, Tuple, Dict
class ScoreContributionSpecification:
"""
Specifies how to calculate the score of a goal-directed benchmark.
The global score will be a weighted average of top-x scores.
This class specifies which top-x to consider and what the corresponding weights are.
"""
def __init__(self, contributions: List[Tuple[int, float]]) -> None:
"""
Args:
contributions: List of tuples (top_count, weight) for the score contributions
"""
self.contributions = contributions
@property
def top_counts(self) -> List[int]:
return [x[0] for x in self.contributions]
@property
def weights(self) -> List[float]:
return [x[1] for x in self.contributions]
def uniform_specification(*top_counts: int) -> ScoreContributionSpecification:
"""
Creates an instance of ScoreContributionSpecification where all the top-x contributions have equal weight
Args:
top_counts: list of values, where each value x will correspond to the top-x contribution
"""
contributions = [(x, 1.0) for x in top_counts]
return ScoreContributionSpecification(contributions=contributions)
def compute_global_score(contribution_specification: ScoreContributionSpecification,
scores: List[float]) -> Tuple[float, Dict[str, float]]:
"""
Computes the global score according to the contribution specification.
Args:
contribution_specification: Score contribution specification
scores: List of all scores - list must be long enough for all top_counts in contribution_specification
Returns:
Tuple with the global score and a dict with the considered top-x scores
"""
sorted_scores = sorted(scores, reverse=True)
global_score = 0.0
top_x_dict = {}
for top_count, weight in contribution_specification.contributions:
score = sum(sorted_scores[:top_count]) / top_count
top_x_dict[f'top_{top_count}'] = score
global_score += score * weight
global_score /= sum(contribution_specification.weights)
return global_score, top_x_dict | examples/generation/docking_generation/guacamol_tdc/guacamol/guacamol/goal_directed_score_contributions.py | from typing import List, Tuple, Dict
class ScoreContributionSpecification:
"""
Specifies how to calculate the score of a goal-directed benchmark.
The global score will be a weighted average of top-x scores.
This class specifies which top-x to consider and what the corresponding weights are.
"""
def __init__(self, contributions: List[Tuple[int, float]]) -> None:
"""
Args:
contributions: List of tuples (top_count, weight) for the score contributions
"""
self.contributions = contributions
@property
def top_counts(self) -> List[int]:
return [x[0] for x in self.contributions]
@property
def weights(self) -> List[float]:
return [x[1] for x in self.contributions]
def uniform_specification(*top_counts: int) -> ScoreContributionSpecification:
"""
Creates an instance of ScoreContributionSpecification where all the top-x contributions have equal weight
Args:
top_counts: list of values, where each value x will correspond to the top-x contribution
"""
contributions = [(x, 1.0) for x in top_counts]
return ScoreContributionSpecification(contributions=contributions)
def compute_global_score(contribution_specification: ScoreContributionSpecification,
scores: List[float]) -> Tuple[float, Dict[str, float]]:
"""
Computes the global score according to the contribution specification.
Args:
contribution_specification: Score contribution specification
scores: List of all scores - list must be long enough for all top_counts in contribution_specification
Returns:
Tuple with the global score and a dict with the considered top-x scores
"""
sorted_scores = sorted(scores, reverse=True)
global_score = 0.0
top_x_dict = {}
for top_count, weight in contribution_specification.contributions:
score = sum(sorted_scores[:top_count]) / top_count
top_x_dict[f'top_{top_count}'] = score
global_score += score * weight
global_score /= sum(contribution_specification.weights)
return global_score, top_x_dict | 0.960556 | 0.740221 |
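A short usage sketch for the scoring helpers above; the score list is synthetic and the printed numbers follow directly from the definitions (top-k means of the sorted scores, then a weight-normalised average).
# import uniform_specification / compute_global_score from
# guacamol.goal_directed_score_contributions when running outside this file
spec = uniform_specification(1, 10, 100)       # equal weight on top-1, top-10 and top-100
scores = [i / 100.0 for i in range(100)]       # 100 dummy molecule scores: 0.00 .. 0.99
global_score, per_top = compute_global_score(spec, scores)
print(per_top)        # {'top_1': 0.99, 'top_10': 0.945, 'top_100': 0.495}
print(global_score)   # (0.99 + 0.945 + 0.495) / 3 = 0.81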
from database.database import MongoManager
from pydantic.networks import EmailStr
from auth.manager.auth import authenticate_manager, create_manager, send_sign_up_code_verification
from models.manager import ManagerCreate, ManagerLogin
from auth.constants import REFRESH_TOKEN_EXPIRES_TIME, SECRET_KEY, TOKEN_EXPIRES_TIME
from fastapi import HTTPException, Depends, APIRouter, status, Body, BackgroundTasks
from fastapi_jwt_auth import AuthJWT
from database import get_database
from pydantic import BaseModel
router = APIRouter()
class Settings(BaseModel):
authjwt_secret_key:str = SECRET_KEY
authjwt_access_token_expires = TOKEN_EXPIRES_TIME
authjwt_refresh_token_expires = REFRESH_TOKEN_EXPIRES_TIME
@AuthJWT.load_config
def get_config():
return Settings()
@router.post("/signup")
async def sign_up(background_tasks: BackgroundTasks,new_manager: ManagerCreate = Body(...), db:MongoManager=Depends(get_database),):
manager = await create_manager(db=db,manager=new_manager)
if manager:
background_tasks.add_task(send_sign_up_code_verification,manager.email,db)
return manager
else:
return "email_already_exists"
@router.post("/resend-signup-verification")
async def resend_signup_verification(background_tasks: BackgroundTasks,email: EmailStr = Body(...), db:MongoManager=Depends(get_database)):
manager = await db.manager_repo.get_managerDB({'email':email})
if manager and not (manager.verified):
background_tasks.add_task(send_sign_up_code_verification,manager.email,db)
return "verification_sent"
else:
return "user_not_exists_or_alredy_verified"
@router.post('/login')
async def login(creds: ManagerLogin, Authorize: AuthJWT = Depends(), db = Depends(get_database)):
email = await authenticate_manager(creds.username, creds.password,db)
if not email:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
# Use create_access_token() and create_refresh_token() to create our
access_token = Authorize.create_access_token(subject=creds.username)
refresh_token = Authorize.create_refresh_token(subject=creds.username)
return {"access_token": access_token, "refresh_token": refresh_token}
@router.post('/refresh')
def refresh(Authorize: AuthJWT = Depends()):
Authorize.jwt_refresh_token_required()
current_user = Authorize.get_jwt_subject()
new_access_token = Authorize.create_access_token(subject=current_user)
return {"access_token": new_access_token} | auth/manager/router.py | from database.database import MongoManager
from pydantic.networks import EmailStr
from auth.manager.auth import authenticate_manager, create_manager, send_sign_up_code_verification
from models.manager import ManagerCreate, ManagerLogin
from auth.constants import REFRESH_TOKEN_EXPIRES_TIME, SECRET_KEY, TOKEN_EXPIRES_TIME
from fastapi import HTTPException, Depends, APIRouter, status, Body, BackgroundTasks
from fastapi_jwt_auth import AuthJWT
from database import get_database
from pydantic import BaseModel
router = APIRouter()
class Settings(BaseModel):
authjwt_secret_key:str = SECRET_KEY
authjwt_access_token_expires = TOKEN_EXPIRES_TIME
authjwt_refresh_token_expires = REFRESH_TOKEN_EXPIRES_TIME
@AuthJWT.load_config
def get_config():
return Settings()
@router.post("/signup")
async def sign_up(background_tasks: BackgroundTasks,new_manager: ManagerCreate = Body(...), db:MongoManager=Depends(get_database),):
manager = await create_manager(db=db,manager=new_manager)
if manager:
background_tasks.add_task(send_sign_up_code_verification,manager.email,db)
return manager
else:
return "email_already_exists"
@router.post("/resend-signup-verification")
async def resend_signup_verification(background_tasks: BackgroundTasks,email: EmailStr = Body(...), db:MongoManager=Depends(get_database)):
manager = await db.manager_repo.get_managerDB({'email':email})
if manager and not (manager.verified):
background_tasks.add_task(send_sign_up_code_verification,manager.email,db)
return "verification_sent"
else:
return "user_not_exists_or_alredy_verified"
@router.post('/login')
async def login(creds: ManagerLogin, Authorize: AuthJWT = Depends(), db = Depends(get_database)):
email = await authenticate_manager(creds.username, creds.password,db)
if not email:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
# Use create_access_token() and create_refresh_token() to create our
access_token = Authorize.create_access_token(subject=creds.username)
refresh_token = Authorize.create_refresh_token(subject=creds.username)
return {"access_token": access_token, "refresh_token": refresh_token}
@router.post('/refresh')
def refresh(Authorize: AuthJWT = Depends()):
Authorize.jwt_refresh_token_required()
current_user = Authorize.get_jwt_subject()
new_access_token = Authorize.create_access_token(subject=current_user)
return {"access_token": new_access_token} | 0.334916 | 0.075109 |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Backend Register."""
import os
from .common.class_factory import ClassFactory
from ..search_space.networks import NetworkFactory
def register_pytorch():
"""Register class factory of pytorch."""
# register trainer
import vega.core.trainer.timm_trainer_callback
# register evaluator
from vega.core.evaluator.evaluator import Evaluator
from vega.core.evaluator.hava_d_evaluator import HavaDEvaluator
from vega.core.evaluator.davinci_mobile_evaluator import DavinciMobileEvaluator
from vega.core.evaluator.gpu_evaluator import GpuEvaluator
# register metrics
import vega.core.metrics.pytorch
# reigister datasets
import vega.datasets.pytorch
# register networks
import vega.search_space.networks.pytorch
import vega.model_zoo
def register_tensorflow():
"""Register class factory of tensorflow."""
# register metrics
import vega.core.metrics.tensorflow
# register datasets
import vega.datasets.tensorflow
# register networks
import vega.search_space.networks.tensorflow
def set_backend(backend='pytorch', device_category='GPU'):
"""Set backend.
:param backend: backend type, default pytorch
:type backend: str
"""
if "BACKEND_TYPE" in os.environ:
return
if 'CUDA_VISIBLE_DEVICES' in os.environ:
os.environ['DEVICE_CATEGORY'] = 'GPU'
elif 'NPU-VISIBLE-DEVICES' in os.environ:
os.environ['DEVICE_CATEGORY'] = 'NPU'
os.environ['ORIGIN_RANK_TABLE_FILE'] = os.environ.get('RANK_TABLE_FILE', '')
os.environ['ORIGIN_RANK_SIZE'] = os.environ['RANK_SIZE']
if device_category is not None:
os.environ['DEVICE_CATEGORY'] = device_category
if backend == 'pytorch':
os.environ['BACKEND_TYPE'] = 'PYTORCH'
register_pytorch()
elif backend == 'tensorflow':
os.environ['BACKEND_TYPE'] = 'TENSORFLOW'
register_tensorflow()
else:
raise Exception('backend must be pytorch or tensorflow')
import vega.core.trainer.trainer
import vega.search_space.search_algs.ps_differential
import vega.algorithms
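# Illustrative usage sketch (assumes the vega package and its submodules are importable):
#   set_backend(backend='pytorch', device_category='GPU')
#   assert is_torch_backend() and is_gpu_device()
# Note that the call is effectively one-shot: once BACKEND_TYPE is present in os.environ,
# subsequent calls return early without re-registering anything.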
def is_gpu_device():
"""Return whether is gpu device or not."""
return os.environ.get('DEVICE_CATEGORY', None) == 'GPU'
def is_npu_device():
"""Return whether is npu device or not."""
return os.environ.get('DEVICE_CATEGORY', None) == 'NPU'
def is_torch_backend():
"""Return whether is pytorch backend or not."""
return os.environ.get('BACKEND_TYPE', None) == 'PYTORCH'
def is_tf_backend():
"""Return whether is tensorflow backend or not."""
return os.environ.get('BACKEND_TYPE', None) == 'TENSORFLOW' | built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/vega/core/backend_register.py |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Backend Register."""
import os
from .common.class_factory import ClassFactory
from ..search_space.networks import NetworkFactory
def register_pytorch():
"""Register class factory of pytorch."""
# register trainer
import vega.core.trainer.timm_trainer_callback
# register evaluator
from vega.core.evaluator.evaluator import Evaluator
from vega.core.evaluator.hava_d_evaluator import HavaDEvaluator
from vega.core.evaluator.davinci_mobile_evaluator import DavinciMobileEvaluator
from vega.core.evaluator.gpu_evaluator import GpuEvaluator
# register metrics
import vega.core.metrics.pytorch
    # register datasets
import vega.datasets.pytorch
# register networks
import vega.search_space.networks.pytorch
import vega.model_zoo
def register_tensorflow():
"""Register class factory of tensorflow."""
# register metrics
import vega.core.metrics.tensorflow
# register datasets
import vega.datasets.tensorflow
# register networks
import vega.search_space.networks.tensorflow
def set_backend(backend='pytorch', device_category='GPU'):
"""Set backend.
:param backend: backend type, default pytorch
:type backend: str
"""
if "BACKEND_TYPE" in os.environ:
return
if 'CUDA_VISIBLE_DEVICES' in os.environ:
os.environ['DEVICE_CATEGORY'] = 'GPU'
elif 'NPU-VISIBLE-DEVICES' in os.environ:
os.environ['DEVICE_CATEGORY'] = 'NPU'
os.environ['ORIGIN_RANK_TABLE_FILE'] = os.environ.get('RANK_TABLE_FILE', '')
os.environ['ORIGIN_RANK_SIZE'] = os.environ['RANK_SIZE']
if device_category is not None:
os.environ['DEVICE_CATEGORY'] = device_category
if backend == 'pytorch':
os.environ['BACKEND_TYPE'] = 'PYTORCH'
register_pytorch()
elif backend == 'tensorflow':
os.environ['BACKEND_TYPE'] = 'TENSORFLOW'
register_tensorflow()
else:
raise Exception('backend must be pytorch or tensorflow')
import vega.core.trainer.trainer
import vega.search_space.search_algs.ps_differential
import vega.algorithms
def is_gpu_device():
"""Return whether is gpu device or not."""
return os.environ.get('DEVICE_CATEGORY', None) == 'GPU'
def is_npu_device():
"""Return whether is npu device or not."""
return os.environ.get('DEVICE_CATEGORY', None) == 'NPU'
def is_torch_backend():
"""Return whether is pytorch backend or not."""
return os.environ.get('BACKEND_TYPE', None) == 'PYTORCH'
def is_tf_backend():
"""Return whether is tensorflow backend or not."""
return os.environ.get('BACKEND_TYPE', None) == 'TENSORFLOW' | 0.758063 | 0.111628 |
import json
from urllib import parse
from paddleflow.common.exception.paddleflow_sdk_exception import PaddleFlowSDKException
from paddleflow.utils import api_client
from paddleflow.common import api
from paddleflow.queue.queue_info import QueueInfo
from paddleflow.queue.queue_info import GrantInfo
class QueueServiceApi(object):
"""queue service api"""
def __init__(self):
"""
"""
@classmethod
def add_queue(self, host, name, namespace, clusterName, maxResources, minResources=None,
schedulingPolicy=None, location=None, quotaType=None, header=None):
"""
add queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
body = {
"namespace": namespace,
"name": name,
"clusterName": clusterName,
"maxResources": maxResources,
}
if minResources:
body['minResources'] = minResources
if schedulingPolicy:
body['schedulingPolicy'] = schedulingPolicy
if location:
body['location'] = location
if quotaType:
body['quotaType'] = quotaType
response = api_client.call_api(method="POST", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE), headers=header,
json=body)
if not response:
raise PaddleFlowSDKException("Connection Error", "add queue failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
return True, None
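    # Illustrative sketch (hypothetical host and auth header obtained from a prior login;
    # the exact resource-dict format is an assumption based on typical Kubernetes-style
    # quantities):
    #   ok, err = QueueServiceApi.add_queue(
    #       "http://127.0.0.1:8999", "train-queue", "default", "cluster-1",
    #       maxResources={"cpu": "10", "mem": "20Gi"}, header=auth_header)
    #   if not ok:
    #       print("add queue failed:", err)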
@classmethod
def grant_queue(self, host, username, queuename, header=None):
"""
grant queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
body = {
"username": username,
"resourceType": "queue",
"resourceID": queuename
}
response = api_client.call_api(method="POST", url=parse.urljoin(host, api.PADDLE_FLOW_GRANT),
headers=header, json=body)
if not response:
raise PaddleFlowSDKException("Connection Error", "grant queue failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
return True, None
@classmethod
def ungrant_queue(self, host, username, queuename, header=None):
"""
ungrant queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
## call grant
params = {
"username": username,
"resourceType": "queue",
"resourceID": queuename
}
response = api_client.call_api(method="DELETE", url=parse.urljoin(host, api.PADDLE_FLOW_GRANT),
headers=header, params=params)
        # check the response object itself before touching response.text, otherwise a
        # failed request would either raise AttributeError or be silently treated as success
        if not response:
            raise PaddleFlowSDKException("Connection Error", "ungrant queue failed due to HTTPError")
        if not response.text:
            return True, None
data = json.loads(response.text)
if data and 'message' in data:
return False, data['message']
return True, None
@classmethod
def del_queue(self, host, queuename, header=None):
"""
delete queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
response = api_client.call_api(method="DELETE", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE +
"/%s" % queuename),
headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "delete queue failed due to HTTPError")
if not response.text:
return True, None
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
return True, None
@classmethod
def stop_queue(self, host, queuename, action=None, header=None):
"""
        stop queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
if action:
params = {"action":action}
else:
params = {"action":'close'}
response = api_client.call_api(method="PUT", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE +
"/%s" % queuename),
params=params, headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "stop queue failed due to HTTPError")
if not response.text:
return True, None
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
return True, None
@classmethod
def list_queue(self, host, header=None, maxsize=100, marker=None):
"""
list queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
if not isinstance(maxsize, int) or maxsize <= 0:
raise PaddleFlowSDKException("InvalidRequest", "maxsize should be int and greater than 0")
params = {
"maxKeys": maxsize
}
if marker:
params['marker'] = marker
response = api_client.call_api(method="GET", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE),
params=params, headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "list queue failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
queueList = []
if len(data['queueList']):
for queue in data['queueList']:
queueinfo = QueueInfo(queue['name'], queue['status'], queue['namespace'], queue['clusterName'], None,
queue['maxResources'], queue['minResources'], None, None,
queue['createTime'], queue['updateTime'])
queueList.append(queueinfo)
return True, queueList, data.get('nextMarker', None)
@classmethod
def show_queue(self, host, queuename, header=None):
"""
show queue info
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
response = api_client.call_api(method="GET", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE + "/%s" % queuename),
headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "show queue failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
queueInfo = QueueInfo(data['name'], data['status'], data['namespace'], data['clusterName'], None,
data['maxResources'], data.get('minResources'), data.get('location'),
data.get('schedulingPolicy'), data['createTime'], data['updateTime'])
return True, queueInfo
@classmethod
def show_grant(self, host, username=None, header=None, maxsize=100):
"""
show grant resources
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
if not isinstance(maxsize, int) or maxsize <= 0:
raise PaddleFlowSDKException("InvalidRequest", "maxsize should be int and greater than 0")
params = {
"maxKeys": maxsize
}
if username:
params['username'] = username
response = api_client.call_api(method="GET", url=parse.urljoin(host, api.PADDLE_FLOW_GRANT),
params=params, headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "show grant failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
grantList = []
if len(data['grantList']):
for grant in data['grantList']:
if grant['resourceType'] != "queue":
continue
grantinfo = GrantInfo(grant['userName'], grant['resourceID'])
grantList.append(grantinfo)
return True, grantList
@classmethod
def flavour(self, host, header=None):
"""
list flavour
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
response = api_client.call_api(method="GET", url=parse.urljoin(host, api.PADDLE_FLOW_FLAVOUR),
headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "list flavour failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
return True, data | client/paddleflow/queue/queue_api.py |
import json
from urllib import parse
from paddleflow.common.exception.paddleflow_sdk_exception import PaddleFlowSDKException
from paddleflow.utils import api_client
from paddleflow.common import api
from paddleflow.queue.queue_info import QueueInfo
from paddleflow.queue.queue_info import GrantInfo
class QueueServiceApi(object):
"""queue service api"""
def __init__(self):
"""
"""
@classmethod
def add_queue(self, host, name, namespace, clusterName, maxResources, minResources=None,
schedulingPolicy=None, location=None, quotaType=None, header=None):
"""
add queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
body = {
"namespace": namespace,
"name": name,
"clusterName": clusterName,
"maxResources": maxResources,
}
if minResources:
body['minResources'] = minResources
if schedulingPolicy:
body['schedulingPolicy'] = schedulingPolicy
if location:
body['location'] = location
if quotaType:
body['quotaType'] = quotaType
response = api_client.call_api(method="POST", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE), headers=header,
json=body)
if not response:
raise PaddleFlowSDKException("Connection Error", "add queue failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
return True, None
@classmethod
def grant_queue(self, host, username, queuename, header=None):
"""
grant queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
body = {
"username": username,
"resourceType": "queue",
"resourceID": queuename
}
response = api_client.call_api(method="POST", url=parse.urljoin(host, api.PADDLE_FLOW_GRANT),
headers=header, json=body)
if not response:
raise PaddleFlowSDKException("Connection Error", "grant queue failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
return True, None
@classmethod
def ungrant_queue(self, host, username, queuename, header=None):
"""
ungrant queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
## call grant
params = {
"username": username,
"resourceType": "queue",
"resourceID": queuename
}
response = api_client.call_api(method="DELETE", url=parse.urljoin(host, api.PADDLE_FLOW_GRANT),
headers=header, params=params)
        # check the response object itself before touching response.text, otherwise a
        # failed request would either raise AttributeError or be silently treated as success
        if not response:
            raise PaddleFlowSDKException("Connection Error", "ungrant queue failed due to HTTPError")
        if not response.text:
            return True, None
data = json.loads(response.text)
if data and 'message' in data:
return False, data['message']
return True, None
@classmethod
def del_queue(self, host, queuename, header=None):
"""
delete queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
response = api_client.call_api(method="DELETE", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE +
"/%s" % queuename),
headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "delete queue failed due to HTTPError")
if not response.text:
return True, None
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
return True, None
@classmethod
def stop_queue(self, host, queuename, action=None, header=None):
"""
        stop queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
if action:
params = {"action":action}
else:
params = {"action":'close'}
response = api_client.call_api(method="PUT", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE +
"/%s" % queuename),
params=params, headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "stop queue failed due to HTTPError")
if not response.text:
return True, None
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
return True, None
@classmethod
def list_queue(self, host, header=None, maxsize=100, marker=None):
"""
list queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
if not isinstance(maxsize, int) or maxsize <= 0:
raise PaddleFlowSDKException("InvalidRequest", "maxsize should be int and greater than 0")
params = {
"maxKeys": maxsize
}
if marker:
params['marker'] = marker
response = api_client.call_api(method="GET", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE),
params=params, headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "list queue failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
queueList = []
if len(data['queueList']):
for queue in data['queueList']:
queueinfo = QueueInfo(queue['name'], queue['status'], queue['namespace'], queue['clusterName'], None,
queue['maxResources'], queue['minResources'], None, None,
queue['createTime'], queue['updateTime'])
queueList.append(queueinfo)
return True, queueList, data.get('nextMarker', None)
@classmethod
def show_queue(self, host, queuename, header=None):
"""
show queue info
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
response = api_client.call_api(method="GET", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE + "/%s" % queuename),
headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "show queue failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
queueInfo = QueueInfo(data['name'], data['status'], data['namespace'], data['clusterName'], None,
data['maxResources'], data.get('minResources'), data.get('location'),
data.get('schedulingPolicy'), data['createTime'], data['updateTime'])
return True, queueInfo
@classmethod
def show_grant(self, host, username=None, header=None, maxsize=100):
"""
show grant resources
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
if not isinstance(maxsize, int) or maxsize <= 0:
raise PaddleFlowSDKException("InvalidRequest", "maxsize should be int and greater than 0")
params = {
"maxKeys": maxsize
}
if username:
params['username'] = username
response = api_client.call_api(method="GET", url=parse.urljoin(host, api.PADDLE_FLOW_GRANT),
params=params, headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "show grant failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
grantList = []
if len(data['grantList']):
for grant in data['grantList']:
if grant['resourceType'] != "queue":
continue
grantinfo = GrantInfo(grant['userName'], grant['resourceID'])
grantList.append(grantinfo)
return True, grantList
@classmethod
def flavour(self, host, header=None):
"""
list flavour
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
response = api_client.call_api(method="GET", url=parse.urljoin(host, api.PADDLE_FLOW_FLAVOUR),
headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "list flavour failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
return True, data | 0.5144 | 0.107813 |
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class RoleClassPartitive(GenericTypeCode):
"""
v3.RoleClassPartitive
From: http://terminology.hl7.org/ValueSet/v3-RoleClassPartitive in v3-codesystems.xml
An association between two Entities where the playing Entity is considered in
some way "part" of the scoping Entity, e.g., as a member, component,
ingredient, or content. Being "part" in the broadest sense of the word can
mean anything from being an integral structural component to a mere incidental
temporary association of a playing Entity with a (generally larger) scoping
Entity.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://terminology.hl7.org/CodeSystem/v3-RoleClass
"""
codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/v3-RoleClass"
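# Illustrative sketch: every constant in RoleClassPartitiveValues below simply wraps one
# code from the v3-RoleClass code system, e.g. (hypothetical usage)
#   child = RoleClassPartitive("CHILD")
#   child.codeset  # -> "http://terminology.hl7.org/CodeSystem/v3-RoleClass"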
class RoleClassPartitiveValues:
"""
Corresponds to the Role class
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
Role = RoleClassPartitive("ROL")
"""
The player of the role is a child of the scoping entity, in a generic sense.
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
Child = RoleClassPartitive("CHILD")
"""
A role played by an entity that receives credentials from the scoping entity.
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
CredentialedEntity = RoleClassPartitive("CRED")
"""
nurse practitioner
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
NursePractitioner = RoleClassPartitive("NURPRAC")
"""
nurse
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
Nurse = RoleClassPartitive("NURS")
"""
physician assistant
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
PhysicianAssistant = RoleClassPartitive("PA")
"""
physician
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
Physician = RoleClassPartitive("PHYS") | spark_auto_mapper_fhir/value_sets/role_class_partitive.py | from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class RoleClassPartitive(GenericTypeCode):
"""
v3.RoleClassPartitive
From: http://terminology.hl7.org/ValueSet/v3-RoleClassPartitive in v3-codesystems.xml
An association between two Entities where the playing Entity is considered in
some way "part" of the scoping Entity, e.g., as a member, component,
ingredient, or content. Being "part" in the broadest sense of the word can
mean anything from being an integral structural component to a mere incidental
temporary association of a playing Entity with a (generally larger) scoping
Entity.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://terminology.hl7.org/CodeSystem/v3-RoleClass
"""
codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/v3-RoleClass"
class RoleClassPartitiveValues:
"""
Corresponds to the Role class
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
Role = RoleClassPartitive("ROL")
"""
The player of the role is a child of the scoping entity, in a generic sense.
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
Child = RoleClassPartitive("CHILD")
"""
A role played by an entity that receives credentials from the scoping entity.
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
CredentialedEntity = RoleClassPartitive("CRED")
"""
nurse practitioner
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
NursePractitioner = RoleClassPartitive("NURPRAC")
"""
nurse
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
Nurse = RoleClassPartitive("NURS")
"""
physician assistant
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
PhysicianAssistant = RoleClassPartitive("PA")
"""
physician
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
Physician = RoleClassPartitive("PHYS") | 0.7874 | 0.263831 |
import os
from plotly.graph_objs import Bar, Figure, Layout
from ..memberlookup import message_to_author, id_to_name
from ..statistic import statistic
from ..plotly_helpers import marker, try_saving_plotly_figure
class Total(object):
def __init__(self, author):
self.author = author
self.hearts_received = 0
def add_message(self, message):
self.hearts_received += len(message.favorited_by)
@statistic
class HeartsReceivedPlot(object):
def calculate(self, group, messages, ignore_users=[], **kwargs):
id_to_totals = {}
for message in messages:
user_id = message.user_id
if user_id == "system": continue
if user_id not in id_to_totals:
id_to_totals[user_id] = Total(message_to_author(message))
id_to_totals[user_id].add_message(message)
totals = sorted(id_to_totals.values(),
key=lambda total: total.author)
self._totals = [total for total in totals
if total.author not in ignore_users]
self._group_name = group.name
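    # Illustrative sketch (hypothetical `group` and `messages` objects supplied by the
    # surrounding groupmestats pipeline):
    #   stat = HeartsReceivedPlot()
    #   stat.calculate(group, messages, ignore_users=["GroupMe"])
    #   stat.show()  # writes "<group name>/<group name> - Hearts Received.png"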
def show(self):
if not os.path.exists(self._group_name):
os.mkdir(self._group_name)
x_axis = [total.author for total in self._totals]
y_axis = [total.hearts_received for total in self._totals]
hearts_received = Bar(
x=x_axis,
y=y_axis,
marker=marker,
)
data = [hearts_received]
layout = Layout(
title="%s - Total Hearts Received" % self._group_name,
autosize=False,
width=30*len(x_axis) + 300,
height=800,
showlegend=False,
annotations=[
dict(x=xi, y=yi,
text=str(yi),
xanchor='center',
yanchor='bottom',
showarrow=False,
) for xi, yi in zip(x_axis, y_axis)],
yaxis=dict(title="Hearts Received"),
)
figure = Figure(data=data, layout=layout)
filename = os.path.join(self._group_name, "%s - Hearts Received.png"
% self._group_name)
try_saving_plotly_figure(figure, filename) | groupmestats/statistics/heartsreceivedplot.py | import os
from plotly.graph_objs import Bar, Figure, Layout
from ..memberlookup import message_to_author, id_to_name
from ..statistic import statistic
from ..plotly_helpers import marker, try_saving_plotly_figure
class Total(object):
def __init__(self, author):
self.author = author
self.hearts_received = 0
def add_message(self, message):
self.hearts_received += len(message.favorited_by)
@statistic
class HeartsReceivedPlot(object):
def calculate(self, group, messages, ignore_users=[], **kwargs):
id_to_totals = {}
for message in messages:
user_id = message.user_id
if user_id == "system": continue
if user_id not in id_to_totals:
id_to_totals[user_id] = Total(message_to_author(message))
id_to_totals[user_id].add_message(message)
totals = sorted(id_to_totals.values(),
key=lambda total: total.author)
self._totals = [total for total in totals
if total.author not in ignore_users]
self._group_name = group.name
def show(self):
if not os.path.exists(self._group_name):
os.mkdir(self._group_name)
x_axis = [total.author for total in self._totals]
y_axis = [total.hearts_received for total in self._totals]
hearts_received = Bar(
x=x_axis,
y=y_axis,
marker=marker,
)
data = [hearts_received]
layout = Layout(
title="%s - Total Hearts Received" % self._group_name,
autosize=False,
width=30*len(x_axis) + 300,
height=800,
showlegend=False,
annotations=[
dict(x=xi, y=yi,
text=str(yi),
xanchor='center',
yanchor='bottom',
showarrow=False,
) for xi, yi in zip(x_axis, y_axis)],
yaxis=dict(title="Hearts Received"),
)
figure = Figure(data=data, layout=layout)
filename = os.path.join(self._group_name, "%s - Hearts Received.png"
% self._group_name)
try_saving_plotly_figure(figure, filename) | 0.356447 | 0.133641 |
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rjoyride.proto\x12\x07joyride\"7\n\x0bRideRequest\x12\r\n\x05start\x18\x01 \x01(\t\x12\x0b\n\x03\x65nd\x18\x02 \x01(\t\x12\x0c\n\x04time\x18\x03 \x01(\x05\"*\n\tRideReply\x12\x0c\n\x04node\x18\x01 \x01(\x03\x12\x0f\n\x07message\x18\x02 \x01(\t\"*\n\nRideRating\x12\x0e\n\x06rating\x18\x01 \x01(\x03\x12\x0c\n\x04path\x18\x02 \x03(\x03\"\x06\n\x04Null2|\n\x07JoyRide\x12:\n\nGetJoyRide\x12\x14.joyride.RideRequest\x1a\x12.joyride.RideReply\"\x00\x30\x01\x12\x35\n\rGetRideRating\x12\x13.joyride.RideRating\x1a\r.joyride.Null\"\x00\x42\x30\n\x18io.grpc.examples.joyrideB\x0cJoyrideProtoP\x01\xa2\x02\x03HLWb\x06proto3')
_RIDEREQUEST = DESCRIPTOR.message_types_by_name['RideRequest']
_RIDEREPLY = DESCRIPTOR.message_types_by_name['RideReply']
_RIDERATING = DESCRIPTOR.message_types_by_name['RideRating']
_NULL = DESCRIPTOR.message_types_by_name['Null']
RideRequest = _reflection.GeneratedProtocolMessageType('RideRequest', (_message.Message,), {
'DESCRIPTOR' : _RIDEREQUEST,
'__module__' : 'joyride_pb2'
# @@protoc_insertion_point(class_scope:joyride.RideRequest)
})
_sym_db.RegisterMessage(RideRequest)
RideReply = _reflection.GeneratedProtocolMessageType('RideReply', (_message.Message,), {
'DESCRIPTOR' : _RIDEREPLY,
'__module__' : 'joyride_pb2'
# @@protoc_insertion_point(class_scope:joyride.RideReply)
})
_sym_db.RegisterMessage(RideReply)
RideRating = _reflection.GeneratedProtocolMessageType('RideRating', (_message.Message,), {
'DESCRIPTOR' : _RIDERATING,
'__module__' : 'joyride_pb2'
# @@protoc_insertion_point(class_scope:joyride.RideRating)
})
_sym_db.RegisterMessage(RideRating)
Null = _reflection.GeneratedProtocolMessageType('Null', (_message.Message,), {
'DESCRIPTOR' : _NULL,
'__module__' : 'joyride_pb2'
# @@protoc_insertion_point(class_scope:joyride.Null)
})
_sym_db.RegisterMessage(Null)
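# Illustrative sketch (assumes this generated module is imported as joyride_pb2; the
# coordinate strings are hypothetical):
#   req = RideRequest(start="41.8781,-87.6298", end="41.9000,-87.6500", time=30)
#   payload = req.SerializeToString()
#   assert RideRequest.FromString(payload) == req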
_JOYRIDE = DESCRIPTOR.services_by_name['JoyRide']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\030io.grpc.examples.joyrideB\014JoyrideProtoP\001\242\002\003HLW'
_RIDEREQUEST._serialized_start=26
_RIDEREQUEST._serialized_end=81
_RIDEREPLY._serialized_start=83
_RIDEREPLY._serialized_end=125
_RIDERATING._serialized_start=127
_RIDERATING._serialized_end=169
_NULL._serialized_start=171
_NULL._serialized_end=177
_JOYRIDE._serialized_start=179
_JOYRIDE._serialized_end=303
# @@protoc_insertion_point(module_scope) | gen/joyride_pb2.py | """Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rjoyride.proto\x12\x07joyride\"7\n\x0bRideRequest\x12\r\n\x05start\x18\x01 \x01(\t\x12\x0b\n\x03\x65nd\x18\x02 \x01(\t\x12\x0c\n\x04time\x18\x03 \x01(\x05\"*\n\tRideReply\x12\x0c\n\x04node\x18\x01 \x01(\x03\x12\x0f\n\x07message\x18\x02 \x01(\t\"*\n\nRideRating\x12\x0e\n\x06rating\x18\x01 \x01(\x03\x12\x0c\n\x04path\x18\x02 \x03(\x03\"\x06\n\x04Null2|\n\x07JoyRide\x12:\n\nGetJoyRide\x12\x14.joyride.RideRequest\x1a\x12.joyride.RideReply\"\x00\x30\x01\x12\x35\n\rGetRideRating\x12\x13.joyride.RideRating\x1a\r.joyride.Null\"\x00\x42\x30\n\x18io.grpc.examples.joyrideB\x0cJoyrideProtoP\x01\xa2\x02\x03HLWb\x06proto3')
_RIDEREQUEST = DESCRIPTOR.message_types_by_name['RideRequest']
_RIDEREPLY = DESCRIPTOR.message_types_by_name['RideReply']
_RIDERATING = DESCRIPTOR.message_types_by_name['RideRating']
_NULL = DESCRIPTOR.message_types_by_name['Null']
RideRequest = _reflection.GeneratedProtocolMessageType('RideRequest', (_message.Message,), {
'DESCRIPTOR' : _RIDEREQUEST,
'__module__' : 'joyride_pb2'
# @@protoc_insertion_point(class_scope:joyride.RideRequest)
})
_sym_db.RegisterMessage(RideRequest)
RideReply = _reflection.GeneratedProtocolMessageType('RideReply', (_message.Message,), {
'DESCRIPTOR' : _RIDEREPLY,
'__module__' : 'joyride_pb2'
# @@protoc_insertion_point(class_scope:joyride.RideReply)
})
_sym_db.RegisterMessage(RideReply)
RideRating = _reflection.GeneratedProtocolMessageType('RideRating', (_message.Message,), {
'DESCRIPTOR' : _RIDERATING,
'__module__' : 'joyride_pb2'
# @@protoc_insertion_point(class_scope:joyride.RideRating)
})
_sym_db.RegisterMessage(RideRating)
Null = _reflection.GeneratedProtocolMessageType('Null', (_message.Message,), {
'DESCRIPTOR' : _NULL,
'__module__' : 'joyride_pb2'
# @@protoc_insertion_point(class_scope:joyride.Null)
})
_sym_db.RegisterMessage(Null)
_JOYRIDE = DESCRIPTOR.services_by_name['JoyRide']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\030io.grpc.examples.joyrideB\014JoyrideProtoP\001\242\002\003HLW'
_RIDEREQUEST._serialized_start=26
_RIDEREQUEST._serialized_end=81
_RIDEREPLY._serialized_start=83
_RIDEREPLY._serialized_end=125
_RIDERATING._serialized_start=127
_RIDERATING._serialized_end=169
_NULL._serialized_start=171
_NULL._serialized_end=177
_JOYRIDE._serialized_start=179
_JOYRIDE._serialized_end=303
# @@protoc_insertion_point(module_scope) | 0.282196 | 0.091707 |
import sys
import xbmc
import xbmcgui
import xbmcaddon
import time
import kodi
import log_utils
import utils
from salts_lib import salts_utils
from salts_lib import image_proxy
from salts_lib import utils2
from salts_lib.utils2 import i18n
from salts_lib.constants import MODES
from salts_lib.db_utils import DB_Connection
from salts_lib.trakt_api import Trakt_API
logger = log_utils.Logger.get_logger()
logger.disable()
class Service(xbmc.Player):
def __init__(self, *args, **kwargs):
logger.log('Service: starting...', log_utils.LOGNOTICE)
self.db_connection = DB_Connection()
xbmc.Player.__init__(self, *args, **kwargs)
self.win = xbmcgui.Window(10000)
self.reset()
def reset(self):
logger.log('Service: Resetting...', log_utils.LOGDEBUG)
self.win.clearProperty('salts.playing')
self.win.clearProperty('salts.playing.trakt_id')
self.win.clearProperty('salts.playing.season')
self.win.clearProperty('salts.playing.episode')
self.win.clearProperty('salts.playing.srt')
self.win.clearProperty('salts.playing.trakt_resume')
self.win.clearProperty('salts.playing.salts_resume')
self.win.clearProperty('salts.playing.library')
self._from_library = False
self.tracked = False
self._totalTime = 999999
self.trakt_id = None
self.season = None
self.episode = None
self._lastPos = 0
def onPlayBackStarted(self):
logger.log('Service: Playback started', log_utils.LOGNOTICE)
playing = self.win.getProperty('salts.playing') == 'True'
self.trakt_id = self.win.getProperty('salts.playing.trakt_id')
self.season = self.win.getProperty('salts.playing.season')
self.episode = self.win.getProperty('salts.playing.episode')
srt_path = self.win.getProperty('salts.playing.srt')
trakt_resume = self.win.getProperty('salts.playing.trakt_resume')
salts_resume = self.win.getProperty('salts.playing.salts_resume')
self._from_library = self.win.getProperty('salts.playing.library') == 'True'
if playing: # Playback is ours
logger.log('Service: tracking progress...', log_utils.LOGNOTICE)
self.tracked = True
if srt_path:
logger.log('Service: Enabling subtitles: %s' % (srt_path), log_utils.LOGDEBUG)
self.setSubtitles(srt_path)
else:
self.showSubtitles(False)
self._totalTime = 0
while self._totalTime == 0:
try:
self._totalTime = self.getTotalTime()
except RuntimeError:
self._totalTime = 0
break
kodi.sleep(1000)
if salts_resume:
logger.log("SaltsRD Local Resume: Resume Time: %s Total Time: %s" % (salts_resume, self._totalTime), log_utils.LOGDEBUG)
self.seekTime(float(salts_resume))
elif trakt_resume:
resume_time = float(trakt_resume) * self._totalTime / 100
logger.log("SaltsRD Trakt Resume: Percent: %s, Resume Time: %s Total Time: %s" % (trakt_resume, resume_time, self._totalTime), log_utils.LOGDEBUG)
self.seekTime(resume_time)
def onPlayBackStopped(self):
logger.log('Service: Playback Stopped', log_utils.LOGNOTICE)
if self.tracked:
# clear the playlist if SaltsRD was playing and only one item in playlist to
# use playlist to determine playback method in get_sources
pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
plugin_url = 'plugin://%s/' % (kodi.get_id())
if pl.size() == 1 and pl[0].getfilename().lower().startswith(plugin_url):
logger.log('Service: Clearing Single Item SaltsRD Playlist', log_utils.LOGDEBUG)
pl.clear()
playedTime = float(self._lastPos)
try: percent_played = int((playedTime / self._totalTime) * 100)
except: percent_played = 0 # guard div by zero
pTime = utils.format_time(playedTime)
tTime = utils.format_time(self._totalTime)
logger.log('Service: Played %s of %s total = %s%%' % (pTime, tTime, percent_played), log_utils.LOGDEBUG)
if playedTime == 0 and self._totalTime == 999999:
logger.log('Kodi silently failed to start playback', log_utils.LOGWARNING)
elif playedTime >= 5:
if percent_played <= 98:
logger.log('Service: Setting bookmark on |%s|%s|%s| to %s seconds' % (self.trakt_id, self.season, self.episode, playedTime), log_utils.LOGDEBUG)
self.db_connection.set_bookmark(self.trakt_id, playedTime, self.season, self.episode)
if percent_played >= 75 and self._from_library:
if kodi.has_addon('script.trakt'):
run = 'RunScript(script.trakt, action=sync, silent=True)'
xbmc.executebuiltin(run)
self.reset()
def onPlayBackEnded(self):
logger.log('Service: Playback completed', log_utils.LOGNOTICE)
self.onPlayBackStopped()
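# Illustrative sketch (hypothetical values): the plugin side is expected to signal its own
# playback to this service by setting window-10000 properties before starting the video,
# which onPlayBackStarted() reads back, e.g.
#   win = xbmcgui.Window(10000)
#   win.setProperty('salts.playing', 'True')
#   win.setProperty('salts.playing.trakt_id', '12345')
#   win.setProperty('salts.playing.season', '1')
#   win.setProperty('salts.playing.episode', '3')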
def disable_global_cx(was_on):
    if kodi.has_addon('plugin.program.super.favourites'):  # match the Super Favourites addon instantiated below
active_plugin = xbmc.getInfoLabel('Container.PluginName')
sf = xbmcaddon.Addon('plugin.program.super.favourites')
if active_plugin == kodi.get_id():
if sf.getSetting('CONTEXT') == 'true':
logger.log('Disabling Global CX while SaltsRD is active', log_utils.LOGDEBUG)
was_on = True
sf.setSetting('CONTEXT', 'false')
elif was_on:
logger.log('Re-enabling Global CX while SaltsRD is not active', log_utils.LOGDEBUG)
sf.setSetting('CONTEXT', 'true')
was_on = False
return was_on
def check_cooldown(cd_begin):
# black_list = ['plugin.video.livestreams']
# active_plugin = xbmc.getInfoLabel('Container.PluginName')
# if active_plugin in black_list:
# cd_begin = time.time()
active = 'false' if (time.time() - cd_begin) > 30 else 'true'
if kodi.get_setting('cool_down') != active:
kodi.set_setting('cool_down', active)
return cd_begin
def show_next_up(last_label, sf_begin):
token = kodi.get_setting('trakt_oauth_token')
if token and xbmc.getInfoLabel('Container.PluginName') == kodi.get_id() and xbmc.getInfoLabel('Container.Content') == 'tvshows':
if xbmc.getInfoLabel('ListItem.label') != last_label:
sf_begin = time.time()
last_label = xbmc.getInfoLabel('ListItem.label')
if sf_begin and (time.time() - sf_begin) >= int(kodi.get_setting('next_up_delay')):
liz_url = xbmc.getInfoLabel('ListItem.FileNameAndPath')
queries = kodi.parse_query(liz_url[liz_url.find('?'):])
if 'trakt_id' in queries:
try: list_size = int(kodi.get_setting('list_size'))
except: list_size = 30
try: trakt_timeout = int(kodi.get_setting('trakt_timeout'))
except: trakt_timeout = 20
trakt_api = Trakt_API(token, kodi.get_setting('use_https') == 'true', list_size, trakt_timeout, kodi.get_setting('trakt_offline') == 'true')
progress = trakt_api.get_show_progress(queries['trakt_id'], full=True)
if 'next_episode' in progress and progress['next_episode']:
if progress['completed'] or kodi.get_setting('next_unwatched') == 'true':
next_episode = progress['next_episode']
date = utils2.make_day(utils2.make_air_date(next_episode['first_aired']))
if kodi.get_setting('next_time') != '0':
date_time = '%s@%s' % (date, utils2.make_time(utils.iso_2_utc(next_episode['first_aired']), 'next_time'))
else:
date_time = date
msg = '[[COLOR deeppink]%s[/COLOR]] - %sx%s' % (date_time, next_episode['season'], next_episode['number'])
if next_episode['title']: msg += ' - %s' % (next_episode['title'])
duration = int(kodi.get_setting('next_up_duration')) * 1000
kodi.notify(header=i18n('next_episode'), msg=msg, duration=duration)
sf_begin = 0
else:
last_label = ''
return last_label, sf_begin
def main(argv=None): # @UnusedVariable
if sys.argv: argv = sys.argv # @UnusedVariable
MAX_ERRORS = 10
errors = 0
last_label = ''
sf_begin = 0
cd_begin = 0
was_on = False
logger.log('Service: Installed Version: %s' % (kodi.get_version()), log_utils.LOGNOTICE)
monitor = xbmc.Monitor()
proxy = image_proxy.ImageProxy()
service = Service()
salts_utils.do_startup_task(MODES.UPDATE_SUBS)
salts_utils.do_startup_task(MODES.PRUNE_CACHE)
while not monitor.abortRequested():
try:
is_playing = service.isPlaying()
salts_utils.do_scheduled_task(MODES.UPDATE_SUBS, is_playing)
salts_utils.do_scheduled_task(MODES.PRUNE_CACHE, is_playing)
if service.tracked and service.isPlayingVideo():
service._lastPos = service.getTime()
was_on = disable_global_cx(was_on)
cd_begin = check_cooldown(cd_begin)
if not proxy.running: proxy.start_proxy()
if kodi.get_setting('show_next_up') == 'true':
last_label, sf_begin = show_next_up(last_label, sf_begin)
except Exception as e:
errors += 1
if errors >= MAX_ERRORS:
logger.log('Service: Error (%s) received..(%s/%s)...Ending Service...' % (e, errors, MAX_ERRORS), log_utils.LOGERROR)
break
else:
logger.log('Service: Error (%s) received..(%s/%s)...Continuing Service...' % (e, errors, MAX_ERRORS), log_utils.LOGERROR)
else:
errors = 0
if monitor.waitForAbort(.5):
break
proxy.stop_proxy()
logger.log('Service: shutting down...', log_utils.LOGNOTICE)
if __name__ == '__main__':
sys.exit(main()) | plugin.video.saltsrd.lite/service.py | import sys
import xbmc
import xbmcgui
import xbmcaddon
import time
import kodi
import log_utils
import utils
from salts_lib import salts_utils
from salts_lib import image_proxy
from salts_lib import utils2
from salts_lib.utils2 import i18n
from salts_lib.constants import MODES
from salts_lib.db_utils import DB_Connection
from salts_lib.trakt_api import Trakt_API
logger = log_utils.Logger.get_logger()
logger.disable()
class Service(xbmc.Player):
def __init__(self, *args, **kwargs):
logger.log('Service: starting...', log_utils.LOGNOTICE)
self.db_connection = DB_Connection()
xbmc.Player.__init__(self, *args, **kwargs)
self.win = xbmcgui.Window(10000)
self.reset()
def reset(self):
logger.log('Service: Resetting...', log_utils.LOGDEBUG)
self.win.clearProperty('salts.playing')
self.win.clearProperty('salts.playing.trakt_id')
self.win.clearProperty('salts.playing.season')
self.win.clearProperty('salts.playing.episode')
self.win.clearProperty('salts.playing.srt')
self.win.clearProperty('salts.playing.trakt_resume')
self.win.clearProperty('salts.playing.salts_resume')
self.win.clearProperty('salts.playing.library')
self._from_library = False
self.tracked = False
self._totalTime = 999999
self.trakt_id = None
self.season = None
self.episode = None
self._lastPos = 0
def onPlayBackStarted(self):
logger.log('Service: Playback started', log_utils.LOGNOTICE)
playing = self.win.getProperty('salts.playing') == 'True'
self.trakt_id = self.win.getProperty('salts.playing.trakt_id')
self.season = self.win.getProperty('salts.playing.season')
self.episode = self.win.getProperty('salts.playing.episode')
srt_path = self.win.getProperty('salts.playing.srt')
trakt_resume = self.win.getProperty('salts.playing.trakt_resume')
salts_resume = self.win.getProperty('salts.playing.salts_resume')
self._from_library = self.win.getProperty('salts.playing.library') == 'True'
if playing: # Playback is ours
logger.log('Service: tracking progress...', log_utils.LOGNOTICE)
self.tracked = True
if srt_path:
logger.log('Service: Enabling subtitles: %s' % (srt_path), log_utils.LOGDEBUG)
self.setSubtitles(srt_path)
else:
self.showSubtitles(False)
self._totalTime = 0
while self._totalTime == 0:
try:
self._totalTime = self.getTotalTime()
except RuntimeError:
self._totalTime = 0
break
kodi.sleep(1000)
if salts_resume:
logger.log("SaltsRD Local Resume: Resume Time: %s Total Time: %s" % (salts_resume, self._totalTime), log_utils.LOGDEBUG)
self.seekTime(float(salts_resume))
elif trakt_resume:
resume_time = float(trakt_resume) * self._totalTime / 100
logger.log("SaltsRD Trakt Resume: Percent: %s, Resume Time: %s Total Time: %s" % (trakt_resume, resume_time, self._totalTime), log_utils.LOGDEBUG)
self.seekTime(resume_time)
def onPlayBackStopped(self):
logger.log('Service: Playback Stopped', log_utils.LOGNOTICE)
if self.tracked:
# clear the playlist if SaltsRD was playing and only one item in playlist to
# use playlist to determine playback method in get_sources
pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
plugin_url = 'plugin://%s/' % (kodi.get_id())
if pl.size() == 1 and pl[0].getfilename().lower().startswith(plugin_url):
logger.log('Service: Clearing Single Item SaltsRD Playlist', log_utils.LOGDEBUG)
pl.clear()
playedTime = float(self._lastPos)
try: percent_played = int((playedTime / self._totalTime) * 100)
except: percent_played = 0 # guard div by zero
pTime = utils.format_time(playedTime)
tTime = utils.format_time(self._totalTime)
logger.log('Service: Played %s of %s total = %s%%' % (pTime, tTime, percent_played), log_utils.LOGDEBUG)
if playedTime == 0 and self._totalTime == 999999:
logger.log('Kodi silently failed to start playback', log_utils.LOGWARNING)
elif playedTime >= 5:
if percent_played <= 98:
logger.log('Service: Setting bookmark on |%s|%s|%s| to %s seconds' % (self.trakt_id, self.season, self.episode, playedTime), log_utils.LOGDEBUG)
self.db_connection.set_bookmark(self.trakt_id, playedTime, self.season, self.episode)
if percent_played >= 75 and self._from_library:
if kodi.has_addon('script.trakt'):
run = 'RunScript(script.trakt, action=sync, silent=True)'
xbmc.executebuiltin(run)
self.reset()
def onPlayBackEnded(self):
logger.log('Service: Playback completed', log_utils.LOGNOTICE)
self.onPlayBackStopped()
def disable_global_cx(was_on):
    if kodi.has_addon('plugin.program.super.favourites'):  # match the Super Favourites addon instantiated below
active_plugin = xbmc.getInfoLabel('Container.PluginName')
sf = xbmcaddon.Addon('plugin.program.super.favourites')
if active_plugin == kodi.get_id():
if sf.getSetting('CONTEXT') == 'true':
logger.log('Disabling Global CX while SaltsRD is active', log_utils.LOGDEBUG)
was_on = True
sf.setSetting('CONTEXT', 'false')
elif was_on:
logger.log('Re-enabling Global CX while SaltsRD is not active', log_utils.LOGDEBUG)
sf.setSetting('CONTEXT', 'true')
was_on = False
return was_on
def check_cooldown(cd_begin):
# black_list = ['plugin.video.livestreams']
# active_plugin = xbmc.getInfoLabel('Container.PluginName')
# if active_plugin in black_list:
# cd_begin = time.time()
active = 'false' if (time.time() - cd_begin) > 30 else 'true'
if kodi.get_setting('cool_down') != active:
kodi.set_setting('cool_down', active)
return cd_begin
def show_next_up(last_label, sf_begin):
token = kodi.get_setting('trakt_oauth_token')
if token and xbmc.getInfoLabel('Container.PluginName') == kodi.get_id() and xbmc.getInfoLabel('Container.Content') == 'tvshows':
if xbmc.getInfoLabel('ListItem.label') != last_label:
sf_begin = time.time()
last_label = xbmc.getInfoLabel('ListItem.label')
if sf_begin and (time.time() - sf_begin) >= int(kodi.get_setting('next_up_delay')):
liz_url = xbmc.getInfoLabel('ListItem.FileNameAndPath')
queries = kodi.parse_query(liz_url[liz_url.find('?'):])
if 'trakt_id' in queries:
try: list_size = int(kodi.get_setting('list_size'))
except: list_size = 30
try: trakt_timeout = int(kodi.get_setting('trakt_timeout'))
except: trakt_timeout = 20
trakt_api = Trakt_API(token, kodi.get_setting('use_https') == 'true', list_size, trakt_timeout, kodi.get_setting('trakt_offline') == 'true')
progress = trakt_api.get_show_progress(queries['trakt_id'], full=True)
if 'next_episode' in progress and progress['next_episode']:
if progress['completed'] or kodi.get_setting('next_unwatched') == 'true':
next_episode = progress['next_episode']
date = utils2.make_day(utils2.make_air_date(next_episode['first_aired']))
if kodi.get_setting('next_time') != '0':
date_time = '%s@%s' % (date, utils2.make_time(utils.iso_2_utc(next_episode['first_aired']), 'next_time'))
else:
date_time = date
msg = '[[COLOR deeppink]%s[/COLOR]] - %sx%s' % (date_time, next_episode['season'], next_episode['number'])
if next_episode['title']: msg += ' - %s' % (next_episode['title'])
duration = int(kodi.get_setting('next_up_duration')) * 1000
kodi.notify(header=i18n('next_episode'), msg=msg, duration=duration)
sf_begin = 0
else:
last_label = ''
return last_label, sf_begin
def main(argv=None): # @UnusedVariable
if sys.argv: argv = sys.argv # @UnusedVariable
MAX_ERRORS = 10
errors = 0
last_label = ''
sf_begin = 0
cd_begin = 0
was_on = False
logger.log('Service: Installed Version: %s' % (kodi.get_version()), log_utils.LOGNOTICE)
monitor = xbmc.Monitor()
proxy = image_proxy.ImageProxy()
service = Service()
salts_utils.do_startup_task(MODES.UPDATE_SUBS)
salts_utils.do_startup_task(MODES.PRUNE_CACHE)
while not monitor.abortRequested():
try:
is_playing = service.isPlaying()
salts_utils.do_scheduled_task(MODES.UPDATE_SUBS, is_playing)
salts_utils.do_scheduled_task(MODES.PRUNE_CACHE, is_playing)
if service.tracked and service.isPlayingVideo():
service._lastPos = service.getTime()
was_on = disable_global_cx(was_on)
cd_begin = check_cooldown(cd_begin)
if not proxy.running: proxy.start_proxy()
if kodi.get_setting('show_next_up') == 'true':
last_label, sf_begin = show_next_up(last_label, sf_begin)
except Exception as e:
errors += 1
if errors >= MAX_ERRORS:
logger.log('Service: Error (%s) received..(%s/%s)...Ending Service...' % (e, errors, MAX_ERRORS), log_utils.LOGERROR)
break
else:
logger.log('Service: Error (%s) received..(%s/%s)...Continuing Service...' % (e, errors, MAX_ERRORS), log_utils.LOGERROR)
else:
errors = 0
if monitor.waitForAbort(.5):
break
proxy.stop_proxy()
logger.log('Service: shutting down...', log_utils.LOGNOTICE)
if __name__ == '__main__':
sys.exit(main()) | 0.225843 | 0.065635 |
from pathlib import Path
from typing import Dict, Iterator, List, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
import tensorflow.python.util.deprecation as deprecation
import tensorflow_probability as tfp
from tqdm import tqdm
from ..datalib.metrics import MetricAnalyser
deprecation._PRINT_DEPRECATION_WARNINGS = False
tfd = tfp.distributions
tfk = tfp.math.psd_kernels
def convert_index_points(array: np.ndarray) -> tf.Tensor:
"""Convert an array into a tensor appropriate for GP index points.
Args:
array (:obj:`np.ndarray`): The array to extend.
Returns:
tensor (:obj:`tf.Tensor`): The converted Tensor.
"""
return tf.constant(array, dtype=tf.float64)
def get_default_kernel() -> tfp.math.psd_kernels.ExponentiatedQuadratic:
"""Get a default kernel."""
amplitude = tf.Variable(1.0, trainable=True, dtype=tf.float64, name="amplitude")
length_scale = tf.Variable(
1.0, trainable=True, dtype=tf.float64, name="length_scale"
)
return tfk.ExponentiatedQuadratic(
amplitude=amplitude,
length_scale=length_scale,
)
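# Illustrative sketch (no particular dataset assumed): the helpers above are typically
# combined to build GP inputs and a trainable kernel, e.g.
#   x_obs = convert_index_points(np.random.rand(20, 3))  # float64 tf.Tensor of index points
#   kernel = get_default_kernel()                         # trainable amplitude and length_scale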
class GPTrainer(tf.Module):
"""Class for training hyperparameters for GP kernels.
Args:
observation_index_points (:obj:`tf.Tensor`): The observed index points (`x` values).
observations (:obj:`tf.Tensor`): The observed samples (`y` values).
checkpoint_dir (str or :obj:`Path`, optional): The directory to check
for checkpoints and to save checkpoints to.
kernel: The kernel to use. Must be instantiated with trainable
parameters. Defaults to a radial basis function.
Attributes:
observation_index_points (:obj:`tf.Tensor`): The observed index points (`x` values).
observations (:obj:`tf.Tensor`): The observed samples (`y` values).
checkpoint_dir (str or :obj:`Path`, optional): The directory to check for
checkpoints and to save checkpoints to.
trainable_vars: The trainable variables (parameters) of the kernel as a dictionary.
The keys are the variables' names, stripped of the colon.
kernel (:obj:`tf.Tensor`): The kernel to use for the Gaussian process.
optimizer (:obj:`Optimizer`): The optimizer to use for determining
:attr:`trainable_vars`.
training_steps (:obj:`tf.Tensor`): The current number of training epochs executed.
loss (:obj:`tf.Tensor`): The current loss on the training data
(A negative log likelihood).
metrics (dict): Contains metric names and values.
Default to `np.nan` when uncalculated.
ckpt (:obj:`Checkpoint`, optional): A tensorflow training checkpoint.
Defaults to `None` if `checkpoint_dir` is not passed.
ckpt_manager (:obj:`CheckpointManager`, optional): A checkpoint manager, used to save
:attr:`ckpt` to file.
Defaults to `None` if `checkpoint_dir` is not passed.
gp_prior (:obj:`GaussianProcess`): A Gaussian process using :attr:`kernel` and
using :attr:`observation_index_points` as indices.
"""
def __init__(
self,
observation_index_points: tf.Tensor,
observations: tf.Tensor,
checkpoint_dir: Optional[Union[str, Path]] = None,
kernel: Optional[tfp.math.psd_kernels.PositiveSemidefiniteKernel] = None,
):
"""Initialze attributes, kernel, optimizer and checkpoint manager."""
self.observation_index_points = tf.Variable(
observation_index_points,
dtype=tf.float64,
trainable=False,
name="observation_index_points",
)
self.observations = tf.Variable(
observations,
dtype=tf.float64,
trainable=False,
name="observations",
)
if kernel is None:
self.kernel: tfp.math.psd_kernels.PositiveSemidefiniteKernel = (
get_default_kernel()
)
else:
self.kernel = kernel
        self.trainable_vars: Dict[str, tf.Variable] = {
            # drop the ":<index>" suffix TF appends to variable names (e.g. "amplitude:0"),
            # so the keys are the plain parameter names described in the class docstring
            var.name.split(":")[0]: var for var in self.kernel.trainable_variables
        }
self.optimizer = tf.optimizers.Adam()
self.training_steps = tf.Variable(
0, dtype=tf.int32, trainable=False, name="training_steps"
)
self.loss = tf.Variable(
np.nan,
dtype=tf.float64,
trainable=False,
name="training_nll",
)
self.metrics = {
"nll": tf.Variable(
np.nan,
dtype=tf.float64,
trainable=False,
name="validation_nll",
),
"mae": tf.Variable(
np.nan,
dtype=tf.float64,
trainable=False,
name="validation_mae",
),
"sharpness": tf.Variable(
np.nan,
dtype=tf.float64,
trainable=False,
name="validation_sharpness",
),
"variation": tf.Variable(
np.nan,
dtype=tf.float64,
trainable=False,
name="validation_coeff_variance",
),
"calibration_err": tf.Variable(
np.nan,
dtype=tf.float64,
trainable=False,
name="validation_calibration_error",
),
}
if checkpoint_dir:
self.ckpt = tf.train.Checkpoint(
step=self.training_steps,
loss=self.loss,
val_nll=self.metrics["nll"],
val_mae=self.metrics["mae"],
val_sharpness=self.metrics["sharpness"],
val_coeff_var=self.metrics["variation"],
val_cal_err=self.metrics["calibration_err"],
**self.trainable_vars,
)
self.ckpt_manager = tf.train.CheckpointManager(
self.ckpt,
checkpoint_dir,
max_to_keep=1,
step_counter=self.training_steps,
)
self.ckpt.restore(self.ckpt_manager.latest_checkpoint)
if self.ckpt_manager.latest_checkpoint:
print(f"Restored from {self.ckpt_manager.latest_checkpoint}")
else:
print("No checkpoints found.")
else:
self.ckpt = None
self.ckpt_manager = None
self.gp_prior = tfd.GaussianProcess(self.kernel, self.observation_index_points)
@staticmethod
def load_model(model_dir: str):
"""Load a `GPTrainer` model from a file.
Args:
model_dir (str): The directory to import the model from.
Returns:
The model as a TensorFlow AutoTrackable object.
"""
return tf.saved_model.load(model_dir)
def get_model(
self, index_points: tf.Tensor
) -> tfp.python.distributions.GaussianProcessRegressionModel:
"""Get a regression model for a set of index points.
Args:
index_points (:obj:`tf.Tensor`): The index points to fit
regression model.
Returns:
gprm (:obj:`GaussianProcessRegressionModel`): The regression model.
"""
return tfd.GaussianProcessRegressionModel(
kernel=self.kernel,
index_points=index_points,
observation_index_points=self.observation_index_points,
observations=self.observations,
)
@tf.function(input_signature=[tf.TensorSpec(None, tf.float64)])
def predict(self, points: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Predict targets and the standard deviation of the distribution.
Args:
points (:obj:`tf.Tensor`): The points (`x` values) to make predictions with.
Returns:
mean (:obj:`tf.Tensor`): The mean of the distribution at each point.
stddev (:obj:`tf.Tensor`): The standard deviation of the distribution
at each point.
"""
gprm = self.get_model(points)
return gprm.mean(), gprm.stddev()
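    # Illustrative sketch (assumes a fitted `trainer` instance and a NumPy array `x_query`):
    #   mean, stddev = trainer.predict(convert_index_points(x_query))
    # Both outputs are float64 tensors with one value per query point.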
def train_model(
self,
val_points: tf.Tensor,
val_obs: tf.Tensor,
epochs: int = 1000,
patience: Optional[int] = None,
save_dir: Optional[Union[str, Path]] = None,
metrics: List[str] = [],
) -> Iterator[Dict[str, float]]:
"""Optimize model parameters.
Args:
val_points (:obj:`tf.Tensor`): The validation points.
val_obs (:obj:`tf.Tensor`): The validation targets.
epochs (int): The number of training epochs.
patience (int, optional): The number of epochs after which to
stop training if no improvement is seen on the loss of the
validation data.
save_dir (str or :obj:`Path`, optional): Where to save the model.
metrics (list of str): A list of valid metrics to calculate.
Possible valid metrics are given in :class:`GPMetrics`.
Yields:
metrics (dict of str: float): A dictionary of the metrics after the
last training epoch.
"""
best_val_nll: float = self.metrics["nll"].numpy()
if np.isnan(best_val_nll):
# Set to infinity so < logic works
best_val_nll = np.inf
if (self.ckpt_manager or patience) and "nll" not in metrics:
# We need to track NLL for these to work
metrics.append("nll")
steps_since_improvement: int = 1
gp_metrics = MetricAnalyser(val_points, val_obs, self.get_model(val_points))
for i in tqdm(range(epochs), "Training epochs"):
self.loss.assign(self.optimize_cycle())
self.training_steps.assign_add(1)
# * Determine and assign metrics
if gp_metrics.REQUIRES_MEAN.intersection(metrics):
gp_metrics.update_mean()
if gp_metrics.REQUIRES_STDDEV.intersection(metrics):
gp_metrics.update_stddevs()
try:
metric_dict: Dict[str, float] = {
metric: getattr(gp_metrics, metric) for metric in metrics
}
except AttributeError as e:
raise ValueError(f"Invalid metric: {e}")
for metric, value in metric_dict.items():
self.metrics[metric].assign(value)
metric_dict["loss"] = self.loss.numpy()
yield metric_dict
if patience or self.ckpt_manager:
if self.metrics["nll"] < best_val_nll:
best_val_nll = self.metrics["nll"].numpy()
steps_since_improvement = 1
if self.ckpt_manager:
self.ckpt_manager.save(self.training_steps)
else:
steps_since_improvement += 1
if patience and steps_since_improvement >= patience:
print(
"Patience exceeded: "
f"{steps_since_improvement} steps since NLL improvement."
)
break
        if save_dir:
            # Cast to str in case a pathlib.Path was given.
            tf.saved_model.save(self, str(save_dir))
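    # Usage sketch: `train_model` is a generator, so it only trains while it is
    # iterated. Assuming hypothetical float64 validation tensors `val_x` and
    # `val_y`:
    #
    #     for epoch_metrics in trainer.train_model(val_x, val_y, epochs=500,
    #                                              patience=50, metrics=["mae"]):
    #         print(epoch_metrics["loss"], epoch_metrics["mae"])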
@tf.function
def optimize_cycle(self) -> tf.Tensor:
"""Perform one training step.
Returns:
loss (:obj:`Tensor`): A Tensor containing the negative log probability loss.
"""
with tf.GradientTape() as tape:
loss = -self.gp_prior.log_prob(self.observations)
grads = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
        return loss | unlockgnn/gp/gp_trainer.py | 0.971766 | 0.616503
import pytest
from click.testing import CliRunner
import os
import json
from .utils import git_repo
from keras.layers import Dense, Flatten, Reshape, Input, LSTM, Embedding, Concatenate
from keras.models import Sequential, Model
from keras import backend as K
import wandb
from wandb import wandb_run
from wandb.keras import WandbCallback
# Tests which rely on row history in memory should set `History.keep_rows = True`
from wandb.history import History
History.keep_rows = True
import sys
import glob
@pytest.fixture
def dummy_model(request):
K.clear_session()
multi = request.node.get_closest_marker('multiclass')
image_output = request.node.get_closest_marker('image_output')
if multi:
nodes = 10
loss = 'categorical_crossentropy'
else:
nodes = 1
loss = 'binary_crossentropy'
if image_output:
nodes = 300
model = Sequential()
model.add(Flatten(input_shape=(10, 10, 3)))
model.add(Dense(nodes, activation='sigmoid'))
if image_output:
model.add(Dense(nodes, activation="relu"))
model.add(Reshape((10, 10, 3)))
model.compile(optimizer='adam',
loss=loss,
metrics=['accuracy'])
return model
@pytest.fixture
def rnn_model(request):
K.clear_session()
model = Sequential()
model.add(LSTM(10, input_shape=(4, 1)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mae', metrics=['accuracy'])
return model
@pytest.fixture
def dummy_data(request):
multi = request.node.get_closest_marker('multiclass')
image_output = request.node.get_closest_marker('image_output')
cats = 10 if multi else 1
import numpy as np
data = np.random.randint(255, size=(100, 10, 10, 3))
labels = np.random.randint(2, size=(100, cats))
if image_output:
labels = data
return (data, labels)
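# Usage sketch for the fixtures above: a test opts into the multiclass or
# image-output variants through pytest markers, e.g.
#
#     @pytest.mark.multiclass
#     def test_something(dummy_model, dummy_data, wandb_init_run):
#         dummy_model.fit(*dummy_data, epochs=1, batch_size=10)
#
# (`wandb_init_run` is assumed to be provided by the suite's conftest.)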
def test_basic_keras(dummy_model, dummy_data, wandb_init_run):
dummy_model.fit(*dummy_data, epochs=2, batch_size=36,
callbacks=[WandbCallback()])
wandb.run.summary.load()
assert wandb.run.history.rows[0]["epoch"] == 0
key = "accuracy" if wandb.run.summary.get("accuracy") else "acc"
assert wandb.run.summary[key] > 0
assert len(wandb.run.summary["graph"].nodes) == 3
def test_keras_timeseries(rnn_model, wandb_init_run):
import numpy as np
data = np.random.random(size=(100, 4, 1))
labels = np.random.random(size=(100, 1))
rnn_model.fit(data, labels, epochs=2, batch_size=36, validation_data=(data, labels),
callbacks=[WandbCallback()])
    assert wandb.run.history.rows[-1].get("examples") is None
def test_basic_keras_multi_fit(dummy_model, dummy_data, wandb_init_run):
dummy_model.fit(*dummy_data, epochs=2, batch_size=36,
callbacks=[WandbCallback()])
dummy_model.fit(*dummy_data, epochs=2, batch_size=36,
callbacks=[WandbCallback()])
wandb.run.summary.load()
assert wandb.run.history.rows[0]["epoch"] == 0
assert wandb.run.history.rows[-1]["epoch"] == 1
assert wandb.run.history.rows[-1]["_step"] == 3
key = "accuracy" if wandb.run.summary.get("accuracy") else "acc"
assert wandb.run.summary[key] > 0
assert len(wandb.run.summary["graph"].nodes) == 3
def test_keras_log_batch(dummy_model, dummy_data, wandb_init_run):
dummy_model.fit(*dummy_data, epochs=2, batch_size=10,
callbacks=[WandbCallback(log_batch_frequency=2)])
wandb.run.summary.load()
print("History", wandb.run.history.rows)
assert len(wandb.run.history.rows) == 12
def test_keras_image_bad_data(dummy_model, dummy_data, wandb_init_run):
    data, labels = dummy_data
    with pytest.raises(ValueError):
        dummy_model.fit(*dummy_data, epochs=2, batch_size=36, validation_data=(data.reshape(10), labels),
                        callbacks=[WandbCallback(data_type="image")])
def test_keras_image_binary(dummy_model, dummy_data, wandb_init_run):
dummy_model.fit(*dummy_data, epochs=2, batch_size=36, validation_data=dummy_data,
callbacks=[WandbCallback(data_type="image")])
assert len(wandb.run.history.rows[0]["examples"]['captions']) == 36
def test_keras_image_binary_captions(dummy_model, dummy_data, wandb_init_run):
dummy_model.fit(*dummy_data, epochs=2, batch_size=36, validation_data=dummy_data,
callbacks=[WandbCallback(data_type="image", predictions=10, labels=["Rad", "Nice"])])
assert wandb.run.history.rows[0]["examples"]['captions'][0] in [
"Rad", "Nice"]
@pytest.mark.multiclass
def test_keras_image_multiclass(dummy_model, dummy_data, wandb_init_run):
dummy_model.fit(*dummy_data, epochs=2, batch_size=36, validation_data=dummy_data,
callbacks=[WandbCallback(data_type="image", predictions=10)])
assert len(wandb.run.history.rows[0]["examples"]['captions']) == 10
@pytest.mark.multiclass
def test_keras_image_multiclass_captions(dummy_model, dummy_data, wandb_init_run):
dummy_model.fit(*dummy_data, epochs=2, batch_size=36, validation_data=dummy_data,
callbacks=[WandbCallback(data_type="image", predictions=10, labels=["Rad", "Nice", "Fun", "Rad", "Nice", "Fun", "Rad", "Nice", "Fun", "Rad"])])
assert wandb.run.history.rows[0]["examples"]['captions'][0] in [
"Rad", "Nice", "Fun"]
@pytest.mark.image_output
def test_keras_image_output(dummy_model, dummy_data, wandb_init_run):
dummy_model.fit(*dummy_data, epochs=2, batch_size=36, validation_data=dummy_data,
callbacks=[WandbCallback(data_type="image", predictions=10)])
assert wandb.run.history.rows[0]["examples"]['count'] == 30
assert wandb.run.history.rows[0]["examples"]['grouping'] == 3
def test_keras_log_weights(dummy_model, dummy_data, wandb_init_run):
dummy_model.fit(*dummy_data, epochs=2, batch_size=36, validation_data=dummy_data,
callbacks=[WandbCallback(data_type="image", log_weights=True)])
assert wandb.run.history.rows[0]['parameters/dense_1.weights']['_type'] == "histogram"
def test_keras_save_model(dummy_model, dummy_data, wandb_init_run):
dummy_model.fit(*dummy_data, epochs=2, batch_size=36, validation_data=dummy_data,
callbacks=[WandbCallback(data_type="image", save_model=True)])
assert len(glob.glob(wandb.run.dir + "/model-best.h5")) == 1
def test_keras_convert_sequential():
# necessary to keep the names of the layers consistent
K.clear_session()
model = Sequential()
model.add(Dense(4, input_shape=(3,)))
model.add(Dense(5))
model.add(Dense(6))
wandb_model = wandb.data_types.Graph.from_keras(model)
wandb_model_out = wandb_model._to_graph_json()
print(wandb_model_out)
assert wandb_model_out == {'format': 'keras',
'nodes': [
{'name': 'dense_1_input', 'id': 'dense_1_input', 'class_name': 'InputLayer',
'output_shape': (None, 3), 'num_parameters': 0},
{'name': 'dense_1', 'id': 'dense_1', 'class_name': 'Dense',
'output_shape': (None, 4), 'num_parameters': 16},
{'name': 'dense_2', 'id': 'dense_2', 'class_name': 'Dense',
'output_shape': (None, 5), 'num_parameters': 25},
{'name': 'dense_3', 'id': 'dense_3', 'class_name': 'Dense', 'output_shape': (None, 6), 'num_parameters': 36}],
'edges': [['dense_1_input', 'dense_1'], ['dense_1', 'dense_2'], ['dense_2', 'dense_3']]}
def test_keras_convert_model_non_sequential():
# necessary to keep the names of the layers consistent
K.clear_session()
# example from the Keras docs
main_input = Input(shape=(100,), dtype='int32', name='main_input')
x = Embedding(output_dim=512, input_dim=10000,
input_length=100)(main_input)
lstm_out = LSTM(32)(x)
auxiliary_output = Dense(1, activation='sigmoid',
name='aux_output')(lstm_out)
auxiliary_input = Input(shape=(5,), name='aux_input')
x = Concatenate()([lstm_out, auxiliary_input])
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
main_output = Dense(1, activation='sigmoid', name='main_output')(x)
model = Model(inputs=[main_input, auxiliary_input],
outputs=[main_output, auxiliary_output])
wandb_model = wandb.data_types.Graph.from_keras(model)
wandb_model_out = wandb_model._to_graph_json()
assert wandb_model_out['nodes'][0] == {'name': 'main_input', 'id': 'main_input',
'class_name': 'InputLayer', 'output_shape': (None, 100), 'num_parameters': 0}
assert wandb_model_out['edges'] == [
['main_input', 'embedding_1'], ['embedding_1',
'lstm_1'], ['lstm_1', 'concatenate_1'],
['aux_input', 'concatenate_1'], [
'concatenate_1', 'dense_1'], ['dense_1', 'dense_2'],
        ['dense_2', 'dense_3'], ['dense_3', 'main_output'], ['lstm_1', 'aux_output']] | tests/test_keras.py | 0.691289 | 0.380068
from typing import Union
import numpy as np
from numpy import ndarray
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .base import TK2Conv2D, TK2Linear
from ..tn_module import LambdaLayer
class TK2Block(nn.Module):
def __init__(self, c_in: int, c_out: int, r: int, stride: int = 1, option='A'):
"""Tucker-2 Block.
Parameters
----------
c_in : int
The input channel size.
c_out : int
The output channel size.
r : int
The rank of this block.
stride : int
The conv stride
option : str
Set "A" or "B" to choose the shortcut type
"""
super(TK2Block, self).__init__()
in_planes = c_in
planes = c_out
self.conv1 = TK2Conv2D(c_in, c_out, [r, r], 3, padding=1,
stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = TK2Conv2D(c_out, c_out, [r, r], 3,
stride=1,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
                For CIFAR-10, the ResNet paper uses option A.
                """
                self.shortcut = LambdaLayer(lambda x:
                                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
TK2Conv2D(c_in, c_out, [r, r],
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes)
)
def forward(self, inputs: Tensor) -> Tensor:
"""Forwarding method.
Parameters
----------
inputs : torch.Tensor
tensor :math:`\in \mathbb{R}^{b \\times C \\times H \\times W}`
Returns
-------
torch.Tensor
tensor :math:`\in \mathbb{R}^{b \\times C' \\times H' \\times W'}`
"""
out = F.relu(self.bn1(self.conv1(inputs)))
out = self.bn2(self.conv2(out))
out += self.shortcut(inputs)
out = F.relu(out)
return out
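# Illustrative shape check for TK2Block (assumes TK2Conv2D follows the usual
# Conv2d shape semantics; the numbers are placeholders):
#
#     block = TK2Block(c_in=16, c_out=32, r=4, stride=2, option='A')
#     y = block(torch.randn(8, 16, 32, 32))   # expected shape: (8, 32, 16, 16)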
class TK2ResNet(nn.Module):
    def __init__(self, block, rs: Union[list, np.ndarray], num_blocks: list, num_classes: int):
        """ResNet based on the Tucker-2 decomposition.
Parameters
----------
block :
The block class of ResNet
rs : Union[list, numpy.ndarray]
            rs :math:`\in \mathbb{R}^{7}`. The ranks of the network
num_blocks : list
The number of each layer
num_classes : int
The number of classes
"""
super(TK2ResNet, self).__init__()
assert len(rs) == 7, "The length of ranks should be 7."
self.conv1 = TK2Conv2D(3, 16, [rs[0], rs[0]], kernel_size=3, stride=1,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, 16, rs[1], num_blocks[0])
self.down_sample2 = block(16, 32, rs[2], stride=2)
self.layer2 = self._make_layer(block, 32, 32, rs[3], num_blocks[1]-1)
self.down_sample3 = block(32, 64, rs[4], stride=2)
self.layer3 = self._make_layer(block, 64, 64, rs[5], num_blocks[2]-1)
self.linear = TK2Linear([4, 4, 4], num_classes, [rs[6], rs[6]], bias=True)
def _make_layer(self, block, c_in: int, c_out: int, r: int, blocks: int) -> nn.Sequential:
"""Make each block layer.
Parameters
----------
block :
The block class of ResNet
c_in : int
The input channel size
c_out : int
The output channel size
r : int
The rank of this block
blocks : int
The number of block
Returns
-------
torch.nn.Sequential
The block network
"""
strides = [1]*blocks
layers = []
for stride in strides:
layers.append(block(c_in, c_out, r, stride=stride))
return nn.Sequential(*layers)
def forward(self, inputs: Tensor) -> Tensor:
"""Forwarding method.
Parameters
----------
inputs : torch.Tensor
tensor :math:`\in \mathbb{R}^{b \\times C \\times H \\times W}`
Returns
-------
torch.Tensor
tensor :math:`\in \mathbb{R}^{b \\times num\_classes}`
"""
out = F.relu(self.bn1(self.conv1(inputs)))
out = self.layer1(out)
out = self.down_sample2(out)
out = self.layer2(out)
out = self.down_sample3(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
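# Illustrative shape check for the full network (CIFAR-style 32x32 RGB input;
# the rank values are placeholders):
#
#     model = TK2ResNet20(rs=[4, 4, 4, 4, 4, 4, 4], num_classes=10)
#     logits = model(torch.randn(2, 3, 32, 32))   # expected shape: (2, 10)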
class TK2ResNet20(TK2ResNet):
def __init__(self, rs: Union[list, np.ndarray], num_classes: int):
"""ResNet-20 based on Tucker-2.
Parameters
----------
rs : Union[list, numpy.ndarray]
rs :math:`\in \mathbb{R}^{7}`. The ranks of network
num_classes : int
The number of classes
"""
super(TK2ResNet20, self).__init__(block=TK2Block, rs=rs, num_blocks=[3, 3, 3], num_classes=num_classes)
class TK2ResNet32(TK2ResNet):
def __init__(self, rs: Union[list, np.ndarray], num_classes: int):
"""ResNet-32 based on Tucker-2.
Parameters
----------
rs : Union[list, numpy.ndarray]
rs :math:`\in \mathbb{R}^{7}`. The ranks of network
num_classes : int
The number of classes
"""
        super(TK2ResNet32, self).__init__(block=TK2Block, rs=rs, num_blocks=[5, 5, 5], num_classes=num_classes) | tednet/tnn/tucker2/tk2_resnet.py | 0.94933 | 0.566558
from rally.common import objects
from rally import consts
from rally.deployment import engine
@engine.configure(name="ExistingCloud")
class ExistingCloud(engine.Engine):
"""Just use an existing OpenStack deployment without deploying anything.
    To use ExistingCloud, you should put credential information into the config:
.. code-block:: json
        {
            "type": "ExistingCloud",
            "auth_url": "http://localhost:5000/v2.0/",
            "region_name": "RegionOne",
            "endpoint_type": "public",
            "admin": {
                "username": "admin",
                "password": "password",
                "tenant_name": "demo"
            },
            "https_insecure": false,
            "https_cacert": ""
        }
Or, using keystone v3 API endpoint:
.. code-block:: json
        {
            "type": "ExistingCloud",
            "auth_url": "http://localhost:5000/v3/",
            "region_name": "RegionOne",
            "endpoint_type": "public",
            "admin": {
                "username": "admin",
                "password": "<PASSWORD>",
                "user_domain_name": "admin",
                "project_name": "admin",
                "project_domain_name": "admin"
            },
            "https_insecure": false,
            "https_cacert": ""
        }
    To specify extra options, you can use the special "extra" parameter:
.. code-block:: json
        {
            "type": "ExistingCloud",
            "auth_url": "http://localhost:5000/v2.0/",
            "region_name": "RegionOne",
            "endpoint_type": "public",
            "admin": {
                "username": "admin",
                "password": "password",
                "tenant_name": "demo"
            },
            "https_insecure": false,
            "https_cacert": "",
            "extra": {"some_var": "some_value"}
        }
"""
CONFIG_SCHEMA = {
"type": "object",
"definitions": {
"user": {
"type": "object",
"oneOf": [
{
# v2.0 authentication
"properties": {
"username": {"type": "string"},
"password": {"type": "string"},
"tenant_name": {"type": "string"},
},
"required": ["username", "password", "tenant_name"],
"additionalProperties": False
},
{
# Authentication in project scope
"properties": {
"username": {"type": "string"},
"password": {"type": "string"},
"domain_name": {"type": "string"},
"user_domain_name": {"type": "string"},
"admin_domain_name": {"type": "string"},
"project_name": {"type": "string"},
"project_domain_name": {"type": "string"},
},
"required": ["username", "password", "project_name"],
"additionalProperties": False
}
],
}
},
"properties": {
"type": {"type": "string"},
"auth_url": {"type": "string"},
"region_name": {"type": "string"},
"endpoint": {"type": ["string", "null"]},
"endpoint_type": {"enum": [consts.EndpointType.ADMIN,
consts.EndpointType.INTERNAL,
consts.EndpointType.PUBLIC,
None]},
"https_insecure": {"type": "boolean"},
"https_cacert": {"type": "string"},
"admin": {"$ref": "#/definitions/user"},
"users": {
"type": "array",
"items": {"$ref": "#/definitions/user"}
},
"extra": {"type": "object", "additionalProperties": True}
},
"required": ["type", "auth_url", "admin"],
"additionalProperties": False
}
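    # A minimal config accepted by CONFIG_SCHEMA above (keystone v3 style
    # credentials; all values are placeholders):
    #
    #     {
    #         "type": "ExistingCloud",
    #         "auth_url": "http://localhost:5000/v3/",
    #         "admin": {"username": "admin", "password": "secret",
    #                   "project_name": "admin"}
    #     }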
def _create_credential(self, common, user, permission):
return objects.Credential(
common["auth_url"], user["username"], user["password"],
tenant_name=user.get("project_name", user.get("tenant_name")),
permission=permission,
region_name=common.get("region_name"),
endpoint_type=common.get("endpoint_type"),
endpoint=common.get("endpoint"),
domain_name=user.get("domain_name"),
user_domain_name=user.get("user_domain_name", None),
admin_domain_name=user.get("admin_domain_name", "Default"),
project_domain_name=user.get("project_domain_name", None),
https_insecure=common.get("https_insecure", False),
https_cacert=common.get("https_cacert")
)
def deploy(self):
permissions = consts.EndpointPermission
users = [self._create_credential(self.config, user, permissions.USER)
for user in self.config.get("users", [])]
admin = self._create_credential(self.config,
self.config.get("admin"),
permissions.ADMIN)
return {"admin": admin, "users": users}
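    # Sketch of the structure returned by deploy(), with credentials built by
    # _create_credential above ("users" may be an empty list):
    #
    #     {"admin": <objects.Credential>, "users": [<objects.Credential>, ...]}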
def cleanup(self):
        pass | rally/deployment/engines/existing.py | 0.527803 | 0.37777
import click
from fandogh_cli.fandogh_client.secrets_client import list_secret, create_secret, put_secret, delete_secret
from .utils import format_text, TextStyle
from .presenter import present
from .base_commands import FandoghCommand
@click.group("secret")
def secret():
"""Secret management commands"""
@click.command("list", cls=FandoghCommand)
def list():
    """List secrets"""
table = present(lambda: list_secret(),
renderer='table',
headers=['Name', 'Secret Type', 'Created at'],
columns=['name', 'type', 'created_at'])
if len(table.strip()):
click.echo(table)
else:
click.echo(format_text("There is no secret available", TextStyle.WARNING))
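# Rough CLI usage sketch for the commands in this module (the executable name
# and the key=value field format are assumptions, not taken from this file):
#
#     fandogh secret list
#     fandogh secret create --name my-secret --type <secret-type> -f key=value
#     fandogh secret delete --name my-secret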
@click.command("create", cls=FandoghCommand)
@click.option('--name', '-n', 'secret_name', help='a unique name for secret', prompt='Name for the secret')
@click.option('--type', '-t', 'secret_type', help='type of the secret', prompt="Secret Type")
@click.option('--field', '-f', 'fields', help='fields to store in secret', multiple=True)
def create(secret_type, fields, secret_name):
"""Create new secret"""
    result = create_secret(secret_name, secret_type, fields)
click.echo(result['message'])
@click.command("put", cls=FandoghCommand)
@click.option('--name', '-n', 'secret_name', help='a unique name for secret', prompt='Name for the secret')
@click.option('--type', '-t', 'secret_type', help='type of the secret', prompt="Secret Type")
@click.option('--field', '-f', 'fields', help='fields to store in secret', multiple=True)
def put(secret_type, fields, secret_name):
"""Put new values for a secret """
result = put_secret(secret_name, secret_type, fields)
click.echo(result['message'])
@click.command("delete", cls=FandoghCommand)
@click.option('--name', '-n', 'secret_name', help='name of the secret to delete', prompt='Name for the secret')
def delete(secret_name):
"""Delete a secret by name"""
    if click.confirm("You are about to delete a secret named '{}', you cannot undo this action. Are you sure?".format(
secret_name
)):
result = delete_secret(secret_name)
click.echo(result['message'])
secret.add_command(list)
secret.add_command(put)
secret.add_command(create)
secret.add_command(delete) | fandogh_cli/secret_commands.py | 0.347094 | 0.087642
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
import sys
import matplotlib.pyplot as P
import torch.nn.functional as F
from IPython.core.debugger import Pdb
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', default='./', help='path to dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
parser.add_argument('--imageSize', type=int, default=32, help='the height / width of the input image to network')
parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--network', default='', help="path to wide residual network (to continue training)")
opt = parser.parse_args()
print(opt)
#define datasets
trainset = dset.CIFAR10(root=opt.dataroot, train = True, download=True,
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
)
validset = dset.CIFAR10(root=opt.dataroot, train = False, download=True,
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
)
assert trainset
assert validset
#define loaders
trainloader = torch.utils.data.DataLoader(trainset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
validloader = torch.utils.data.DataLoader(validset, batch_size=opt.batchSize,
shuffle=False, num_workers=int(opt.workers))
numClass = 10
hexa = 16
k = 10
# custom weights initialization called on WRN
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class _WRN(nn.Module):
def __init__(self, numClass, hexa, k):
super(_WRN, self).__init__()
# input is (nc) x 32 x 32
self.conv0 = nn.Conv2d(3, hexa, 3, 1, 1, bias=False)
# input is (hexa) X 32 x 32
self.group0_conv11 = nn.Conv2d(hexa, hexa*k, 1, 1, 0, bias=False)
self.group0_block0 = nn.Sequential(
# input is hexa x 32 x 32
nn.BatchNorm2d(hexa),
nn.ReLU(inplace=True),
nn.Conv2d(hexa, hexa*k, 3, 1, 1, bias=False),
# state size. (hexa*k) x 32 x 32
nn.BatchNorm2d(hexa*k),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k, hexa*k, 3, 1, 1, bias=False),
)
self.group0_block1 = nn.Sequential(
# input is (hexa*k) x 32 x 32
nn.BatchNorm2d(hexa*k),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k, hexa*k, 3, 1, 1, bias=False),
# state size. (hexa*k) x 32 x 32
nn.BatchNorm2d(hexa*k),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k, hexa*k, 3, 1, 1, bias=False),
)
self.group0_block2 = nn.Sequential(
# input is (hexa*k) x 32 x 32
nn.BatchNorm2d(hexa*k),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k, hexa*k, 3, 1, 1, bias=False),
# state size. (hexa*k) x 32 x 32
nn.BatchNorm2d(hexa*k),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k, hexa*k, 3, 1, 1, bias=False),
)
# input is (hexa*k) X 32 X 32
self.group1_conv11 = nn.Conv2d(hexa*k, hexa*k*2, 1, 2, 0, bias=False)
self.group1_block0 = nn.Sequential(
# input is (hexa*k) x 32 x 32
nn.BatchNorm2d(hexa*k),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k, hexa*k*2, 3, 2, 1, bias=False),
# state size. (hexa*k*2) x 16 x 16
nn.BatchNorm2d(hexa*k*2),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k*2, hexa*k*2, 3, 1, 1, bias=False),
)
self.group1_block1 = nn.Sequential(
# input is (hexa*k*2) x 16 x 16
nn.BatchNorm2d(hexa*k*2),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k*2, hexa*k*2, 3, 1, 1, bias=False),
# state size. (hexa*k*2) x 16 x 16
nn.BatchNorm2d(hexa*k*2),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k*2, hexa*k*2, 3, 1, 1, bias=False),
)
self.group1_block2 = nn.Sequential(
# input is (hexa*k*2) x 16 x 16
nn.BatchNorm2d(hexa*k*2),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k*2, hexa*k*2, 3, 1, 1, bias=False),
# state size. (hexa*k*2) x 16 x 16
nn.BatchNorm2d(hexa*k*2),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k*2, hexa*k*2, 3, 1, 1, bias=False),
)
# input is (hexa*k*2) X 16 X 16
self.group2_conv11 = nn.Conv2d(hexa*k*2, hexa*k*4, 1, 2, 0, bias=False)
self.group2_block0 = nn.Sequential(
# input is (hexa*k*2) x 16 x 16
nn.BatchNorm2d(hexa*k*2),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k*2, hexa*k*4, 3, 2, 1, bias=False),
# state size. (hexa*k*4) x 8 x 8
nn.BatchNorm2d(hexa*k*4),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k*4, hexa*k*4, 3, 1, 1, bias=False),
)
self.group2_block1 = nn.Sequential(
# input is (hexa*k*4) x 8 x 8
nn.BatchNorm2d(hexa*k*4),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k*4, hexa*k*4, 3, 1, 1, bias=False),
# state size. (hexa*k*4) x 8 x 8
nn.BatchNorm2d(hexa*k*4),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k*4, hexa*k*4, 3, 1, 1, bias=False),
)
self.group2_block2 = nn.Sequential(
# input is (hexa*k*4) x 8 x 8
nn.BatchNorm2d(hexa*k*4),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k*4, hexa*k*4, 3, 1, 1, bias=False),
# state size. (hexa*k*4) x 8 x 8
nn.BatchNorm2d(hexa*k*4),
nn.ReLU(inplace=True),
nn.Conv2d(hexa*k*4, hexa*k*4, 3, 1, 1, bias=False),
)
self.avg = nn.Sequential(
# input is (hexa*k*4) X 8 X 8
nn.BatchNorm2d(hexa*k*4),
nn.ReLU(inplace=True),
nn.AvgPool2d(8),
)
self.top = nn.Linear(hexa*k*4, numClass,bias=True)
def forward(self, input):
#conv0
output = self.conv0(input)
#group0
residual = self.group0_block0(output)
straight = self.group0_conv11(output)
output = self.group0_block1(residual+straight)
output = self.group0_block2(output)+output
#group1
residual = self.group1_block0(output)
straight = self.group1_conv11(output)
output = self.group1_block1(residual+straight)
output = self.group1_block2(output)+output
#group2
residual = self.group2_block0(output)
straight = self.group2_conv11(output)
output = self.group2_block1(residual+straight)
output = self.group2_block2(output)+output
#top
output = self.avg(output)
output = self.top(output.view(output.size(0), -1))
return output
WRN = _WRN(numClass, hexa, k)
WRN.apply(weights_init)
if opt.network != '':
WRN.load_state_dict(torch.load(opt.network))
print(WRN)
criterion = nn.CrossEntropyLoss()
input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
label = torch.LongTensor(opt.batchSize, numClass)
# drop into the debugger before the manual forward-pass check below
Pdb().set_trace()
for i, data in enumerate(validloader):
WRN.eval()
input_cpu, label_cpu = data
batch_size = input_cpu.size(0)
input.resize_as_(input_cpu).copy_(input_cpu)
label.resize_as_(label_cpu).copy_(label_cpu)
inputv = Variable(input)
labelv = Variable(label)
out1 = F.conv2d(inputv, WRN.conv0.weight,WRN.conv0.bias,1,1)
out2 = F.conv2d(out1, WRN.group0_block0[2].weight,WRN.group0_block0[2].bias ,1,1)
output = F.conv2d(out2,
WRN.group0_block0[5].weight,WRN.group0_block0[5].bias,1,1)
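    # The three F.conv2d calls above re-run conv0 and the two convolutions of
    # group0_block0 by hand (each a 3x3 conv, stride 1, padding 1, bias None
    # since the layers were built with bias=False). The manual path skips the
    # BatchNorm/ReLU layers in between, so `output` is not expected to match
    # group0_block0's output; `out1`, however, should equal WRN.conv0(inputv),
    # which can be checked at the breakpoint below, e.g. via
    #     (out1 - WRN.conv0(inputv)).abs().max()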
    Pdb().set_trace() | demo.py | 0.624637 | 0.397529
from torch.utils.data import DataLoader
from utils.parser_utils import get_args
args, device = get_args()
from utils.dataset_tools import check_download_dataset
from data import ConvertToThreeChannels, FewShotLearningDatasetParallel
from torchvision import transforms
from experiment_builder import ExperimentBuilder
from few_shot_learning_system import *
# Combines the arguments, model, data and experiment builders to run an experiment
if args.classifier_type == 'maml++_high-end':
model = EmbeddingMAMLFewShotClassifier(**args.__dict__)
elif args.classifier_type == 'maml++_low-end':
model = VGGMAMLFewShotClassifier(**args.__dict__)
elif args.classifier_type == 'vgg-fine-tune-scratch':
model = FineTuneFromScratchFewShotClassifier(**args.__dict__)
elif args.classifier_type == 'vgg-fine-tune-pretrained':
model = FineTuneFromPretrainedFewShotClassifier(**args.__dict__)
elif args.classifier_type == 'vgg-matching_network':
model = MatchingNetworkFewShotClassifier(**args.__dict__)
else:
raise NotImplementedError
check_download_dataset(dataset_name=args.dataset_name)
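# Build the image transform pipeline for the chosen channel count. Note that the
# list below is bound to the name `transforms`, shadowing the imported torchvision
# module once it has been constructed.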
if args.image_channels == 3:
transforms = [transforms.Resize(size=(args.image_height, args.image_width)), transforms.ToTensor(),
ConvertToThreeChannels(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]
elif args.image_channels == 1:
transforms = [transforms.Resize(size=(args.image_height, args.image_width)), transforms.ToTensor()]
train_setup_dict = dict(dataset_name=args.dataset_name,
indexes_of_folders_indicating_class=args.indexes_of_folders_indicating_class,
train_val_test_split=args.train_val_test_split,
labels_as_int=args.labels_as_int, transforms=transforms,
num_classes_per_set=args.num_classes_per_set,
num_samples_per_support_class=args.num_samples_per_support_class,
num_samples_per_target_class=args.num_samples_per_target_class,
seed=args.seed,
sets_are_pre_split=args.sets_are_pre_split,
load_into_memory=args.load_into_memory, set_name='train',
num_tasks_per_epoch=args.total_epochs * args.total_iter_per_epoch,
num_channels=args.image_channels,
num_support_sets=args.num_support_sets,
overwrite_classes_in_each_task=args.overwrite_classes_in_each_task,
class_change_interval=args.class_change_interval)
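# The validation and test setups below mirror train_setup_dict; they differ only in
# set_name and in using a fixed 600 tasks per epoch for evaluation.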
val_setup_dict = dict(dataset_name=args.dataset_name,
indexes_of_folders_indicating_class=args.indexes_of_folders_indicating_class,
train_val_test_split=args.train_val_test_split,
labels_as_int=args.labels_as_int, transforms=transforms,
num_classes_per_set=args.num_classes_per_set,
num_samples_per_support_class=args.num_samples_per_support_class,
num_samples_per_target_class=args.num_samples_per_target_class,
seed=args.seed,
sets_are_pre_split=args.sets_are_pre_split,
load_into_memory=args.load_into_memory, set_name='val',
                        num_tasks_per_epoch=600,
num_channels=args.image_channels,
num_support_sets=args.num_support_sets,
overwrite_classes_in_each_task=args.overwrite_classes_in_each_task,
class_change_interval=args.class_change_interval)
test_setup_dict = dict(dataset_name=args.dataset_name,
indexes_of_folders_indicating_class=args.indexes_of_folders_indicating_class,
train_val_test_split=args.train_val_test_split,
labels_as_int=args.labels_as_int, transforms=transforms,
num_classes_per_set=args.num_classes_per_set,
num_samples_per_support_class=args.num_samples_per_support_class,
num_samples_per_target_class=args.num_samples_per_target_class,
seed=args.seed,
sets_are_pre_split=args.sets_are_pre_split,
load_into_memory=args.load_into_memory, set_name='test',
num_tasks_per_epoch=600,
num_channels=args.image_channels,
num_support_sets=args.num_support_sets,
overwrite_classes_in_each_task=args.overwrite_classes_in_each_task,
class_change_interval=args.class_change_interval)
train_data = FewShotLearningDatasetParallel(**train_setup_dict)
val_data = FewShotLearningDatasetParallel(**val_setup_dict)
test_data = FewShotLearningDatasetParallel(**test_setup_dict)
data_dict = {'train': DataLoader(train_data, batch_size=args.batch_size,
num_workers=args.num_dataprovider_workers),
'val': DataLoader(val_data, batch_size=args.batch_size,
num_workers=args.num_dataprovider_workers),
'test': DataLoader(test_data, batch_size=args.batch_size,
num_workers=args.num_dataprovider_workers)}
maml_system = ExperimentBuilder(model=model, data_dict=data_dict, experiment_name=args.experiment_name,
continue_from_epoch=args.continue_from_epoch,
total_iter_per_epoch=args.total_iter_per_epoch,
num_evaluation_tasks=args.num_evaluation_tasks, total_epochs=args.total_epochs,
batch_size=args.batch_size, max_models_to_save=args.max_models_to_save,
evaluate_on_test_set_only=args.evaluate_on_test_set_only,
args=args)
maml_system.run_experiment() | train_continual_learning_few_shot_system.py | 0.752831 | 0.409516
__author__ = '__Rebecca__'
__version__ = '0.0.2'
import os
import json
import time
import requests
import multiprocessing
from bcy_single_climber import bcy_single_climber
'''
Endpoint: https://bcy.net/home/timeline/loaduserposts?since=0&grid_type=timeline&uid=3482886&limit=20
At most 20 items can be requested at once.
since=0 fetches from the beginning.
To fetch item 21, set since to the id of the 20th item.
When everything has been fetched, the returned 'data' field is empty.
'''
class bcy_user_item(object):
def __init__(self, data):
self.__data = data
if not self.__data['tl_type'] == 'item':
            raise Exception('Initialization failed: wrong format.')
self.type = self.__data['tl_type']
self.detail = self.__data['item_detail']
self.item_id = self.detail['item_id']
self.user_id = self.detail['uid']
self.avatar = self.detail['avatar']
class bcy_user_climber(object):
def __init__(self, user_id):
self.__user_id = user_id
self.__items = []
self.__item_get_url = 'https://bcy.net/home/timeline/loaduserposts'
self.__max_limit = 20
def get_items(self):
item_list = []
last_item = None
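        # Page through the timeline API 20 items at a time; the server returns an
        # empty 'data' field once everything has been fetched.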
while True:
params = { 'since':0 if not last_item else last_item.item_id,
'grid_type':'timeline',
'uid':self.__user_id,
'limit':self.__max_limit
}
item_data = requests.get(self.__item_get_url, params=params)
item_json = json.loads(item_data.text)
item_data = item_json['data']
            # Stop when 'data' is empty
if not item_data:
break
for i in item_data:
temp_item = bcy_user_item(i)
item_list.append(temp_item)
last_item = item_list[-1]
self.__items = item_list
return item_list
def get_page_url(self):
if not self.__items:
self.get_items()
page_home = 'https://bcy.net/item/detail/'
url_lst = []
for i in self.__items:
temp_url = page_home + i.item_id
url_lst.append(temp_url)
return url_lst
def begin_download(self, page_url_list=None):
if not page_url_list:
page_url_list = self.get_page_url()
for i in page_url_list:
single = bcy_single_climber(url = i)
single.start()
return True
if __name__ == '__main__':
uitem = bcy_user_climber(3482886)
# back = uitem.begin_download()
p = multiprocessing.Process(target = uitem.begin_download, args = ())
p.start()
p.join()
    print('Hello world') | bcy_user_climber.py | 0.1532 | 0.046812
import builtins
import mock
import numpy as np
from sudoku.sudoku import Sudoku
def is_valid_array(arr):
return np.array_equal(sorted(arr), np.arange(1, 10, 1))
def check_valid_puzzle(board):
# Check that all digits 1-9 are represented in the row and are not duplicated.
for i in range(9):
if not is_valid_array(board[i]):
return False
# Check that all digits 1-9 are represented in the column and are not duplicated.
for i in range(9):
if not is_valid_array(board[:, i]):
return False
coords = [(0, 0), (0, 3), (0, 6), (3, 0), (3, 3), (3, 6), (6, 0), (6, 3), (6, 6)]
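    # Top-left coordinates of the nine 3x3 sub-grids.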
    # Check that all digits 1-9 are represented in each 3x3 sub-grid and are not duplicated.
for i, j in coords:
block = board[i : i + 3, j : j + 3]
if not is_valid_array(block.reshape([1, 9])[0]):
return False
return True
def test_generate():
"""Test the generate algorithm is correct."""
assert check_valid_puzzle(Sudoku.generate())
def test_verify_true(sudoku_board):
s = sudoku_board
# Correct guess
assert s.verify(row=0, column=0, value=1)
def test_verify_false(sudoku_board):
s = sudoku_board
# Incorrect guess
assert not s.verify(row=0, column=0, value=8)
def test_attempt_true(sudoku_board):
s = sudoku_board
s._masked_coordinates = {(0, 0)}
s._attempts_so_far = 3
# Correct guess
s.attempt(row=0, column=0, value=1)
# The _attempts_so_far count is unchanged since the guess was correct.
assert sudoku_board._attempts_so_far == 3
def test_attempt_false(sudoku_board):
s = sudoku_board
s._attempts_so_far = 3
# Incorrect guess
s.attempt(row=0, column=0, value=8)
# The _attempts_so_far count has increased by one since the guess was incorrect.
assert s._attempts_so_far == 4
def test_attempts_exhausted_true(sudoku_board):
"""The _attempts_so_far is less than _max_attempts and therefore
attempts_exhausted() is False."""
s = sudoku_board
s._attempts_so_far = 5
s._max_attempts = 6
assert not s.attempts_exhausted
def test_attempts_exhausted_false(sudoku_board):
"""The _attempts_so_far is greater than _max_attempts and
therefore attempts_exhausted() is True."""
s = sudoku_board
s._attempts_so_far = 3
s._max_attempts = 2
assert s.attempts_exhausted
def test_in_play_masked_coordinates_exist_and_attempts_exist(sudoku_board):
"""in_play will be True since there are still missing coordinates and
attempts_exhausted is False."""
s = sudoku_board
# A masked coordinate needs to be found.
s._masked_coordinates = {(0, 0)}
# Still chances to guess.
s._max_attempts = 2
s._attempts_so_far = 1
# Continue playing.
assert s.in_play
def test_in_play_coordinates_unmasked_empty_and_attempts_exist(sudoku_board):
"""in_play will be False since there are no masked_coordinates, even though
attempts_exhausted is False."""
s = sudoku_board
# All masked coordinates found.
s._masked_coordinates = set()
# Still chances to guess.
s._max_attempts = 2
s._attempts_so_far = 1
# Game over - win.
assert not s.in_play
def test_in_play_masked_coordinates_exist_and_attempts_exhausted(sudoku_board):
"""in_play will be False since attempts_exhausted is True, even though
masked_coordinates exist."""
s = sudoku_board
# A masked coordinate needs to be found.
s._masked_coordinates = {(0, 0)}
# No more chances to guess.
s._max_attempts = 1
s._attempts_so_far = 1
# Game over - lose.
assert not s.in_play
def test_in_play_coordinates_unmasked_empty_and_attempts_exhausted(sudoku_board):
"""in_play will be False since attempts_exhausted is True and there are no
masked_coordinates."""
s = sudoku_board
# All masked coordinates found.
s._masked_coordinates = set()
# No more chances to guess.
s._max_attempts = 1
s._attempts_so_far = 1
# Game over - win.
assert not s.in_play
def test_display(capsys):
"""Test board image."""
s = Sudoku()
s._matrix = np.zeros(9 * 9, dtype=np.int32).reshape([9, 9])
sudoku_image = """\
| 0 0 0 | 0 0 0 | 0 0 0 |
| 0 0 0 | 0 0 0 | 0 0 0 |
| 0 0 0 | 0 0 0 | 0 0 0 |
----- + ----- + -----
| 0 0 0 | 0 0 0 | 0 0 0 |
| 0 0 0 | 0 0 0 | 0 0 0 |
| 0 0 0 | 0 0 0 | 0 0 0 |
----- + ----- + -----
| 0 0 0 | 0 0 0 | 0 0 0 |
| 0 0 0 | 0 0 0 | 0 0 0 |
| 0 0 0 | 0 0 0 | 0 0 0 |
"""
s.display()
out, err = capsys.readouterr()
assert out == sudoku_image
def test_remove_random():
"""Testing the set of coordinates to be removed."""
s = Sudoku()
s.empty_numbers = 2
s._matrix = np.arange(1, 3, 1).reshape([1, 2])
s.remove_random()
# The assertion checks the complete list of coordinates so random doesn't have to
# be mocked.
assert s._masked_coordinates == {(0, 1), (0, 0)}
def test_info_message(sudoku_board):
s = sudoku_board
welcome_message = """
Welcome to Sudoku
Rules:
All rows should have the digits 1-9, without repetition.
All columns should have the digits 1-9, without repetition.
All 9 sub-matrices should have the digits 1-9, without repetition.
To play, enter the row, column, and answer at the command prompt. The
Format is: <row> <column> <value>
Type exit to leave
Please note this game uses 0 indexing
Good luck!\n
"""
assert s.intro_message() == welcome_message
def test_run():
s = Sudoku()
with mock.patch.object(builtins, "input", lambda _: "exit"):
        assert s.run() is None | tests/test_sudoku.py | 0.723602 | 0.598459
from mako.template import Template
import os
import importlib.resources as resources
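# Render the Robot.java Mako template matching the selected project type, filling in
# the drivetrain configuration values from the config dict.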
def genRobotCode(projectType, config):
if projectType == "Simple":
with resources.path(__name__, "templates") as path:
with open(os.path.join(path, "Simple", "Robot.java.mako"), "r") as template:
return Template(template.read()).render(
diam=config["wheelDiameter"],
epr=config["encoderEPR"],
lports=config["leftMotorPorts"],
rports=config["rightMotorPorts"],
linverted=config["leftMotorsInverted"],
rinverted=config["rightMotorsInverted"],
lcontrollers=config["leftControllerTypes"],
rcontrollers=config["rightControllerTypes"],
lencoderports=config["leftEncoderPorts"],
rencoderports=config["rightEncoderPorts"],
lencoderinv=config["leftEncoderInverted"],
rencoderinv=config["rightEncoderInverted"],
gyro=config["gyroType"],
gyroport=config["gyroPort"],
)
elif projectType == "Talon":
with resources.path(__name__, "templates") as path:
with open(os.path.join(path, "Talon", "Robot.java.mako"), "r") as template:
return Template(template.read()).render(
diam=config["wheelDiameter"],
epr=config["encoderEPR"],
lports=config["leftMotorPorts"],
rports=config["rightMotorPorts"],
linverted=config["leftMotorsInverted"],
rinverted=config["rightMotorsInverted"],
lcontrollers=config["leftControllerTypes"],
rcontrollers=config["rightControllerTypes"],
lencoderinv=config["leftEncoderInverted"],
rencoderinv=config["rightEncoderInverted"],
gyro=config["gyroType"],
gyroport=config["gyroPort"],
)
elif projectType == "SparkMax_Brushed":
with resources.path(__name__, "templates") as path:
with open(
os.path.join(path, "SparkMax_Brushed", "Robot.java.mako"), "r"
) as template:
return Template(template.read()).render(
diam=config["wheelDiameter"],
epr=config["encoderEPR"],
gearing=config["gearing"],
lports=config["leftMotorPorts"],
rports=config["rightMotorPorts"],
linverted=config["leftMotorsInverted"],
rinverted=config["rightMotorsInverted"],
lencoderinv=config["leftEncoderInverted"],
rencoderinv=config["rightEncoderInverted"],
gyro=config["gyroType"],
gyroport=config["gyroPort"],
)
elif projectType == "SparkMax_Brushless":
with resources.path(__name__, "templates") as path:
with open(
os.path.join(path, "SparkMax_Brushless", "Robot.java.mako"), "r"
) as template:
return Template(template.read()).render(
diam=config["wheelDiameter"],
gearing=config["gearing"],
lports=config["leftMotorPorts"],
rports=config["rightMotorPorts"],
linverted=config["leftMotorsInverted"],
rinverted=config["rightMotorsInverted"],
gyro=config["gyroType"],
gyroport=config["gyroPort"],
)
def genBuildGradle(projectType, team):
with resources.path(__name__, "templates") as path:
with open(
os.path.join(path, projectType, "build.gradle.mako"), "r"
) as template:
            return Template(template.read()).render(team=team) | frc_characterization/drive_characterization/__init__.py | 0.509032 | 0.12692
from __future__ import unicode_literals
import random
import pyecharts.echarts as option
base_fs = []  # invoked from the _config_components() method
other_fs = []  # invoked from the add() method
SYMBOLS = ("rect", "roundRect", "triangle", "diamond", "pin", "arrow")
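# The decorators below register option-building helpers in the two lists above so
# that get_base_options()/get_other_options() can collect them by function name.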
def collect_other_func(func):
other_fs.append(func)
return func
def collect_base_func(func):
base_fs.append(func)
return func
@collect_base_func
def datazoom(
is_datazoom_show=False,
datazoom_type="slider",
datazoom_range=None,
datazoom_orient="horizontal",
datazoom_xaxis_index=None,
datazoom_yaxis_index=None,
**kwargs
):
"""
    dataZoom component, used for zooming into a region of the data so that you can
    focus on detail, get an overview, or exclude outliers.
    :param is_datazoom_show:
        whether to enable the data-zoom component, default False
    :param datazoom_type:
        data-zoom component type, default 'slider'; one of 'slider', 'inside', 'both'
    :param datazoom_range:
        zoomed range, default [50, 100]
    :param datazoom_orient:
        orientation of the datazoom component in a cartesian coordinate system,
        default 'horizontal' (applies to the x axis); set 'vertical' to apply
        it to the y axis.
    :param datazoom_xaxis_index:
        index of the x axis controlled by the datazoom component.
        Controls the first x axis by default; no need to set it explicitly unless
        required. An int controls a single axis, a list controls several,
        e.g. [0, 1] controls the first and second x axes.
    :param datazoom_yaxis_index:
        index of the y axis controlled by the datazoom component.
        Controls the first y axis by default; no need to set it explicitly unless
        required. An int controls a single axis, a list controls several,
        e.g. [0, 1] controls the first and second y axes.
:param kwargs:
"""
_min, _max = 50, 100
if datazoom_range:
if len(datazoom_range) == 2:
_min, _max = datazoom_range
if datazoom_type not in ("slider", "inside", "both"):
datazoom_type = "slider"
_datazoom = []
_datazoom_config = {
"show": is_datazoom_show,
"type": "slider",
"start": _min,
"end": _max,
"orient": datazoom_orient,
"xAxisIndex": datazoom_xaxis_index,
"yAxisIndex": datazoom_yaxis_index,
}
if datazoom_type == "both":
_datazoom.append(_datazoom_config.copy())
datazoom_type = "inside"
_datazoom_config["type"] = datazoom_type
_datazoom.append(_datazoom_config)
return _datazoom
@collect_base_func
def datazoom_extra(
is_datazoom_extra_show=False,
datazoom_extra_type="slider",
datazoom_extra_range=None,
datazoom_extra_orient="vertical",
datazoom_extra_xaxis_index=None,
datazoom_extra_yaxis_index=None,
**kwargs
):
"""
    An extra dataZoom bar, so that the dataZoom effect can be applied to the X and Y axes at the same time.
"""
if is_datazoom_extra_show:
return datazoom(
is_datazoom_show=True,
datazoom_type=datazoom_extra_type,
datazoom_range=datazoom_extra_range,
datazoom_orient=datazoom_extra_orient,
datazoom_xaxis_index=datazoom_extra_xaxis_index,
datazoom_yaxis_index=datazoom_extra_yaxis_index,
)
@collect_base_func
def color(colorlst=None, is_random=False, label_color=None, **kwargs):
"""
    :param colorlst:
        the global color list
    :param is_random:
        whether to randomly shuffle the global color list
    :param label_color:
        additional colors, inserted at the front of the global list
:param kwargs:
"""
if colorlst is None:
colorlst = []
if label_color:
for color in reversed(list(label_color)):
colorlst.insert(0, color)
if is_random:
random.shuffle(colorlst)
return colorlst
@collect_base_func
def tooltip(**kwargs):
return option.Tooltip(**kwargs)
@collect_base_func
def legend(
is_legend_show=True,
legend_orient="horizontal",
legend_pos="center",
legend_top="top",
legend_selectedmode="multiple",
legend_text_size=12,
legend_text_color=None,
**kwargs
):
"""
    Legend component. The legend shows the symbol, color and name of each series;
    clicking a legend entry toggles whether that series is displayed.
    :param is_legend_show:
        whether to show the legend at the top, default True
    :param legend_orient:
        layout orientation of the legend, default 'horizontal'; 'horizontal' or 'vertical'
    :param legend_pos:
        distance of the legend from the left side of the container, default 'center';
        one of 'left', 'center', 'right', or a percentage such as "60%"
    :param legend_top:
        distance of the legend from the top of the container, default 'top';
        one of 'top', 'center', 'bottom', or a percentage such as "60%"
    :param legend_selectedmode:
        legend selection mode, i.e. whether clicking an entry toggles the series.
        Default 'multiple'; can be 'single' or 'multiple', or False to disable
        toggling entirely.
    :param legend_text_size:
        legend label font size
    :param legend_text_color:
        legend label font color
:param kwargs:
"""
_legend = {
"selectedMode": legend_selectedmode,
"show": is_legend_show,
"left": legend_pos,
"top": legend_top,
"orient": legend_orient,
"textStyle": {
"fontSize": legend_text_size,
"color": legend_text_color,
},
}
return _legend
@collect_base_func
def visual_map(
visual_type="color",
visual_range=None,
visual_text_color=None,
visual_range_text=None,
visual_range_color=None,
visual_range_size=None,
visual_orient="vertical",
visual_pos="left",
visual_top="bottom",
visual_split_number=5,
visual_dimension=None,
is_calculable=True,
is_piecewise=False,
pieces=None,
**kwargs
):
"""
    The visualMap component performs 'visual encoding', i.e. maps data values to
    visual elements (visual channels).
    :param visual_type:
        mapping mode, default 'color' (values are mapped to colors); 'color' or
        'size'. 'size' maps values to the size of the data points.
    :param visual_range:
        minimum and maximum values accepted by the component. Default [0, 100]
    :param visual_text_color:
        color of the text at both ends.
    :param visual_range_text:
        text at both ends. Default ['low', 'high']
    :param visual_range_size:
        range that values are mapped to when using size mapping, i.e. the point
        sizes. Default [20, 50]
    :param visual_range_color:
        gradient colors. Default ['#50a3ba', '#eac763', '#d94e5d']
    :param visual_orient:
        orientation of the visualMap bar, default 'vertical'; 'vertical' or 'horizontal'.
    :param visual_pos:
        distance of the visualMap bar from the left side, default 'left'; one of
        'left', 'center', 'right', or a percentage or integer.
    :param visual_top:
        distance of the visualMap bar from the top, default 'bottom'; one of
        'top', 'center', 'bottom', or a percentage or integer.
    :param visual_split_number:
        number of segments in piecewise mode. Default 5.
    :param visual_dimension:
        which dimension of the data is mapped to the visual channel. Defaults to
        the last dimension. Indexing starts at 0; in cartesian coordinates the
        x axis is dimension 0 and the y axis is dimension 1.
    :param is_calculable:
        whether to show the drag handles used to adjust the selected range. Default True
    :param is_piecewise:
        whether to switch the component to piecewise mode (continuous by default),
        default False
    :param pieces:
        custom range, label and style for each segment of a piecewise visualMap
        (only effective when is_piecewise is True). For example:
        pieces: [
            {min: 1500}, // no max means max is +Infinity.
            {min: 900, max: 1500},
            {min: 310, max: 1000},
            {min: 200, max: 300},
            {min: 10, max: 200, label: '10 to 200 (custom label)'},
            // matches value == 123 exactly.
            {value: 123, label: '123 (custom color)', color: 'grey'}
            {max: 5} // no min means min is -Infinity.
        ]
:param kwargs:
"""
_min, _max = 0, 100
if visual_range:
if len(visual_range) == 2:
_min, _max = visual_range
_tlow, _thigh = "low", "high"
if visual_range_text:
if len(visual_range_text) == 2:
_tlow, _thigh = visual_range_text
_inrange_op = {}
if visual_type == "color":
range_color = ["#50a3ba", "#eac763", "#d94e5d"]
if visual_range_color:
if len(visual_range_color) >= 2:
range_color = visual_range_color
_inrange_op.update(color=range_color)
if visual_type == "size":
range_size = [20, 50]
if visual_range_size:
if len(visual_range_size) >= 2:
range_size = visual_range_size
_inrange_op.update(symbolSize=range_size)
_type = "piecewise" if is_piecewise else "continuous"
_visual_map = {
"type": _type,
"min": _min,
"max": _max,
"text": [_thigh, _tlow],
"textStyle": {"color": visual_text_color},
"inRange": _inrange_op,
"calculable": is_calculable,
"splitNumber": visual_split_number,
"dimension": visual_dimension,
"orient": visual_orient,
"left": visual_pos,
"top": visual_top,
"showLabel": True,
}
if is_piecewise:
_visual_map.update(pieces=pieces)
return _visual_map
@collect_other_func
def label(
type=None,
is_label_show=False,
is_label_emphasis=True,
label_pos=None,
label_text_color=None,
label_text_size=12,
label_formatter=None,
label_emphasis_pos=None,
label_emphasis_textcolor=None,
label_emphasis_textsize=12,
**kwargs
):
"""
    Text labels drawn on graphic elements, used to show information such as values
    and names.
    :param type:
        chart type
    :param is_label_emphasis:
        whether to show the emphasis (highlight) label, default True. The emphasis
        label is the information shown when a data item is selected.
    :param is_label_show:
        whether to show the normal label, default False. The label shows the data
        value of each point.
    :param label_pos:
        label position, default 'top' for Bar charts; one of 'top', 'left',
        'right', 'bottom', 'inside', 'outside'
    :param label_text_color:
        label font color, default "#000"
    :param label_text_size:
        label font size, default 12
    :param label_formatter:
        template variables are {a}, {b}, {c}, {d}, {e}, standing for the series
        name, data name, data value and so on.
        Example: label_formatter='{a}'
        When trigger is 'axis' there may be several series; {a0}, {a1}, {a2} etc.
        index into them. The meaning of {a}, {b}, {c}, {d} depends on the chart type:
        line (area), bar, candlestick charts:
            {a} (series name), {b} (category value), {c} (value), {d} (none)
        scatter (bubble) charts:
            {a} (series name), {b} (data name), {c} (value array), {d} (none)
        maps:
            {a} (series name), {b} (area name), {c} (merged value), {d} (none)
        pie, gauge, funnel charts:
            {a} (series name), {b} (data item name), {c} (value), {d} (percentage)
    :param label_emphasis_pos:
        emphasis label position, default 'top' for Bar charts; one of 'top',
        'left', 'right', 'bottom', 'inside', 'outside'
    :param label_emphasis_textcolor:
        emphasis label font color, default "#fff"
    :param label_emphasis_textsize:
        emphasis label font size, default 12
:param kwargs:
"""
_label = {
"normal": option.NormalLabel(
visibility=is_label_show,
position=label_pos,
text_color=label_text_color,
text_size=label_text_size,
formatter=label_formatter,
chart_type=type,
),
"emphasis": option.EmphasisLabel(
visibility=is_label_emphasis,
position=label_emphasis_pos,
text_color=label_emphasis_textcolor,
text_size=label_emphasis_textsize,
chart_type=type,
),
}
return _label
@collect_other_func
def line_style(
type=None,
line_width=1,
line_opacity=1,
line_curve=0,
line_type="solid",
line_color=None,
**kwargs
):
"""
    Line style options for charts that draw lines.
    :param type:
        chart type
    :param line_width:
        line width, default 1
    :param line_opacity:
        line opacity, 0 fully transparent, 1 fully opaque. Default 1
    :param line_curve:
        line curvature, 0 for a straight line, 1 for the most curved. Default 0
    :param line_type:
        line type, one of 'solid', 'dashed', 'dotted'. Default 'solid'
    :param line_color:
        line color
:param kwargs:
"""
if line_color is None and type == "graph":
line_color = "#aaa"
_line_style = {
"normal": option.Line(
width=line_width,
opacity=line_opacity,
curve=line_curve,
line_type=line_type,
color=line_color,
chart_type=type,
)
}
return _line_style
@collect_other_func
def split_line(is_splitline_show=True, **kwargs):
"""
:param is_splitline_show:
        whether to show split lines
:param kwargs:
"""
_split_line = {
"show": is_splitline_show,
"lineStyle": line_style(**kwargs),
}
return _split_line
@collect_other_func
def axis_line(is_axisline_show=True, **kwargs):
"""
:param is_axisline_show:
        whether to show the axis line
:param kwargs:
"""
_axis_line = {"show": is_axisline_show, "lineStyle": line_style(**kwargs)}
return _axis_line
@collect_other_func
def split_area(is_area_show=True, **kwargs):
"""
:param is_area_show:
        whether to show the split area
:param kwargs:
"""
_split_area = {"show": is_area_show, "areaStyle": area_style(**kwargs)}
return _split_area
@collect_other_func
def area_style(flag=False, area_opacity=None, area_color=None, **kwargs):
"""
    :param flag:
        flag used to pick the default opacity
    :param area_opacity:
        area opacity. A number from 0 to 1; the area is not drawn when it is 0.
    :param area_color:
        fill color
:param kwargs:
"""
if area_opacity is None:
area_opacity = 0 if flag else 1
_area_style = {"opacity": area_opacity, "color": area_color}
return _area_style
@collect_other_func
def xy_axis(
type=None,
x_axis=None,
xaxis_margin=8,
xaxis_name_size=14,
xaxis_name_gap=25,
xaxis_name="",
xaxis_name_pos="middle",
xaxis_rotate=0,
xaxis_min=None,
xaxis_max=None,
xaxis_type=None,
xaxis_interval="auto",
xaxis_force_interval=None,
xaxis_pos=None,
xaxis_label_textsize=12,
xaxis_label_textcolor=None,
xaxis_formatter=None,
xaxis_line_color=None,
xaxis_line_width=1,
yaxis_margin=8,
yaxis_name_size=14,
yaxis_name_gap=25,
yaxis_name="",
yaxis_name_pos="middle",
yaxis_rotate=0,
yaxis_min=None,
yaxis_max=None,
yaxis_type=None,
yaxis_interval="auto",
yaxis_force_interval=None,
yaxis_pos=None,
yaxis_label_textsize=12,
yaxis_label_textcolor=None,
yaxis_formatter="",
yaxis_line_color=None,
yaxis_line_width=1,
is_convert=False,
is_xaxis_inverse=False,
is_yaxis_inverse=False,
is_xaxislabel_align=False,
is_yaxislabel_align=False,
is_xaxis_boundarygap=True,
is_yaxis_boundarygap=True,
is_xaxis_show=True,
is_yaxis_show=True,
is_splitline_show=True,
**kwargs
):
"""
    The x and y axes of a cartesian coordinate system (Line, Bar, Scatter,
    EffectScatter, Kline).
    :param type:
        chart type.
    :param x_axis:
        x axis data items.
    :param xaxis_margin:
        distance between the x axis labels and the axis line. Default 8
    :param xaxis_name_size:
        x axis name font size, default 14
    :param xaxis_name_gap:
        distance between the x axis name and the axis line, default 25
    :param xaxis_name:
        x axis name
    :param xaxis_name_pos:
        x axis name position, one of 'start', 'middle', 'end'
    :param xaxis_rotate:
        rotation angle of the x axis labels; on a category axis this prevents
        labels from overlapping when they do not fit. Default 0 (no rotation).
        The angle ranges from -90 to 90 degrees.
    :param xaxis_min:
        minimum value of the x axis, adaptive by default. The special value
        "dataMin" uses the minimum of the data as the axis minimum.
    :param xaxis_max:
        maximum value of the x axis, adaptive by default. The special value
        "dataMax" uses the maximum of the data as the axis maximum.
    :param xaxis_type:
        x axis type
        'value': numeric axis, for continuous data.
        'category': category axis, for discrete category data.
        'log': logarithmic axis, for logarithmic data.
    :param xaxis_interval:
        display interval of the x axis labels, effective on a category axis. By
        default labels are spaced so that they do not overlap. 0 forces all labels
        to be shown; 1 shows every other label; 2 shows every third label, and so on.
    :param xaxis_force_interval:
        force the x axis split interval. For example 50 gives ticks at
        [0, 50, 150, ...]; "auto" shows only two ticks. Setting this is usually
        not recommended: splitNumber is an estimate and the computed ticks may not
        match what you want, in which case interval together with min and max can
        be used to force the tick layout. Has no effect on a category axis.
    :param xaxis_pos:
        x axis position, 'top' or 'bottom'
    :param xaxis_label_textsize:
        x axis label font size
    :param xaxis_label_textcolor:
        x axis label font color
    :param xaxis_formatter:
        x axis label formatter, e.g. 'days' turns the labels into "3 days",
        "4 days", and so on. Default ""
        xaxis_formatter -> function
        ```python
        def label_formatter(params):
            return params.value + ' [Good!]'
        ```
        For the callback form, see the advanced usage docs (zh-cn/advanced)
        ```
        (params: Object|Array) => string
        The params argument is the single data set the formatter receives, shaped as:
        {
            componentType: 'series',
            // series type
            seriesType: string,
            // index of the series in option.series
            seriesIndex: number,
            // series name
            seriesName: string,
            // data name / category name
            name: string,
            // index of the data item in the input data array
            dataIndex: number,
            // the raw data item that was passed in
            data: Object,
            // the value that was passed in
            value: number|Array,
            // color of the data graphic
            color: string,
        }
        ```
    :param xaxis_line_color:
        x axis line color, default None
    :param xaxis_line_width:
        x axis line width, default 1
    :param yaxis_margin:
        distance between the y axis labels and the axis line. Default 8
    :param yaxis_name_size:
        y axis name font size, default 14
    :param yaxis_name_gap:
        distance between the y axis name and the axis line, default 25
    :param yaxis_name:
        y axis name
    :param yaxis_name_pos:
        y axis name position, one of 'start', 'middle', 'end'
    :param yaxis_rotate:
        rotation angle of the y axis labels; on a category axis this prevents
        labels from overlapping when they do not fit. Default 0 (no rotation).
        The angle ranges from -90 to 90 degrees.
    :param yaxis_min:
        minimum value of the y axis, adaptive by default. The special value
        "dataMin" uses the minimum of the data as the axis minimum.
    :param yaxis_max:
        maximum value of the y axis, adaptive by default. The special value
        "dataMax" uses the maximum of the data as the axis maximum.
    :param yaxis_type:
        y axis type
        'value': numeric axis, for continuous data.
        'category': category axis, for discrete category data.
        'log': logarithmic axis, for logarithmic data.
    :param yaxis_interval:
        display interval of the y axis labels, effective on a category axis. By
        default labels are spaced so that they do not overlap. 0 forces all labels
        to be shown; 1 shows every other label; 2 shows every third label, and so on.
    :param yaxis_force_interval:
        force the y axis split interval. For example 50 gives ticks at
        [0, 50, 150, ...]; "auto" shows only two ticks. Setting this is usually
        not recommended: splitNumber is an estimate and the computed ticks may not
        match what you want, in which case interval together with min and max can
        be used to force the tick layout. Has no effect on a category axis.
    :param yaxis_pos:
        y axis position, 'left' or 'right'
    :param yaxis_label_textsize:
        y axis label font size
    :param yaxis_label_textcolor:
        y axis label font color
    :param yaxis_formatter:
        y axis label formatter, e.g. 'days' turns the labels into "3 days",
        "4 days", and so on. Default ""
        yaxis_formatter -> function, with the same callback signature as
        xaxis_formatter above.
    :param yaxis_line_color:
        y axis line color, default None
    :param yaxis_line_width:
        y axis line width, default 1
    :param is_convert:
        whether to swap the x and y axes
    :param is_xaxis_inverse:
        whether to invert the x axis, default False
    :param is_yaxis_inverse:
        whether to invert the y axis, default False
    :param is_xaxislabel_align:
        whether the x axis ticks are aligned with the labels, default False
    :param is_yaxislabel_align:
        whether the y axis ticks are aligned with the labels, default False
    :param is_xaxis_boundarygap:
        boundary gap policy on both sides of the x axis, for category axes. Can be
        True or False; default True, in which case ticks act only as separators and
        labels and data points sit in the middle of the band between two ticks,
        leaving a gap at both ends.
    :param is_yaxis_boundarygap:
        boundary gap policy on both sides of the y axis, for category axes. Can be
        True or False; default True, in which case ticks act only as separators and
        labels and data points sit in the middle of the band between two ticks,
        leaving a gap at both ends.
    :param is_xaxis_show:
        whether to show the x axis
    :param is_yaxis_show:
        whether to show the y axis
    :param is_splitline_show:
        whether to show the y axis grid lines, default True.
:param kwargs:
"""
_xAxis = option.XAxis(
name=xaxis_name,
visibility=is_xaxis_show,
name_location=xaxis_name_pos,
name_gap=xaxis_name_gap,
name_size=xaxis_name_size,
position=xaxis_pos,
boundary_gap=is_xaxis_boundarygap,
label_alignment=is_xaxislabel_align,
inverse=is_xaxis_inverse,
value_range=[xaxis_min, xaxis_max],
axis_type=xaxis_type,
axis_line_color=xaxis_line_color,
axis_line_width=xaxis_line_width,
chart_type=type,
)
_xAxis["axisLabel"] = option.XAxisLabel(
interval=xaxis_interval,
rotate=xaxis_rotate,
margin=xaxis_margin,
text_size=xaxis_label_textsize,
text_color=xaxis_label_textcolor,
formatter=xaxis_formatter,
)
_yAxis = option.YAxis(
name=yaxis_name,
visibility=is_yaxis_show,
name_location=yaxis_name_pos,
name_gap=yaxis_name_gap,
name_size=yaxis_name_size,
position=yaxis_pos,
boundary_gap=is_yaxis_boundarygap,
label_alignment=is_yaxislabel_align,
inverse=is_yaxis_inverse,
value_range=[yaxis_min, yaxis_max],
split_line=is_splitline_show,
axis_type=yaxis_type,
axis_line_color=yaxis_line_color,
axis_line_width=yaxis_line_width,
chart_type=type,
)
_yAxis["axisLabel"] = option.YAxisLabel(
interval=yaxis_interval,
rotate=yaxis_rotate,
margin=yaxis_margin,
text_size=yaxis_label_textsize,
text_color=yaxis_label_textcolor,
formatter=yaxis_formatter,
)
if is_convert:
xaxis_type, yaxis_type = _yAxis["type"], _xAxis["type"]
_xAxis["type"] = xaxis_type
_yAxis.update(data=x_axis, type=yaxis_type)
else:
_xAxis["data"] = x_axis
    # Force the split interval on a value axis; with multiple x/y axes this can be
    # used to align the tick marks.
if xaxis_force_interval is not None:
_xAxis["interval"] = xaxis_force_interval
if yaxis_force_interval is not None:
_yAxis["interval"] = yaxis_force_interval
    # Return the axis option dicts
return [_xAxis], [_yAxis]
def _mark(
data,
mark_point_raw=None,
mark_point_symbol="pin",
mark_point_symbolsize=50,
mark_point_textcolor="#fff",
mark_line_raw=None,
mark_line_symbolsize=10,
mark_line_valuedim="",
mark_line_coords=None,
mark_point_valuedim="",
_is_markline=False,
**kwargs
):
"""
    Marking component, used to highlight specific data either as mark points or
    as mark lines.
    :param data:
        mark points
            'min', 'max', 'average' are available by default. Custom mark points
            are also supported, used as follows:
            [
                {
                    "coord": [a1, b1],
                    "name": "first markpoint"
                },
                {
                    "coord": [a2, b2],
                    "name": "second markpoint"
                }
            ]
            Pass in the mark-point dicts yourself; each has two keys, 'coord' for
            the x/y coordinates and 'name' for the mark-point name.
        mark lines
            only the default 'min', 'max', 'average' are supported.
    :param mark_point_raw:
        markPoint data in the native format, a list of dicts [{}, {}, ...].
        See http://echarts.baidu.com/option.html#series-line.markPoint.data
    :param mark_point_symbol:
        mark-point symbol, default 'pin'; one of 'circle', 'rect', 'roundRect',
        'triangle', 'diamond', 'pin', 'arrow'
    :param mark_point_symbolsize:
        mark-point symbol size, default 50
    :param mark_point_textcolor:
        mark-point text color, default '#fff'
    :param mark_line_raw:
        markLine data in the native format, a list of dicts [{}, {}, ...].
        See http://echarts.baidu.com/option.html#series-line.markLine.data
    :param mark_line_symbolsize:
        mark-line symbol size, default 10
    :param mark_line_valuedim:
        dimension on which the mark line computes its max/min. This can be a
        dimension name: x, angle, etc. for Line charts; open, close, highest,
        lowest for Kline charts. Several dimensions can be given at once, e.g.
        mark_line=['min', 'max'], mark_line_valuedim=['lowest', 'highest']
        means min uses the lowest dimension and max uses the highest dimension,
        and so on.
    :param mark_line_coords:
        start and end coordinates of the mark line, e.g. [[10, 10], [30, 30]],
        the two points being (x, y) axis coordinates.
    :param mark_point_valuedim:
        dimension on which the mark point computes its max/min. This can be a
        dimension name: x, angle, etc. for Line charts; open, close, highest,
        lowest for Kline charts. Several dimensions can be given at once, e.g.
        mark_point=['min', 'max'], mark_point_valuedim=['lowest', 'highest']
        means min uses the lowest dimension and max uses the highest dimension,
        and so on.
    :param _is_markline:
        whether this call builds a markLine
"""
if _is_markline:
if mark_line_raw:
return {"data": mark_line_raw}
else:
if mark_point_raw:
return {"data": mark_point_raw}
mark = {"data": []}
if data:
_markpv = _marklv = [None for _ in range(len(data))]
_markpv[: len(mark_point_valuedim)] = mark_point_valuedim
_marklv[: len(mark_line_valuedim)] = mark_line_valuedim
for index, d in enumerate(list(data)):
            # Custom coordinate data for the mark point
if isinstance(d, dict):
_coord = d.get("coord", None)
_pname = d.get("name", None)
_marktmp = {
"coord": _coord,
"value": _coord[1],
"name": _pname,
"symbol": mark_point_symbol,
"symbolSize": mark_point_symbolsize,
"label": {
"normal": {
"textStyle": {"color": mark_point_textcolor}
}
},
}
mark.get("data").append(_marktmp)
else:
_type, _name = "", ""
if "max" in d:
_type, _name = "max", "Maximum"
elif "min" in d:
_type, _name = "min", "Minimum"
elif "average" in d:
_type, _name = "average", "mean-Value"
if _is_markline:
_marktmp = {
"type": _type,
"name": _name,
"valueDim": _marklv[index],
}
if _type:
mark.get("data").append(_marktmp)
mark.update(symbolSize=mark_line_symbolsize)
else:
_marktmp = {
"type": _type,
"name": _name,
"valueDim": _markpv[index],
}
_marktmp.update(
symbol=mark_point_symbol,
symbolSize=mark_point_symbolsize,
label={
"normal": {
"textStyle": {"color": mark_point_textcolor}
}
},
)
if _type:
mark.get("data").append(_marktmp)
if mark_line_coords and len(mark_line_coords) == 2:
return {
"data": [
[
{"coord": mark_line_coords[0]},
{"coord": mark_line_coords[1]},
]
]
}
return mark
@collect_other_func
def mark_point(mark_point=None, **kwargs):
"""
    Mark-point configuration.
    :param mark_point:
        'min', 'max', 'average' are available by default. Custom mark points are
        also supported, used as follows:
        [
            {
                "coord": [a1, b1],
                "name": "first markpoint"
            },
            {
                "coord": [a2, b2],
                "name": "second markpoint"
            }
        ]
        Pass in the mark-point dicts yourself; each has two keys, 'coord' for the
        x/y coordinates and 'name' for the mark-point name.
:param kwargs:
"""
return _mark(mark_point, **kwargs)
@collect_other_func
def mark_line(mark_line=None, **kwargs):
""" 标记线配置项
:param mark_line:
只支持默认的 'min', 'max', 'average'
:param kwargs:
"""
return _mark(mark_line, _is_markline=True, **kwargs)
@collect_other_func
def symbol(type=None, symbol="", **kwargs):
"""
:param symbol:
        symbol type, one of 'rect', 'roundRect', 'triangle', 'diamond',
        'pin', 'arrow'
:param kwargs:
"""
if symbol is None: # Radar
symbol = "none"
elif type == "line" and symbol == "": # Line
symbol = "emptyCircle"
elif symbol not in SYMBOLS:
symbol = "circle"
return symbol
@collect_other_func
def effect(
effect_brushtype="stroke", effect_scale=2.5, effect_period=4, **kwargs
):
"""
    Ripple-effect animation configuration.
    :param effect_brushtype:
        how the ripple is drawn, 'stroke' or 'fill'. Default 'stroke'
    :param effect_scale:
        maximum scale of the ripple during the animation. Default 2.5
    :param effect_period:
        duration of the animation. Default 4 (seconds)
:param kwargs:
"""
_effect = {
"brushType": effect_brushtype,
"scale": effect_scale,
"period": effect_period,
}
return _effect
@collect_other_func
def grid(
grid_width=None,
grid_height=None,
grid_top=None,
grid_bottom=None,
grid_left=None,
grid_right=None,
**kwargs
):
"""
    Grid component configuration.
    :param grid_width:
        width of the grid component. Adaptive by default.
    :param grid_height:
        height of the grid component. Adaptive by default.
    :param grid_top:
        distance of the grid component from the top of the container. Default None;
        'top', 'center', 'middle', a percentage or an integer.
    :param grid_bottom:
        distance of the grid component from the bottom of the container. Default None;
        'top', 'center', 'middle', a percentage or an integer.
    :param grid_left:
        distance of the grid component from the left of the container. Default None;
        'left', 'center', 'right', a percentage or an integer.
    :param grid_right:
        distance of the grid component from the right of the container. Default None;
        'left', 'center', 'right', a percentage or an integer.
"""
_grid = {}
if grid_width is not None:
_grid.update(width=grid_width)
if grid_height is not None:
_grid.update(height=grid_height)
if grid_top is not None:
_grid.update(top=grid_top)
if grid_bottom is not None:
_grid.update(bottom=grid_bottom)
if grid_left is not None:
_grid.update(left=grid_left)
if grid_right is not None:
_grid.update(right=grid_right)
return _grid
@collect_other_func
def grid3D(
grid3d_width=100,
grid3d_height=100,
grid3d_depth=100,
grid3d_rotate_speed=10,
grid3d_rotate_sensitivity=1,
is_grid3d_rotate=False,
**kwargs
):
"""
    3D cartesian coordinate system configuration, for 3D charts.
    :param grid3d_width:
        width of the 3D cartesian component in the 3D scene. Default 100
    :param grid3d_height:
        height of the 3D cartesian component in the 3D scene. Default 100
    :param grid3d_depth:
        depth of the 3D cartesian component in the 3D scene. Default 100
    :param grid3d_rotate_speed:
        rotation speed in degrees per second. Default 10, i.e. one full turn
        every 36 seconds.
    :param is_grid3d_rotate:
        whether to automatically rotate the viewpoint around the object. Default False
    :param grid3d_rotate_sensitivity:
        sensitivity of the rotation; larger values are more sensitive. Default 1;
        0 disables rotation.
:param kwargs:
"""
_grid3D = {
"boxWidth": grid3d_width,
"boxHeight": grid3d_height,
"boxDepth": grid3d_depth,
"viewControl": {
"autoRotate": is_grid3d_rotate,
"autoRotateSpeed": grid3d_rotate_speed,
"rotateSensitivity": grid3d_rotate_sensitivity,
},
}
return _grid3D
@collect_other_func
def xaxis3D(
xaxis3d_type=None,
xaxis3d_name="",
xaxis3d_name_size=16,
xaxis3d_name_gap=20,
xaxis3d_min=None,
xaxis3d_max=None,
xaxis3d_interval="auto",
xaxis3d_margin=8,
**kwargs
):
"""
    3D x axis configuration.
    :param xaxis3d_type:
        3D x axis type
    :param xaxis3d_name:
        x axis name, default ""
    :param xaxis3d_name_size:
        x axis name font size, default 16
    :param xaxis3d_name_gap:
        distance between the x axis name and the axis line, default 20
    :param xaxis3d_min:
        minimum value of the x axis, adaptive by default.
    :param xaxis3d_max:
        maximum value of the x axis, adaptive by default.
    :param xaxis3d_interval:
        display interval of the x axis labels, effective on a category axis. By
        default labels are spaced so that they do not overlap. 0 forces all labels
        to be shown; 1 shows every other label; 2 shows every third label, and so on.
    :param xaxis3d_margin:
        distance between the x axis labels and the axis line. Default 8
"""
_xaxis3D = {
"name": xaxis3d_name,
"nameGap": xaxis3d_name_gap,
"nameTextStyle": {"fontSize": xaxis3d_name_size},
"type": xaxis3d_type,
"min": xaxis3d_min,
"max": xaxis3d_max,
"axisLabel": {"margin": xaxis3d_margin, "interval": xaxis3d_interval},
}
return _xaxis3D
@collect_other_func
def yaxis3D(
yaxis3d_type=None,
yaxis3d_name="",
yaxis3d_name_size=16,
yaxis3d_name_gap=20,
yaxis3d_min=None,
yaxis3d_max=None,
yaxis3d_interval="auto",
yaxis3d_margin=8,
**kwargs
):
"""
3D y 轴配置项
:param yaxis3d_type:
3D x 轴类型
:param yaxis3d_name:
y 轴名称,默认为 ""
:param yaxis3d_name_size:
y 轴名称体大小,默认为 16
:param yaxis3d_name_gap:
y 轴名称与轴线之间的距离,默认为 25
:param yaxis3d_min:
y 坐标轴刻度最小值,默认为自适应。
:param yaxis3d_max:
y 坐标轴刻度最大值,默认为自适应。
:param yaxis3d_interval:
y 轴刻度标签的显示间隔,在类目轴中有效。默认会采用标签不重叠的策略间隔显示标签。
设置成 0 强制显示所有标签。设置为 1,表示『隔一个标签显示一个标签』,如果值
为 2,表示隔两个标签显示一个标签,以此类推
:param yaxis3d_margin:
y 轴刻度标签与轴线之间的距离。默认为 8
"""
_yaxis3D = {
"name": yaxis3d_name,
"nameGap": yaxis3d_name_gap,
"nameTextStyle": {"fontSize": yaxis3d_name_size},
"type": yaxis3d_type,
"min": yaxis3d_min,
"max": yaxis3d_max,
"axisLabel": {"margin": yaxis3d_margin, "interval": yaxis3d_interval},
}
return _yaxis3D
@collect_other_func
def zaxis3D(
zaxis3d_type=None,
zaxis3d_name="",
zaxis3d_name_size=16,
zaxis3d_name_gap=20,
zaxis3d_min=None,
zaxis3d_max=None,
zaxis3d_margin=8,
**kwargs
):
"""
3D y 轴配置项
:param zaxis3d_type:
3D y 轴类型
:param zaxis3d_name:
z 轴名称,默认为 ""
:param zaxis3d_name_size:
z 轴名称体大小,默认为 16
:param zaxis3d_name_gap:
z 轴名称与轴线之间的距离,默认为 25
:param zaxis3d_min:
z 坐标轴刻度最小值,默认为自适应。
:param zaxis3d_max:
z 坐标轴刻度最大值,默认为自适应。
:param zaxis3d_margin:
z 轴刻度标签与轴线之间的距离。默认为 8
"""
_zaxis3D = {
"name": zaxis3d_name,
"nameGap": zaxis3d_name_gap,
"nameTextStyle": {"fontSize": zaxis3d_name_size},
"type": zaxis3d_type,
"min": zaxis3d_min,
"max": zaxis3d_max,
"axisLabel": {"margin": zaxis3d_margin},
}
return _zaxis3D
@collect_other_func
def calendar(calendar_date_range=None, calendar_cell_size=None, **kwargs):
"""
:param calendar_date_range:
日历热力图的日期, "2016" 表示 2016 年, ["2016-5-5", "2017-5-5"]
表示 2016 年 5 月 5 日至 2017 年 5 月 5 日
:param calendar_cell_size:
日历每格框的大小,可设置单值 或数组 第一个元素是宽 第二个元素是高,支持
设置自适应 "auto"。默认为 ["auto", 20]
:param kwargs:
"""
if calendar_cell_size is None:
calendar_cell_size = ["auto", 20]
_calendar = {"range": calendar_date_range, "cellSize": calendar_cell_size}
return _calendar
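# Illustrative usage (note added for clarity, not part of the original source):
#     calendar(calendar_date_range="2017")
#     -> {"range": "2017", "cellSize": ["auto", 20]}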
def get_base_options(**kwargs):
"""
返回图形实例的所有配置项
"""
_funcs = {}
for f in base_fs:
_funcs[f.__name__] = f(**kwargs)
return _funcs
def get_other_options(**kwargs):
"""
返回图形实例的所有配置项
"""
_funcs = {}
for f in other_fs:
_funcs[f.__name__] = f(**kwargs)
    return _funcs | pyecharts/echarts/option.py | 0.448909 | 0.263422
class HistoryManager():
def __init__(self, maxSize=100):
""" Initialize HistoryManager which saved maximal maxSize states.
The maxSize+1 insertion removes the first
"""
self.history = []
        # Position is a value that tracks the index of the state in a continuous way.
        # That means even when the first entries get deleted because of maxSize,
        # self.position does not get decreased by this.
self.position = -1
# the index of the current position in history list
self.__idx = -1
# self.listLen contains maxSize
self.listLen = maxSize
        # If this is True, no other entry can be made in history
self.__lockHistory = False
    def canUndo(self):
        return self.__idx > 0
    def canRedo(self):
        return self.__idx < len(self.history) - 1
    def undo(self):
        if self.canUndo():
            self.__idx -= 1
            self.position -= 1
    def redo(self):
        if self.canRedo():
            self.__idx += 1
self.position += 1
def currentState(self):
""" Get latest state saved in history"""
if len(self.history) > 0:
return self.history[self.__idx]
else:
return None
    def _insert(self, element):
""" Insert element at the current position """
# remove newer elements
del self.history[self.__idx + 1:]
        # Remove the oldest element once the history already holds maxSize entries
        if len(self.history) >= self.listLen:
del self.history[0]
else:
self.__idx += 1
self.history.append(element)
self.position += 1
def lockHistory(self, fun):
""" Lock history for the whole method fun.
As a result no history can be inserted whil
fun is executed.
"""
if self.__lockHistory:
return
self.__lockHistory = True
fun()
self.__lockHistory = False
def insertFunc(self, fun):
""" Insert an element in history using the function fun.
While fun is working insertFunc is locked, so no other
element can be added in history.
As a result recursivly insertion of history is stopped.
The function fun will be called with in insertion function
which can be called to insert an element in history.
E.g.:
def createNewElement(text):
# Nothing happend, because insertFunc is locked
historymanager.insertFunc(lambda insertF: insertF(text))
return text
historymananger.insertFunc(lambda insertF: insertF(createNewElement("bla"))
# Only one string will be insert
When inserting a new state old state gets removed if the limit of
entries is reached.
Also states newer than the current one (e.g. by using undo())
get removed (so you can't do a redo() anymore)
"""
if self.__lockHistory:
return
self.__lockHistory = True
fun(self._insert)
self.__lockHistory = False
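    # Minimal usage sketch (illustrative only, not part of the original file):
    #     hm = HistoryManager(maxSize=10)
    #     hm.insertFunc(lambda insert: insert("state-1"))
    #     hm.insertFunc(lambda insert: insert("state-2"))
    #     hm.undo()    # currentState() -> "state-1"
    #     hm.redo()    # currentState() -> "state-2"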
def clear(self):
self.position = -1
self.__idx = -1
        self.history = [] | gui/network_manager/history_manager.py | 0.61451 | 0.224895
import numpy as np
import psyneulink.core.llvm as pnlvm
import psyneulink.core.components.functions.transferfunctions as Functions
import psyneulink.core.globals.keywords as kw
import pytest
from math import e, pi, sqrt
SIZE=5
test_var = np.random.rand(SIZE)
test_matrix = np.random.rand(SIZE, SIZE)
test_matrix_s = np.random.rand(SIZE, SIZE // 4)
test_matrix_l = np.random.rand(SIZE, 3 * SIZE)
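# Note (added for clarity): the non-square matrices exercise LinearMatrix with
# output vectors of a different length than the input -- test_matrix_l is
# (SIZE, 3*SIZE), the "WIDE" case, and test_matrix_s is (SIZE, SIZE // 4),
# the "TALL" case in the parametrized names below.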
RAND1 = np.random.rand()
RAND2 = np.random.rand()
RAND3 = np.random.rand()
RAND4 = np.random.rand()
softmax_helper = RAND1 * test_var
softmax_helper = softmax_helper - np.max(softmax_helper)
softmax_helper = np.exp(softmax_helper) / np.sum(np.exp(softmax_helper))
tanh_helper = -2 * (RAND1 * (test_var + RAND2 - RAND3) + RAND4)
tanh_helper = (1 - e**tanh_helper)/ (1 + e**tanh_helper)
gaussian_helper = e**(-(test_var - RAND2)**2 / (2 * RAND1**2)) / sqrt(2 * pi * RAND1)
gaussian_helper = RAND3 * gaussian_helper + RAND4
def gaussian_distort_helper(seed):
state = np.random.RandomState([seed])
# compensate for construction
state.normal(test_var + RAND1, RAND2)
return RAND4 * state.normal(test_var + RAND1, RAND2) + RAND3
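# (Per the "compensate for construction" note above, the first state.normal()
# draw appears to mirror the draw consumed when the GaussianDistort Function is
# constructed, so the second draw lines up with what the function itself returns
# for the same seed. This explanatory note is not part of the original file.)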
test_data = [
(Functions.Linear, test_var, {'slope':RAND1, 'intercept':RAND2}, None, test_var * RAND1 + RAND2),
(Functions.Exponential, test_var, {'scale':RAND1, 'rate':RAND2}, None, RAND1 * np.exp(RAND2 * test_var)),
(Functions.Logistic, test_var, {'gain':RAND1, 'x_0':RAND2, 'offset':RAND3, 'scale':RAND4}, None, RAND4 / (1 + np.exp(-(RAND1 * (test_var - RAND2)) + RAND3))),
(Functions.Tanh, test_var, {'gain':RAND1, 'bias':RAND2, 'x_0':RAND3, 'offset':RAND4}, None, tanh_helper),
(Functions.ReLU, test_var, {'gain':RAND1, 'bias':RAND2, 'leak':RAND3}, None, np.maximum(RAND1 * (test_var - RAND2), RAND3 * RAND1 *(test_var - RAND2))),
(Functions.Gaussian, test_var, {'standard_deviation':RAND1, 'bias':RAND2, 'scale':RAND3, 'offset':RAND4}, None, gaussian_helper),
(Functions.GaussianDistort, test_var.tolist(), {'bias': RAND1, 'variance':RAND2, 'offset':RAND3, 'scale':RAND4 }, None, gaussian_distort_helper(0)),
(Functions.GaussianDistort, test_var.tolist(), {'bias': RAND1, 'variance':RAND2, 'offset':RAND3, 'scale':RAND4, 'seed':0 }, None, gaussian_distort_helper(0)),
(Functions.SoftMax, test_var, {'gain':RAND1, 'per_item': False}, None, softmax_helper),
(Functions.SoftMax, test_var, {'gain':RAND1, 'params':{kw.OUTPUT_TYPE:kw.MAX_VAL}, 'per_item': False}, None, np.where(softmax_helper == np.max(softmax_helper), np.max(softmax_helper), 0)),
(Functions.SoftMax, test_var, {'gain':RAND1, 'params':{kw.OUTPUT_TYPE:kw.MAX_INDICATOR}, 'per_item': False}, None, np.where(softmax_helper == np.max(softmax_helper), 1, 0)),
(Functions.LinearMatrix, test_var.tolist(), {'matrix':test_matrix.tolist()}, None, np.dot(test_var, test_matrix)),
(Functions.LinearMatrix, test_var.tolist(), {'matrix':test_matrix_l.tolist()}, None, np.dot(test_var, test_matrix_l)),
(Functions.LinearMatrix, test_var.tolist(), {'matrix':test_matrix_s.tolist()}, None, np.dot(test_var, test_matrix_s)),
]
# use list, naming function produces ugly names
names = [
"LINEAR",
"EXPONENTIAL",
"LOGISTIC",
"TANH",
"RELU",
"GAUSIAN",
"GAUSSIAN DISTORT GLOBAL SEED",
"GAUSSIAN DISTORT",
"SOFT_MAX ALL",
"SOFT_MAX MAX_VAL",
"SOFT_MAX MAX_INDICATOR",
"LINEAR_MATRIX SQUARE",
"LINEAR_MATRIX WIDE",
"LINEAR_MATRIX TALL",
]
@pytest.mark.function
@pytest.mark.transfer_function
@pytest.mark.parametrize("func, variable, params, fail, expected", test_data, ids=names)
@pytest.mark.benchmark
def test_basic(func, variable, params, fail, expected, benchmark):
f = func(default_variable=variable, **params)
benchmark.group = "TransferFunction " + func.componentName
res = f.function(variable)
benchmark(f.function, variable)
assert np.allclose(res, expected)
@pytest.mark.llvm
@pytest.mark.function
@pytest.mark.transfer_function
@pytest.mark.parametrize("func, variable, params, fail, expected", test_data, ids=names)
@pytest.mark.benchmark
def test_llvm(func, variable, params, fail, expected, benchmark):
if fail is not None:
pytest.xfail(fail)
f = func(default_variable=variable, **params)
benchmark.group = "TransferFunction " + func.componentName
m = pnlvm.execution.FuncExecution(f)
res = m.execute(variable)
benchmark(m.execute, variable)
assert np.allclose(res, expected)
@pytest.mark.llvm
@pytest.mark.cuda
@pytest.mark.function
@pytest.mark.transfer_function
@pytest.mark.parametrize("func, variable, params, fail, expected", test_data, ids=names)
@pytest.mark.benchmark
def test_ptx_cuda(func, variable, params, fail, expected, benchmark):
if fail is not None:
pytest.xfail(fail)
f = func(default_variable=variable, **params)
benchmark.group = "TransferFunction " + func.componentName
m = pnlvm.execution.FuncExecution(f)
res = m.cuda_execute(variable)
benchmark(m.cuda_execute, variable)
assert np.allclose(res, expected)
def test_transfer_with_costs_function():
from psyneulink.core.components.functions.transferfunctions import TransferWithCosts, CostFunctions
f = TransferWithCosts()
result = f(1)
assert np.allclose(result, 1)
f.toggle_cost_function(CostFunctions.INTENSITY)
f = TransferWithCosts(enabled_cost_functions=CostFunctions.INTENSITY)
result = f(2)
assert np.allclose(result, 2)
assert np.allclose(f.intensity_cost, 7.38905609893065)
    assert f.adjustment_cost is None
    assert f.duration_cost is None
assert np.allclose(np.float(f.combined_costs), 7.38905609893065)
f.toggle_cost_function(CostFunctions.ADJUSTMENT)
result = f(3)
assert np.allclose(result, 3)
assert np.allclose(f.intensity_cost, 20.085536923187668)
assert np.allclose(f.adjustment_cost, 1)
    assert f.duration_cost is None
assert np.allclose(np.float(f.combined_costs), 21.085536923187668)
f.toggle_cost_function(CostFunctions.DURATION)
result = f(5)
assert np.allclose(result, 5)
assert np.allclose(f.intensity_cost, 148.413159102576603)
assert np.allclose(f.adjustment_cost, 2)
assert np.allclose(f.duration_cost, 5)
assert np.allclose(np.float(f.combined_costs), 155.413159102576603)
result = f(1)
assert np.allclose(result, 1)
assert np.allclose(f.intensity_cost, 2.718281828459045)
assert np.allclose(f.adjustment_cost, 4)
assert np.allclose(f.duration_cost, 6)
    assert np.allclose(np.float(f.combined_costs), 12.718281828459045) | tests/functions/test_transfer.py | 0.554712 | 0.394143
__all__ = [
"Schedule",
"Season",
"Team",
"team_from_str"
]
class Season(object):
"""Data model of types of seasons"""
POST = 'POST'
PRE = 'PRE'
PRO = 'PRO'
REGULAR = 'REG'
class Schedule(object):
"""Data model of a season's week's schedule"""
def __init__(self, season, week, stype=Season.REGULAR, data=None, games=None):
self.season = season
self.week = week
self.stype = stype
self.data = data
self.games = games
def __str__(self):
return self.data if self.games is None else str(self.games)
class Team(object):
"""Data model of a team"""
def __init__(self, code, name):
self._code = code
self._name = name
def __str__(self):
return '{} ({})'.format(self._name, self._code)
@property
def code(self):
return self._code
@property
def name(self):
return self._name
_STR_TO_TEAM_MAPPING = {}
_TEAM_METADATA = [
('SF', '49ers'),
('CHI', 'bears'),
('CIN', 'bengals'),
('BUF', 'bills'),
('DEN', 'broncos'),
('CLE', 'browns'),
('TB', 'buccaneers'),
('ARI', 'cardinals'),
('LAC', 'chargers'),
('KC', 'chiefs'),
('IND', 'colts'),
('DAL', 'cowboys'),
('MIA', 'dolphins'),
('PHI', 'eagles'),
('ATL', 'falcons'),
('NYG', 'giants'),
('JAX', 'jaguars'),
('NYJ', 'jets'),
('DET', 'lions'),
('GB', 'packers'),
('CAR', 'panthers'),
('OAK', 'raiders'),
('LA', 'rams'),
('BAL', 'ravens'),
('WAS', 'redskins'),
('NO', 'saints'),
('SEA', 'seahawks'),
('PIT', 'steelers'),
('HOU', 'texans'),
('TEN', 'titans'),
('NE', 'patriots'),
('MIN', 'vikings')
]
for team_metadatum in _TEAM_METADATA:
team = Team(*team_metadatum)
_STR_TO_TEAM_MAPPING[team_metadatum[0]] = team
_STR_TO_TEAM_MAPPING[team_metadatum[1]] = team
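# Note (added for clarity, not part of the original file): both the upper-case
# code and the lower-case nickname resolve to the same Team instance, e.g.
#     team_from_str('SF') is team_from_str('49ers')   # -> True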
def team_from_str(team_str):
team = _STR_TO_TEAM_MAPPING.get(team_str)
if team is None:
raise RuntimeError('No team with code or name of `{}`'.format(team_str))
    return team | ffpicker/data/models.py | 0.750278 | 0.155399
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_almost_equal)
from nose.tools import assert_true, assert_raises
import warnings
from mne.datasets import testing
from mne import read_forward_solution
from mne.simulation import simulate_sparse_stc, simulate_evoked
from mne import read_cov
from mne.io import Raw
from mne import pick_types_forward, read_evokeds
from mne.utils import run_tests_if_main
warnings.simplefilter('always')
data_path = testing.data_path(download=False)
fwd_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_raw.fif')
ave_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-ave.fif')
cov_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-cov.fif')
@testing.requires_testing_data
def test_simulate_evoked():
""" Test simulation of evoked data """
raw = Raw(raw_fname)
fwd = read_forward_solution(fwd_fname, force_fixed=True)
fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
cov = read_cov(cov_fname)
evoked_template = read_evokeds(ave_fname, condition=0, baseline=None)
evoked_template.pick_types(meg=True, eeg=True, exclude=raw.info['bads'])
snr = 6 # dB
tmin = -0.1
sfreq = 1000. # Hz
tstep = 1. / sfreq
n_samples = 600
times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
# Generate times series for 2 dipoles
stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times)
stc._data *= 1e-9
# Generate noisy evoked data
iir_filter = [1, -0.9]
evoked = simulate_evoked(fwd, stc, evoked_template.info, cov, snr,
tmin=0.0, tmax=0.2, iir_filter=iir_filter)
assert_array_almost_equal(evoked.times, stc.times)
assert_true(len(evoked.data) == len(fwd['sol']['data']))
# make a vertex that doesn't exist in fwd, should throw error
stc_bad = stc.copy()
mv = np.max(fwd['src'][0]['vertno'][fwd['src'][0]['inuse']])
stc_bad.vertices[0][0] = mv + 1
assert_raises(RuntimeError, simulate_evoked, fwd, stc_bad,
evoked_template.info, cov, snr, tmin=0.0, tmax=0.2)
evoked_1 = simulate_evoked(fwd, stc, evoked_template.info, cov, np.inf,
tmin=0.0, tmax=0.2)
evoked_2 = simulate_evoked(fwd, stc, evoked_template.info, cov, np.inf,
tmin=0.0, tmax=0.2)
assert_array_equal(evoked_1.data, evoked_2.data)
# test snr definition in dB
evoked_noise = simulate_evoked(fwd, stc, evoked_template.info, cov,
snr=snr, tmin=None, tmax=None,
iir_filter=None)
evoked_clean = simulate_evoked(fwd, stc, evoked_template.info, cov,
snr=np.inf, tmin=None, tmax=None,
iir_filter=None)
noise = evoked_noise.data - evoked_clean.data
empirical_snr = 10 * np.log10(np.mean((evoked_clean.data ** 2).ravel()) /
np.mean((noise ** 2).ravel()))
assert_almost_equal(snr, empirical_snr, decimal=5)
cov['names'] = cov.ch_names[:-2] # Error channels are different.
assert_raises(ValueError, simulate_evoked, fwd, stc, evoked_template.info,
cov, snr=3., tmin=None, tmax=None, iir_filter=None)
run_tests_if_main() | mne/simulation/tests/test_evoked.py | 0.567218 | 0.574275
import pytest
from decouple import config
@pytest.fixture
def ipnetworkxmlschema():
from lxml import etree
schema = config('EPPSCHEMAPATH', '../../../schemas') + '/ipnetwork-1.0.xsd'
xmlschema_doc = etree.parse(schema)
return etree.XMLSchema(xmlschema_doc)
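# (Illustrative note, not part of the original file: the tests presumably check
# the XML fixtures below against this schema, e.g. via
#     ipnetworkxmlschema.validate(etree.fromstring(xml))
# lxml's XMLSchema object exposes both validate() and assertValid().)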
@pytest.fixture
def checkipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<check>
<ipnetwork:check xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.0.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
</ipnetwork:check>
</check>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responsecheckipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<ipnetwork:chkData xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:cd>
<ipnetwork:ipRange avail="0" version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.0.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:reason>In use</ipnetwork:reason>
</ipnetwork:cd>
</ipnetwork:chkData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def createipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<create>
<ipnetwork:create xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.16.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.31.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:organization>BR-ABC-LACNIC</ipnetwork:organization>
<ipnetwork:allocType>assignment</ipnetwork:allocType>
<ipnetwork:contact type="admin">ABC123</ipnetwork:contact>
<ipnetwork:reverseDNS>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.16.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.17.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:hostName>a.example.com</ipnetwork:hostName>
<ipnetwork:hostName>b.example.com</ipnetwork:hostName>
</ipnetwork:reverseDNS>
<ipnetwork:dsData>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.16.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.16.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:keyTag>12345</ipnetwork:keyTag>
<ipnetwork:alg>3</ipnetwork:alg>
<ipnetwork:digestType>1</ipnetwork:digestType>
<ipnetwork:digest>49FD46E6C4B45C55D4AC</ipnetwork:digest>
</ipnetwork:dsData>
</ipnetwork:create>
</create>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responsecreateipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<ipnetwork:creData xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.16.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.31.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:roid>b_123456-LACNIC</ipnetwork:roid>
<ipnetwork:crDate>1999-04-03T22:00:00.0Z</ipnetwork:crDate>
</ipnetwork:creData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54321-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def deleteipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<delete>
<ipnetwork:delete xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:roid>b_123456-LACNIC</ipnetwork:roid>
</ipnetwork:delete>
</delete>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responsedeleteipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54321-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def infoipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<info>
<ipnetwork:info xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.15.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:roid>b_123456-LACNIC</ipnetwork:roid>
</ipnetwork:info>
</info>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responseinfoipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<ipnetwork:infData xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.15.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:ipRangeInfo>
<ipnetwork:roid>b_123456-LACNIC</ipnetwork:roid>
<ipnetwork:allocType>allocation</ipnetwork:allocType>
<ipnetwork:organization>BR-ABC-LACNIC</ipnetwork:organization>
<ipnetwork:contact type="admin">HKK</ipnetwork:contact>
<ipnetwork:reverseDNS>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.0.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:hostName>a.example.com</ipnetwork:hostName>
<ipnetwork:hostName>b.example.com</ipnetwork:hostName>
</ipnetwork:reverseDNS>
<ipnetwork:reverseDNS>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.2.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.2.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:hostName>d.example.com</ipnetwork:hostName>
<ipnetwork:hostName>e.example.com</ipnetwork:hostName>
</ipnetwork:reverseDNS>
<ipnetwork:dsData>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.0.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:keyTag>12345</ipnetwork:keyTag>
<ipnetwork:alg>3</ipnetwork:alg>
<ipnetwork:digestType>1</ipnetwork:digestType>
<ipnetwork:digest>49FD46E6C4B45C55D4AC</ipnetwork:digest>
</ipnetwork:dsData>
<ipnetwork:dsData>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.2.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.2.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:keyTag>54321</ipnetwork:keyTag>
<ipnetwork:alg>3</ipnetwork:alg>
<ipnetwork:digestType>1</ipnetwork:digestType>
<ipnetwork:digest>49FD46E6C4B45C55D4AC</ipnetwork:digest>
</ipnetwork:dsData>
<ipnetwork:parentNetwork>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.255.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:roid>b_12345-LACNIC</ipnetwork:roid>
</ipnetwork:parentNetwork>
<ipnetwork:childNetwork>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.0.127</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:roid>b_234567-LACNIC</ipnetwork:roid>
</ipnetwork:childNetwork>
<ipnetwork:clID>ClientY</ipnetwork:clID>
<ipnetwork:crID>ClientX</ipnetwork:crID>
<ipnetwork:crDate>1999-04-03T22:00:00.0Z</ipnetwork:crDate>
<ipnetwork:upID>ClientX</ipnetwork:upID>
<ipnetwork:upDate>1999-12-03T09:00:00.0Z</ipnetwork:upDate>
</ipnetwork:ipRangeInfo>
</ipnetwork:infData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def renewipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<renew>
<ipnetwork:renew xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:roid>b_12345-LACNIC</ipnetwork:roid>
<ipnetwork:curExpDate>2008-04-03T00:00:00.0Z</ipnetwork:curExpDate>
<ipnetwork:period unit="y">3</ipnetwork:period>
</ipnetwork:renew>
</renew>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responserenewipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<ipnetwork:renData xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:roid>b_12345-LACNIC</ipnetwork:roid>
<ipnetwork:exDate>2011-04-03T00:00:00.0Z</ipnetwork:exDate>
</ipnetwork:renData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def transferrequestipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<transfer op="request">
<ipnetwork:transfer xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:roid>b_12345-LACNIC</ipnetwork:roid>
</ipnetwork:transfer>
</transfer>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responsetransferipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<ipnetwork:trnData xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:roid>b_12345-LACNIC</ipnetwork:roid>
<ipnetwork:trStatus>pending</ipnetwork:trStatus>
<ipnetwork:reID>ClientX</ipnetwork:reID>
<ipnetwork:reDate>2000-06-08T22:00:00.0Z</ipnetwork:reDate>
<ipnetwork:acID>ClientY</ipnetwork:acID>
<ipnetwork:acDate>2000-06-13T22:00:00.0Z</ipnetwork:acDate>
</ipnetwork:trnData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def updateipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<update>
<ipnetwork:update xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:roid>b_123456-LACNIC</ipnetwork:roid>
<ipnetwork:add>
<ipnetwork:dsData>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.16.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.16.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:keyTag>12345</ipnetwork:keyTag>
<ipnetwork:alg>3</ipnetwork:alg>
<ipnetwork:digestType>1</ipnetwork:digestType>
<ipnetwork:digest>49FD46E6C4B45C55D4AC</ipnetwork:digest>
</ipnetwork:dsData>
<ipnetwork:contact type="tech">AAA1</ipnetwork:contact>
</ipnetwork:add>
<ipnetwork:rem>
<ipnetwork:dsData>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.16.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.16.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:keyTag>12345</ipnetwork:keyTag>
<ipnetwork:alg>3</ipnetwork:alg>
<ipnetwork:digestType>1</ipnetwork:digestType>
<ipnetwork:digest>49FD46E6C4B45C55D4AC</ipnetwork:digest>
</ipnetwork:dsData>
<ipnetwork:contact type="abuse">AAA1</ipnetwork:contact>
</ipnetwork:rem>
<ipnetwork:chg>
<ipnetwork:organization>BR-DEF-LACNIC</ipnetwork:organization>
<ipnetwork:allocType>assignment</ipnetwork:allocType>
<ipnetwork:asn>2</ipnetwork:asn>
</ipnetwork:chg>
<ipnetwork:aggr>
<ipnetwork:roid>b_123456-LACNIC</ipnetwork:roid>
<ipnetwork:hostName>a.a.com</ipnetwork:hostName>
</ipnetwork:aggr>
<ipnetwork:creation_date>2011-01-27T00:00:00.0Z</ipnetwork:creation_date>
</ipnetwork:update>
</update>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responseupdateipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54321-XYZ</svTRID>
</trID>
</response>
</epp>
""" | tests/unit/ipnetwork/conftest.py | import pytest
from decouple import config
@pytest.fixture
def ipnetworkxmlschema():
from lxml import etree
schema = config('EPPSCHEMAPATH', '../../../schemas') + '/ipnetwork-1.0.xsd'
xmlschema_doc = etree.parse(schema)
return etree.XMLSchema(xmlschema_doc)
@pytest.fixture
def checkipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<check>
<ipnetwork:check xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.0.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
</ipnetwork:check>
</check>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responsecheckipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<ipnetwork:chkData xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:cd>
<ipnetwork:ipRange avail="0" version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.0.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:reason>In use</ipnetwork:reason>
</ipnetwork:cd>
</ipnetwork:chkData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def createipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<create>
<ipnetwork:create xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.16.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.31.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:organization>BR-ABC-LACNIC</ipnetwork:organization>
<ipnetwork:allocType>assignment</ipnetwork:allocType>
<ipnetwork:contact type="admin">ABC123</ipnetwork:contact>
<ipnetwork:reverseDNS>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.16.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.17.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:hostName>a.example.com</ipnetwork:hostName>
<ipnetwork:hostName>b.example.com</ipnetwork:hostName>
</ipnetwork:reverseDNS>
<ipnetwork:dsData>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.16.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.16.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:keyTag>12345</ipnetwork:keyTag>
<ipnetwork:alg>3</ipnetwork:alg>
<ipnetwork:digestType>1</ipnetwork:digestType>
<ipnetwork:digest>49FD46E6C4B45C55D4AC</ipnetwork:digest>
</ipnetwork:dsData>
</ipnetwork:create>
</create>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responsecreateipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<ipnetwork:creData xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.16.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.31.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:roid>b_123456-LACNIC</ipnetwork:roid>
<ipnetwork:crDate>1999-04-03T22:00:00.0Z</ipnetwork:crDate>
</ipnetwork:creData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54321-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def deleteipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<delete>
<ipnetwork:delete xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:roid>b_123456-LACNIC</ipnetwork:roid>
</ipnetwork:delete>
</delete>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responsedeleteipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54321-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def infoipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<info>
<ipnetwork:info xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.15.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:roid>b_123456-LACNIC</ipnetwork:roid>
</ipnetwork:info>
</info>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responseinfoipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<ipnetwork:infData xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.15.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:ipRangeInfo>
<ipnetwork:roid>b_123456-LACNIC</ipnetwork:roid>
<ipnetwork:allocType>allocation</ipnetwork:allocType>
<ipnetwork:organization>BR-ABC-LACNIC</ipnetwork:organization>
<ipnetwork:contact type="admin">HKK</ipnetwork:contact>
<ipnetwork:reverseDNS>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.0.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:hostName>a.example.com</ipnetwork:hostName>
<ipnetwork:hostName>b.example.com</ipnetwork:hostName>
</ipnetwork:reverseDNS>
<ipnetwork:reverseDNS>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.2.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.2.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:hostName>d.example.com</ipnetwork:hostName>
<ipnetwork:hostName>e.example.com</ipnetwork:hostName>
</ipnetwork:reverseDNS>
<ipnetwork:dsData>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.0.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:keyTag>12345</ipnetwork:keyTag>
<ipnetwork:alg>3</ipnetwork:alg>
<ipnetwork:digestType>1</ipnetwork:digestType>
<ipnetwork:digest>49FD46E6C4B45C55D4AC</ipnetwork:digest>
</ipnetwork:dsData>
<ipnetwork:dsData>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.2.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.2.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:keyTag>54321</ipnetwork:keyTag>
<ipnetwork:alg>3</ipnetwork:alg>
<ipnetwork:digestType>1</ipnetwork:digestType>
<ipnetwork:digest>49FD46E6C4B45C55D4AC</ipnetwork:digest>
</ipnetwork:dsData>
<ipnetwork:parentNetwork>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.255.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:roid>b_12345-LACNIC</ipnetwork:roid>
</ipnetwork:parentNetwork>
<ipnetwork:childNetwork>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.0.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.0.127</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:roid>b_234567-LACNIC</ipnetwork:roid>
</ipnetwork:childNetwork>
<ipnetwork:clID>ClientY</ipnetwork:clID>
<ipnetwork:crID>ClientX</ipnetwork:crID>
<ipnetwork:crDate>1999-04-03T22:00:00.0Z</ipnetwork:crDate>
<ipnetwork:upID>ClientX</ipnetwork:upID>
<ipnetwork:upDate>1999-12-03T09:00:00.0Z</ipnetwork:upDate>
</ipnetwork:ipRangeInfo>
</ipnetwork:infData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def renewipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<renew>
<ipnetwork:renew xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:roid>b_12345-LACNIC</ipnetwork:roid>
<ipnetwork:curExpDate>2008-04-03T00:00:00.0Z</ipnetwork:curExpDate>
<ipnetwork:period unit="y">3</ipnetwork:period>
</ipnetwork:renew>
</renew>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responserenewipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<ipnetwork:renData xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:roid>b_12345-LACNIC</ipnetwork:roid>
<ipnetwork:exDate>2011-04-03T00:00:00.0Z</ipnetwork:exDate>
</ipnetwork:renData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def transferrequestipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<transfer op="request">
<ipnetwork:transfer xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:roid>b_12345-LACNIC</ipnetwork:roid>
</ipnetwork:transfer>
</transfer>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responsetransferipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<ipnetwork:trnData xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:roid>b_12345-LACNIC</ipnetwork:roid>
<ipnetwork:trStatus>pending</ipnetwork:trStatus>
<ipnetwork:reID>ClientX</ipnetwork:reID>
<ipnetwork:reDate>2000-06-08T22:00:00.0Z</ipnetwork:reDate>
<ipnetwork:acID>ClientY</ipnetwork:acID>
<ipnetwork:acDate>2000-06-13T22:00:00.0Z</ipnetwork:acDate>
</ipnetwork:trnData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def updateipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<update>
<ipnetwork:update xmlns:ipnetwork="urn:ietf:params:xml:ns:ipnetwork-1.0">
<ipnetwork:roid>b_123456-LACNIC</ipnetwork:roid>
<ipnetwork:add>
<ipnetwork:dsData>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.16.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.16.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:keyTag>12345</ipnetwork:keyTag>
<ipnetwork:alg>3</ipnetwork:alg>
<ipnetwork:digestType>1</ipnetwork:digestType>
<ipnetwork:digest>49FD46E6C4B45C55D4AC</ipnetwork:digest>
</ipnetwork:dsData>
<ipnetwork:contact type="tech">AAA1</ipnetwork:contact>
</ipnetwork:add>
<ipnetwork:rem>
<ipnetwork:dsData>
<ipnetwork:ipRange version="v4">
<ipnetwork:startAddress>192.168.16.0</ipnetwork:startAddress>
<ipnetwork:endAddress>192.168.16.255</ipnetwork:endAddress>
</ipnetwork:ipRange>
<ipnetwork:keyTag>12345</ipnetwork:keyTag>
<ipnetwork:alg>3</ipnetwork:alg>
<ipnetwork:digestType>1</ipnetwork:digestType>
<ipnetwork:digest>49FD46E6C4B45C55D4AC</ipnetwork:digest>
</ipnetwork:dsData>
<ipnetwork:contact type="abuse">AAA1</ipnetwork:contact>
</ipnetwork:rem>
<ipnetwork:chg>
<ipnetwork:organization>BR-DEF-LACNIC</ipnetwork:organization>
<ipnetwork:allocType>assignment</ipnetwork:allocType>
<ipnetwork:asn>2</ipnetwork:asn>
</ipnetwork:chg>
<ipnetwork:aggr>
<ipnetwork:roid>b_123456-LACNIC</ipnetwork:roid>
<ipnetwork:hostName>a.a.com</ipnetwork:hostName>
</ipnetwork:aggr>
<ipnetwork:creation_date>2011-01-27T00:00:00.0Z</ipnetwork:creation_date>
</ipnetwork:update>
</update>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responseupdateipnetworkcommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54321-XYZ</svTRID>
</trID>
</response>
</epp>
""" | 0.420243 | 0.188567 |
import csv
import glob
import unittest
import numpy as np
import libpandasafety_py
MAX_RATE_UP = 3
MAX_RATE_DOWN = 3
MAX_STEER = 261
MAX_RT_DELTA = 112
RT_INTERVAL = 250000
MAX_TORQUE_ERROR = 80
def twos_comp(val, bits):
if val >= 0:
return val
else:
return (2**bits) + val
def sign(a):
if a > 0:
return 1
else:
return -1
def swap_bytes(data_str):
"""Accepts string with hex, returns integer with order swapped for CAN."""
a = int(data_str, 16)
return ((a & 0xff) << 24) + ((a & 0xff00) << 8) + ((a & 0x00ff0000) >> 8) + ((a & 0xff000000) >> 24)
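# Illustrative sanity check: byte-reversing '01020304' gives 0x04030201.
assert swap_bytes('01020304') == 0x04030201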
class TestChryslerSafety(unittest.TestCase):
@classmethod
def setUp(cls):
cls.safety = libpandasafety_py.libpandasafety
cls.safety.nooutput_init(0)
cls.safety.init_tests_chrysler()
def _button_msg(self, buttons):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = 1265 << 21
to_send[0].RDLR = buttons
return to_send
def _set_prev_torque(self, t):
self.safety.set_chrysler_desired_torque_last(t)
self.safety.set_chrysler_rt_torque_last(t)
self.safety.set_chrysler_torque_meas(t, t)
def _torque_meas_msg(self, torque):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = 544 << 21
to_send[0].RDHR = ((torque + 1024) >> 8) + (((torque + 1024) & 0xff) << 8)
return to_send
def _torque_msg(self, torque):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = 0x292 << 21
to_send[0].RDLR = ((torque + 1024) >> 8) + (((torque + 1024) & 0xff) << 8)
return to_send
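# Note: both message builders above add a +1024 offset to the torque and swap the
# high and low bytes of the resulting 16-bit value before writing the register.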
def test_default_controls_not_allowed(self):
self.assertFalse(self.safety.get_controls_allowed())
def test_steer_safety_check(self):
for enabled in [0, 1]:
for t in range(-MAX_STEER*2, MAX_STEER*2):
self.safety.set_controls_allowed(enabled)
self._set_prev_torque(t)
if abs(t) > MAX_STEER or (not enabled and abs(t) > 0):
self.assertFalse(self.safety.chrysler_tx_hook(self._torque_msg(t)))
else:
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(t)))
def test_manually_enable_controls_allowed(self):
self.safety.set_controls_allowed(1)
self.assertTrue(self.safety.get_controls_allowed())
self.safety.set_controls_allowed(0)
self.assertFalse(self.safety.get_controls_allowed())
def test_enable_control_allowed_from_cruise(self):
to_push = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_push[0].RIR = 0x309 << 21 #1f4
to_push[0].RDLR = 0x380000
self.safety.chrysler_rx_hook(to_push)
self.assertTrue(self.safety.get_controls_allowed())
def test_disable_control_allowed_from_cruise(self):
to_push = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_push[0].RIR = 0x309 << 21 #1f4
to_push[0].RDLR = 0
self.safety.set_controls_allowed(1)
self.safety.chrysler_rx_hook(to_push)
self.assertFalse(self.safety.get_controls_allowed())
def test_non_realtime_limit_up(self):
self.safety.set_controls_allowed(True)
self._set_prev_torque(0)
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(MAX_RATE_UP)))
self._set_prev_torque(0)
self.assertFalse(self.safety.chrysler_tx_hook(self._torque_msg(MAX_RATE_UP + 1)))
def test_non_realtime_limit_down(self):
self.safety.set_controls_allowed(True)
self.safety.set_chrysler_rt_torque_last(MAX_STEER)
torque_meas = MAX_STEER - MAX_TORQUE_ERROR - 20
self.safety.set_chrysler_torque_meas(torque_meas, torque_meas)
self.safety.set_chrysler_desired_torque_last(MAX_STEER)
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(MAX_STEER - MAX_RATE_DOWN)))
self.safety.set_chrysler_rt_torque_last(MAX_STEER)
self.safety.set_chrysler_torque_meas(torque_meas, torque_meas)
self.safety.set_chrysler_desired_torque_last(MAX_STEER)
self.assertFalse(self.safety.chrysler_tx_hook(self._torque_msg(MAX_STEER - MAX_RATE_DOWN + 1)))
def test_exceed_torque_sensor(self):
self.safety.set_controls_allowed(True)
for sign in [-1, 1]:
self._set_prev_torque(0)
for t in np.arange(0, MAX_TORQUE_ERROR + 2, 2): # step needs to be smaller than MAX_TORQUE_ERROR
t *= sign
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(t)))
self.assertFalse(self.safety.chrysler_tx_hook(self._torque_msg(sign * (MAX_TORQUE_ERROR + 2))))
def test_realtime_limit_up(self):
self.safety.set_controls_allowed(True)
for sign in [-1, 1]:
self.safety.init_tests_chrysler()
self._set_prev_torque(0)
for t in np.arange(0, MAX_RT_DELTA+1, 1):
t *= sign
self.safety.set_chrysler_torque_meas(t, t)
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(t)))
self.assertFalse(self.safety.chrysler_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA + 1))))
self._set_prev_torque(0)
for t in np.arange(0, MAX_RT_DELTA+1, 1):
t *= sign
self.safety.set_chrysler_torque_meas(t, t)
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(t)))
# Increase timer to update rt_torque_last
self.safety.set_timer(RT_INTERVAL + 1)
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(sign * MAX_RT_DELTA)))
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA + 1))))
def test_torque_measurements(self):
self.safety.chrysler_rx_hook(self._torque_meas_msg(50))
self.safety.chrysler_rx_hook(self._torque_meas_msg(-50))
self.safety.chrysler_rx_hook(self._torque_meas_msg(0))
self.safety.chrysler_rx_hook(self._torque_meas_msg(0))
self.safety.chrysler_rx_hook(self._torque_meas_msg(0))
self.safety.chrysler_rx_hook(self._torque_meas_msg(0))
self.assertEqual(-50, self.safety.get_chrysler_torque_meas_min())
self.assertEqual(50, self.safety.get_chrysler_torque_meas_max())
self.safety.chrysler_rx_hook(self._torque_meas_msg(0))
self.assertEqual(0, self.safety.get_chrysler_torque_meas_max())
self.assertEqual(-50, self.safety.get_chrysler_torque_meas_min())
self.safety.chrysler_rx_hook(self._torque_meas_msg(0))
self.assertEqual(0, self.safety.get_chrysler_torque_meas_max())
self.assertEqual(0, self.safety.get_chrysler_torque_meas_min())
def _replay_drive(self, csv_reader):
for row in csv_reader:
if len(row) != 4: # sometimes truncated at end of the file
continue
if row[0] == 'time': # skip CSV header
continue
addr = int(row[1])
bus = int(row[2])
data_str = row[3] # Example '081407ff0806e06f'
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = addr << 21
to_send[0].RDHR = swap_bytes(data_str[8:])
to_send[0].RDLR = swap_bytes(data_str[:8])
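# For the example payload '081407ff0806e06f', this gives
# RDLR = swap_bytes('081407ff') = 0xff071408 and RDHR = swap_bytes('0806e06f') = 0x6fe00608.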
if (bus == 128):
self.assertTrue(self.safety.chrysler_tx_hook(to_send), msg=row)
else:
self.safety.chrysler_rx_hook(to_send)
def test_replay_drive(self):
# In Cabana, click "Save Log" and then put the downloaded CSV in this directory.
test_files = glob.glob('chrysler_*.csv')
for filename in test_files:
print('testing %s' % filename)
with open(filename) as csvfile:
reader = csv.reader(csvfile)
self._replay_drive(reader)
if __name__ == "__main__":
unittest.main() | panda/tests/safety/test_chrysler.py | import csv
import glob
import unittest
import numpy as np
import libpandasafety_py
MAX_RATE_UP = 3
MAX_RATE_DOWN = 3
MAX_STEER = 261
MAX_RT_DELTA = 112
RT_INTERVAL = 250000
MAX_TORQUE_ERROR = 80
def twos_comp(val, bits):
if val >= 0:
return val
else:
return (2**bits) + val
def sign(a):
if a > 0:
return 1
else:
return -1
def swap_bytes(data_str):
"""Accepts string with hex, returns integer with order swapped for CAN."""
a = int(data_str, 16)
return ((a & 0xff) << 24) + ((a & 0xff00) << 8) + ((a & 0x00ff0000) >> 8) + ((a & 0xff000000) >> 24)
class TestChryslerSafety(unittest.TestCase):
@classmethod
def setUp(cls):
cls.safety = libpandasafety_py.libpandasafety
cls.safety.nooutput_init(0)
cls.safety.init_tests_chrysler()
def _button_msg(self, buttons):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = 1265 << 21
to_send[0].RDLR = buttons
return to_send
def _set_prev_torque(self, t):
self.safety.set_chrysler_desired_torque_last(t)
self.safety.set_chrysler_rt_torque_last(t)
self.safety.set_chrysler_torque_meas(t, t)
def _torque_meas_msg(self, torque):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = 544 << 21
to_send[0].RDHR = ((torque + 1024) >> 8) + (((torque + 1024) & 0xff) << 8)
return to_send
def _torque_msg(self, torque):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = 0x292 << 21
to_send[0].RDLR = ((torque + 1024) >> 8) + (((torque + 1024) & 0xff) << 8)
return to_send
def test_default_controls_not_allowed(self):
self.assertFalse(self.safety.get_controls_allowed())
def test_steer_safety_check(self):
for enabled in [0, 1]:
for t in range(-MAX_STEER*2, MAX_STEER*2):
self.safety.set_controls_allowed(enabled)
self._set_prev_torque(t)
if abs(t) > MAX_STEER or (not enabled and abs(t) > 0):
self.assertFalse(self.safety.chrysler_tx_hook(self._torque_msg(t)))
else:
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(t)))
def test_manually_enable_controls_allowed(self):
self.safety.set_controls_allowed(1)
self.assertTrue(self.safety.get_controls_allowed())
self.safety.set_controls_allowed(0)
self.assertFalse(self.safety.get_controls_allowed())
def test_enable_control_allowed_from_cruise(self):
to_push = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_push[0].RIR = 0x309 << 21 #1f4
to_push[0].RDLR = 0x380000
self.safety.chrysler_rx_hook(to_push)
self.assertTrue(self.safety.get_controls_allowed())
def test_disable_control_allowed_from_cruise(self):
to_push = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_push[0].RIR = 0x309 << 21 #1f4
to_push[0].RDLR = 0
self.safety.set_controls_allowed(1)
self.safety.chrysler_rx_hook(to_push)
self.assertFalse(self.safety.get_controls_allowed())
def test_non_realtime_limit_up(self):
self.safety.set_controls_allowed(True)
self._set_prev_torque(0)
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(MAX_RATE_UP)))
self._set_prev_torque(0)
self.assertFalse(self.safety.chrysler_tx_hook(self._torque_msg(MAX_RATE_UP + 1)))
def test_non_realtime_limit_down(self):
self.safety.set_controls_allowed(True)
self.safety.set_chrysler_rt_torque_last(MAX_STEER)
torque_meas = MAX_STEER - MAX_TORQUE_ERROR - 20
self.safety.set_chrysler_torque_meas(torque_meas, torque_meas)
self.safety.set_chrysler_desired_torque_last(MAX_STEER)
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(MAX_STEER - MAX_RATE_DOWN)))
self.safety.set_chrysler_rt_torque_last(MAX_STEER)
self.safety.set_chrysler_torque_meas(torque_meas, torque_meas)
self.safety.set_chrysler_desired_torque_last(MAX_STEER)
self.assertFalse(self.safety.chrysler_tx_hook(self._torque_msg(MAX_STEER - MAX_RATE_DOWN + 1)))
def test_exceed_torque_sensor(self):
self.safety.set_controls_allowed(True)
for sign in [-1, 1]:
self._set_prev_torque(0)
for t in np.arange(0, MAX_TORQUE_ERROR + 2, 2): # step needs to be smaller than MAX_TORQUE_ERROR
t *= sign
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(t)))
self.assertFalse(self.safety.chrysler_tx_hook(self._torque_msg(sign * (MAX_TORQUE_ERROR + 2))))
def test_realtime_limit_up(self):
self.safety.set_controls_allowed(True)
for sign in [-1, 1]:
self.safety.init_tests_chrysler()
self._set_prev_torque(0)
for t in np.arange(0, MAX_RT_DELTA+1, 1):
t *= sign
self.safety.set_chrysler_torque_meas(t, t)
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(t)))
self.assertFalse(self.safety.chrysler_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA + 1))))
self._set_prev_torque(0)
for t in np.arange(0, MAX_RT_DELTA+1, 1):
t *= sign
self.safety.set_chrysler_torque_meas(t, t)
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(t)))
# Increase timer to update rt_torque_last
self.safety.set_timer(RT_INTERVAL + 1)
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(sign * MAX_RT_DELTA)))
self.assertTrue(self.safety.chrysler_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA + 1))))
def test_torque_measurements(self):
self.safety.chrysler_rx_hook(self._torque_meas_msg(50))
self.safety.chrysler_rx_hook(self._torque_meas_msg(-50))
self.safety.chrysler_rx_hook(self._torque_meas_msg(0))
self.safety.chrysler_rx_hook(self._torque_meas_msg(0))
self.safety.chrysler_rx_hook(self._torque_meas_msg(0))
self.safety.chrysler_rx_hook(self._torque_meas_msg(0))
self.assertEqual(-50, self.safety.get_chrysler_torque_meas_min())
self.assertEqual(50, self.safety.get_chrysler_torque_meas_max())
self.safety.chrysler_rx_hook(self._torque_meas_msg(0))
self.assertEqual(0, self.safety.get_chrysler_torque_meas_max())
self.assertEqual(-50, self.safety.get_chrysler_torque_meas_min())
self.safety.chrysler_rx_hook(self._torque_meas_msg(0))
self.assertEqual(0, self.safety.get_chrysler_torque_meas_max())
self.assertEqual(0, self.safety.get_chrysler_torque_meas_min())
def _replay_drive(self, csv_reader):
for row in csv_reader:
if len(row) != 4: # sometimes truncated at end of the file
continue
if row[0] == 'time': # skip CSV header
continue
addr = int(row[1])
bus = int(row[2])
data_str = row[3] # Example '081407ff0806e06f'
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = addr << 21
to_send[0].RDHR = swap_bytes(data_str[8:])
to_send[0].RDLR = swap_bytes(data_str[:8])
if (bus == 128):
self.assertTrue(self.safety.chrysler_tx_hook(to_send), msg=row)
else:
self.safety.chrysler_rx_hook(to_send)
def test_replay_drive(self):
# In Cabana, click "Save Log" and then put the downloaded CSV in this directory.
test_files = glob.glob('chrysler_*.csv')
for filename in test_files:
print('testing %s' % filename)
with open(filename) as csvfile:
reader = csv.reader(csvfile)
self._replay_drive(reader)
if __name__ == "__main__":
unittest.main() | 0.483161 | 0.320077 |
from itertools import chain
from collections import namedtuple
from .lazyre import LazyReCompile
LinePart = namedtuple("LinePart", ["start", "stop", "word"])
current_word_re = LazyReCompile(r"(?<![)\]\w_.])" r"([\w_][\w0-9._]*[(]?)")
def current_word(cursor_offset, line):
"""the object.attribute.attribute just before or under the cursor"""
pos = cursor_offset
matches = current_word_re.finditer(line)
start = pos
end = pos
word = None
for m in matches:
if m.start(1) < pos and m.end(1) >= pos:
start = m.start(1)
end = m.end(1)
word = m.group(1)
if word is None:
return None
return LinePart(start, end, word)
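# e.g. with the cursor inside the dotted name:
# current_word(5, "an.object.attr") == LinePart(start=0, stop=14, word="an.object.attr")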
current_dict_key_re = LazyReCompile(r"""[\w_][\w0-9._]*\[([\w0-9._(), '"]*)""")
def current_dict_key(cursor_offset, line):
"""If in dictionary completion, return the current key"""
matches = current_dict_key_re.finditer(line)
for m in matches:
if m.start(1) <= cursor_offset and m.end(1) >= cursor_offset:
return LinePart(m.start(1), m.end(1), m.group(1))
return None
current_dict_re = LazyReCompile(r"""([\w_][\w0-9._]*)\[([\w0-9._(), '"]*)""")
def current_dict(cursor_offset, line):
"""If in dictionary completion, return the dict that should be used"""
matches = current_dict_re.finditer(line)
for m in matches:
if m.start(2) <= cursor_offset and m.end(2) >= cursor_offset:
return LinePart(m.start(1), m.end(1), m.group(1))
return None
current_string_re = LazyReCompile(
'''(?P<open>(?:""")|"|(?:''\')|')(?:((?P<closed>.+?)(?P=open))|'''
"""(?P<unclosed>.+))"""
)
def current_string(cursor_offset, line):
"""If inside a string of nonzero length, return the string (excluding
quotes)
Weaker than bpython.Repl's current_string, because that checks that a
string is a string based on previous lines in the buffer."""
for m in current_string_re.finditer(line):
i = 3 if m.group(3) else 4
if m.start(i) <= cursor_offset and m.end(i) >= cursor_offset:
return LinePart(m.start(i), m.end(i), m.group(i))
return None
current_object_re = LazyReCompile(r"([\w_][\w0-9_]*)[.]")
def current_object(cursor_offset, line):
"""If in attribute completion, the object on which attribute should be
looked up."""
match = current_word(cursor_offset, line)
if match is None:
return None
start, end, word = match
matches = current_object_re.finditer(word)
s = ""
for m in matches:
if m.end(1) + start < cursor_offset:
if s:
s += "."
s += m.group(1)
if not s:
return None
return LinePart(start, start + len(s), s)
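# e.g. with the cursor at the end of the line:
# current_object(11, "foo.bar.baz") == LinePart(start=0, stop=7, word="foo.bar")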
current_object_attribute_re = LazyReCompile(r"([\w_][\w0-9_]*)[.]?")
def current_object_attribute(cursor_offset, line):
"""If in attribute completion, the attribute being completed"""
# TODO replace with more general current_expression_attribute
match = current_word(cursor_offset, line)
if match is None:
return None
start, end, word = match
matches = current_object_attribute_re.finditer(word)
next(matches)
for m in matches:
if (
m.start(1) + start <= cursor_offset
and m.end(1) + start >= cursor_offset
):
return LinePart(m.start(1) + start, m.end(1) + start, m.group(1))
return None
current_from_import_from_re = LazyReCompile(
r"from ([\w0-9_.]*)(?:\s+import\s+([\w0-9_]+[,]?\s*)+)*"
)
def current_from_import_from(cursor_offset, line):
"""If in from import completion, the word after from
returns None if cursor not in or just after one of the two interesting
parts of an import: from (module) import (name1, name2)
"""
# TODO allow for as's
tokens = line.split()
if not ("from" in tokens or "import" in tokens):
return None
matches = current_from_import_from_re.finditer(line)
for m in matches:
if (m.start(1) < cursor_offset and m.end(1) >= cursor_offset) or (
m.start(2) < cursor_offset and m.end(2) >= cursor_offset
):
return LinePart(m.start(1), m.end(1), m.group(1))
return None
current_from_import_import_re_1 = LazyReCompile(r"from\s([\w0-9_.]*)\s+import")
current_from_import_import_re_2 = LazyReCompile(r"([\w0-9_]+)")
current_from_import_import_re_3 = LazyReCompile(r"[,][ ]([\w0-9_]*)")
def current_from_import_import(cursor_offset, line):
"""If in from import completion, the word after import being completed
returns None if cursor not in or just after one of these words
"""
baseline = current_from_import_import_re_1.search(line)
if baseline is None:
return None
match1 = current_from_import_import_re_2.search(line[baseline.end() :])
if match1 is None:
return None
matches = current_from_import_import_re_3.finditer(line[baseline.end() :])
for m in chain((match1,), matches):
start = baseline.end() + m.start(1)
end = baseline.end() + m.end(1)
if start < cursor_offset and end >= cursor_offset:
return LinePart(start, end, m.group(1))
return None
current_import_re_1 = LazyReCompile(r"import")
current_import_re_2 = LazyReCompile(r"([\w0-9_.]+)")
current_import_re_3 = LazyReCompile(r"[,][ ]([\w0-9_.]*)")
def current_import(cursor_offset, line):
# TODO allow for multiple as's
baseline = current_import_re_1.search(line)
if baseline is None:
return None
match1 = current_import_re_2.search(line[baseline.end() :])
if match1 is None:
return None
matches = current_import_re_3.finditer(line[baseline.end() :])
for m in chain((match1,), matches):
start = baseline.end() + m.start(1)
end = baseline.end() + m.end(1)
if start < cursor_offset and end >= cursor_offset:
return LinePart(start, end, m.group(1))
current_method_definition_name_re = LazyReCompile(r"def\s+([a-zA-Z_][\w]*)")
def current_method_definition_name(cursor_offset, line):
"""The name of a method being defined"""
matches = current_method_definition_name_re.finditer(line)
for m in matches:
if m.start(1) <= cursor_offset and m.end(1) >= cursor_offset:
return LinePart(m.start(1), m.end(1), m.group(1))
return None
current_single_word_re = LazyReCompile(r"(?<![.])\b([a-zA-Z_][\w]*)")
def current_single_word(cursor_offset, line):
"""the un-dotted word just before or under the cursor"""
matches = current_single_word_re.finditer(line)
for m in matches:
if m.start(1) <= cursor_offset and m.end(1) >= cursor_offset:
return LinePart(m.start(1), m.end(1), m.group(1))
return None
def current_dotted_attribute(cursor_offset, line):
"""The dotted attribute-object pair before the cursor"""
match = current_word(cursor_offset, line)
if match is None:
return None
start, end, word = match
if "." in word[1:]:
return LinePart(start, end, word)
current_expression_attribute_re = LazyReCompile(
r"[.]\s*((?:[\w_][\w0-9_]*)|(?:))"
)
def current_expression_attribute(cursor_offset, line):
"""If after a dot, the attribute being completed"""
# TODO replace with more general current_expression_attribute
matches = current_expression_attribute_re.finditer(line)
for m in matches:
if m.start(1) <= cursor_offset and m.end(1) >= cursor_offset:
return LinePart(m.start(1), m.end(1), m.group(1))
return None | bpython/line.py | from itertools import chain
from collections import namedtuple
from .lazyre import LazyReCompile
LinePart = namedtuple("LinePart", ["start", "stop", "word"])
current_word_re = LazyReCompile(r"(?<![)\]\w_.])" r"([\w_][\w0-9._]*[(]?)")
def current_word(cursor_offset, line):
"""the object.attribute.attribute just before or under the cursor"""
pos = cursor_offset
matches = current_word_re.finditer(line)
start = pos
end = pos
word = None
for m in matches:
if m.start(1) < pos and m.end(1) >= pos:
start = m.start(1)
end = m.end(1)
word = m.group(1)
if word is None:
return None
return LinePart(start, end, word)
current_dict_key_re = LazyReCompile(r"""[\w_][\w0-9._]*\[([\w0-9._(), '"]*)""")
def current_dict_key(cursor_offset, line):
"""If in dictionary completion, return the current key"""
matches = current_dict_key_re.finditer(line)
for m in matches:
if m.start(1) <= cursor_offset and m.end(1) >= cursor_offset:
return LinePart(m.start(1), m.end(1), m.group(1))
return None
current_dict_re = LazyReCompile(r"""([\w_][\w0-9._]*)\[([\w0-9._(), '"]*)""")
def current_dict(cursor_offset, line):
"""If in dictionary completion, return the dict that should be used"""
matches = current_dict_re.finditer(line)
for m in matches:
if m.start(2) <= cursor_offset and m.end(2) >= cursor_offset:
return LinePart(m.start(1), m.end(1), m.group(1))
return None
current_string_re = LazyReCompile(
'''(?P<open>(?:""")|"|(?:''\')|')(?:((?P<closed>.+?)(?P=open))|'''
"""(?P<unclosed>.+))"""
)
def current_string(cursor_offset, line):
"""If inside a string of nonzero length, return the string (excluding
quotes)
Weaker than bpython.Repl's current_string, because that checks that a
string is a string based on previous lines in the buffer."""
for m in current_string_re.finditer(line):
i = 3 if m.group(3) else 4
if m.start(i) <= cursor_offset and m.end(i) >= cursor_offset:
return LinePart(m.start(i), m.end(i), m.group(i))
return None
current_object_re = LazyReCompile(r"([\w_][\w0-9_]*)[.]")
def current_object(cursor_offset, line):
"""If in attribute completion, the object on which attribute should be
looked up."""
match = current_word(cursor_offset, line)
if match is None:
return None
start, end, word = match
matches = current_object_re.finditer(word)
s = ""
for m in matches:
if m.end(1) + start < cursor_offset:
if s:
s += "."
s += m.group(1)
if not s:
return None
return LinePart(start, start + len(s), s)
current_object_attribute_re = LazyReCompile(r"([\w_][\w0-9_]*)[.]?")
def current_object_attribute(cursor_offset, line):
"""If in attribute completion, the attribute being completed"""
# TODO replace with more general current_expression_attribute
match = current_word(cursor_offset, line)
if match is None:
return None
start, end, word = match
matches = current_object_attribute_re.finditer(word)
next(matches)
for m in matches:
if (
m.start(1) + start <= cursor_offset
and m.end(1) + start >= cursor_offset
):
return LinePart(m.start(1) + start, m.end(1) + start, m.group(1))
return None
current_from_import_from_re = LazyReCompile(
r"from ([\w0-9_.]*)(?:\s+import\s+([\w0-9_]+[,]?\s*)+)*"
)
def current_from_import_from(cursor_offset, line):
"""If in from import completion, the word after from
returns None if cursor not in or just after one of the two interesting
parts of an import: from (module) import (name1, name2)
"""
# TODO allow for as's
tokens = line.split()
if not ("from" in tokens or "import" in tokens):
return None
matches = current_from_import_from_re.finditer(line)
for m in matches:
if (m.start(1) < cursor_offset and m.end(1) >= cursor_offset) or (
m.start(2) < cursor_offset and m.end(2) >= cursor_offset
):
return LinePart(m.start(1), m.end(1), m.group(1))
return None
current_from_import_import_re_1 = LazyReCompile(r"from\s([\w0-9_.]*)\s+import")
current_from_import_import_re_2 = LazyReCompile(r"([\w0-9_]+)")
current_from_import_import_re_3 = LazyReCompile(r"[,][ ]([\w0-9_]*)")
def current_from_import_import(cursor_offset, line):
"""If in from import completion, the word after import being completed
returns None if cursor not in or just after one of these words
"""
baseline = current_from_import_import_re_1.search(line)
if baseline is None:
return None
match1 = current_from_import_import_re_2.search(line[baseline.end() :])
if match1 is None:
return None
matches = current_from_import_import_re_3.finditer(line[baseline.end() :])
for m in chain((match1,), matches):
start = baseline.end() + m.start(1)
end = baseline.end() + m.end(1)
if start < cursor_offset and end >= cursor_offset:
return LinePart(start, end, m.group(1))
return None
current_import_re_1 = LazyReCompile(r"import")
current_import_re_2 = LazyReCompile(r"([\w0-9_.]+)")
current_import_re_3 = LazyReCompile(r"[,][ ]([\w0-9_.]*)")
def current_import(cursor_offset, line):
# TODO allow for multiple as's
baseline = current_import_re_1.search(line)
if baseline is None:
return None
match1 = current_import_re_2.search(line[baseline.end() :])
if match1 is None:
return None
matches = current_import_re_3.finditer(line[baseline.end() :])
for m in chain((match1,), matches):
start = baseline.end() + m.start(1)
end = baseline.end() + m.end(1)
if start < cursor_offset and end >= cursor_offset:
return LinePart(start, end, m.group(1))
current_method_definition_name_re = LazyReCompile(r"def\s+([a-zA-Z_][\w]*)")
def current_method_definition_name(cursor_offset, line):
"""The name of a method being defined"""
matches = current_method_definition_name_re.finditer(line)
for m in matches:
if m.start(1) <= cursor_offset and m.end(1) >= cursor_offset:
return LinePart(m.start(1), m.end(1), m.group(1))
return None
current_single_word_re = LazyReCompile(r"(?<![.])\b([a-zA-Z_][\w]*)")
def current_single_word(cursor_offset, line):
"""the un-dotted word just before or under the cursor"""
matches = current_single_word_re.finditer(line)
for m in matches:
if m.start(1) <= cursor_offset and m.end(1) >= cursor_offset:
return LinePart(m.start(1), m.end(1), m.group(1))
return None
def current_dotted_attribute(cursor_offset, line):
"""The dotted attribute-object pair before the cursor"""
match = current_word(cursor_offset, line)
if match is None:
return None
start, end, word = match
if "." in word[1:]:
return LinePart(start, end, word)
current_expression_attribute_re = LazyReCompile(
r"[.]\s*((?:[\w_][\w0-9_]*)|(?:))"
)
def current_expression_attribute(cursor_offset, line):
"""If after a dot, the attribute being completed"""
# TODO replace with more general current_expression_attribute
matches = current_expression_attribute_re.finditer(line)
for m in matches:
if m.start(1) <= cursor_offset and m.end(1) >= cursor_offset:
return LinePart(m.start(1), m.end(1), m.group(1))
return None | 0.415373 | 0.38885 |
import numpy as np
x = np.load('Data/x.npy')
y = np.load('Data/y.npy')
from maxsmooth.DCF import smooth
N = 5
result = smooth(x, y, N, base_dir='examples/', fit_type='qp')
"""
We have changed the order of the fit to 5 to illustrate that for
order :math:`{N \leq 5}` and fits with derivatives :math:`{m \geq 2}` constrained
the function will plot each region of the graph corresponding to
different sign functions in a different colourmap. If the constraints are
different or the order is greater than 5 then the viable regions will have
a single colourmap. Invalid regions are plotted as black shaded colourmaps
and the contour lines are contours of :math:`{\chi^2}`.
Specifically, invalid regions violate the condition
.. math::
\pm_m \frac{\delta^m y}{\delta x^m} \leq 0
where :math:`{m}` represents the derivative order, :math:`{y}` is the dependent
variable and :math:`{x}` is the independent variable. Violation of the
condition means that one or more of the constrained derivatives crosses 0 in the
band of interest. For an MSF, as mentioned, :math:`{m \geq 2}` and the sign :math:`{\pm_m}`
applies to specific derivative orders. For this specific example there are
3 constrained derivatives, :math:`{m = 2, 3, 4}` and consequently 3 signs to
optimise for alongside the parameters :math:`{a_k}`. The coloured valid regions
therefore correspond to a specific combination of :math:`{\pm_m}` for the problem.
:math:`{\pm_m}` is also referred to as :math:`{\mathbf{s}}` in the theory
section and the ``maxsmooth`` paper.
We can import the function like so,
"""
from maxsmooth.parameter_plotter import param_plotter
"""
and access it using,
"""
param_plotter(result.optimum_params, result.optimum_signs,
x, y, N, base_dir='examples/')
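"""
As a quick aside, the derivative-sign condition quoted earlier can be checked
numerically for any candidate function. The snippet below is a minimal
illustration on a synthetic grid, independent of the maxsmooth API: a strictly
convex power law has a second derivative of constant sign, so it satisfies the
m = 2 constraint.
"""
x_demo = np.linspace(50, 150, 100)
d2_demo = np.gradient(np.gradient(x_demo ** -2.5, x_demo), x_demo)
print((d2_demo > 0).all()) # True: the 2nd derivative never crosses zero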
"""
The function takes in the optimum parameters and signs found after the fit
as well as the data and order of the fit. There are a number of keyword arguments
detailed in the following section and the resultant fit is shown below. The
function by default samples the parameter ranges 50% either side of the optimum
and calculates 50 samples for each parameter. In each panel the two
labelled parameters are varied while the others are maintained at their optimum
values.
.. image:: https://github.com/htjb/maxsmooth/raw/master/docs/images/Parameter_plot.png
We are also able to plot the data, fit and residuals alongside the parameter
plot and this can be done by setting data_plot=True. We can also highlight the
central region in each panel of the parameter space by setting center_plot=True.
"""
param_plotter(result.optimum_params, result.optimum_signs,
x, y, N, base_dir='examples/', data_plot=True, center_plot=True)
"""
which gives us the graph below.
.. image:: https://github.com/htjb/maxsmooth/raw/master/docs/images/Parameter_plot_extended.png
""" | example_codes/param_plotter_example.py | import numpy as np
x = np.load('Data/x.npy')
y = np.load('Data/y.npy')
from maxsmooth.DCF import smooth
N = 5
result = smooth(x, y, N, base_dir='examples/', fit_type='qp')
"""
We have changed the order of the fit to 5 to illustrate that for
order :math:`{N \leq 5}` and fits with derivatives :math:`{m \geq 2}` constrained
the function will plot each region of the graph corresponding to
different sign functions in a different colourmap. If the constraints are
different or the order is greater than 5 then the viable regions will have
a single colourmap. Invalid regions are plotted as black shaded colourmaps
and the contour lines are contours of :math:`{\chi^2}`.
Specifically, invalid regions violate the condition
.. math::
\pm_m \frac{\delta^m y}{\delta x^m} \leq 0
where :math:`{m}` represents the derivative order, :math:`{y}` is the dependent
variable and :math:`{x}` is the independent variable. Violation of the
condition means that one or more of the constrained derivatives crosses 0 in the
band of interest. For an MSF, as mentioned, :math:`{m \geq 2}` and the sign :math:`{\pm_m}`
applies to specific derivative orders. For this specific example there are
3 constrained derivatives, :math:`{m = 2, 3, 4}` and consequently 3 signs to
optimise for alongside the parameters :math:`{a_k}`. The coloured valid regions
therefore correspond to a specific combination of :math:`{\pm_m}` for the problem.
:math:`{\pm_m}` is also referred to as :math:`{\mathbf{s}}` in the theory
section and the ``maxsmooth`` paper.
We can import the function like so,
"""
from maxsmooth.parameter_plotter import param_plotter
"""
and access it using,
"""
param_plotter(result.optimum_params, result.optimum_signs,
x, y, N, base_dir='examples/')
"""
The function takes in the optimum parameters and signs found after the fit
as well as the data and order of the fit. There are a number of keyword arguments
detailed in the following section and the resultant fit is shown below. The
function by default samples the parameter ranges 50% either side of the optimum
and calculates 50 samples for each parameter. In each panel the two
labelled parameters are varied while the others are maintained at their optimum
values.
.. image:: https://github.com/htjb/maxsmooth/raw/master/docs/images/Parameter_plot.png
We are also able to plot the data, fit and residuals alongside the parameter
plot and this can be done by setting data_plot=True. We can also highlight the
central region in each panel of the parameter space by setting center_plot=True.
"""
param_plotter(result.optimum_params, result.optimum_signs,
x, y, N, base_dir='examples/', data_plot=True, center_plot=True)
"""
which gives us the graph below.
.. image:: https://github.com/htjb/maxsmooth/raw/master/docs/images/Parameter_plot_extended.png
""" | 0.905565 | 0.827201 |
import pytestqt.qtbot
from qtpy import QtCore
import pytest
import qtrio._qt
def test_signal_emits(qtbot: pytestqt.qtbot.QtBot) -> None:
"""qtrio._core.Signal emits."""
class NotQObject:
signal = qtrio.Signal()
instance = NotQObject()
with qtbot.wait_signal(instance.signal, 100):
instance.signal.emit()
def test_signal_emits_value(qtbot: pytestqt.qtbot.QtBot) -> None:
"""qtrio._core.Signal emits a value."""
class NotQObject:
signal = qtrio.Signal(int)
result = None
def collect_result(value):
nonlocal result
result = value
instance = NotQObject()
instance.signal.connect(collect_result)
with qtbot.wait_signal(instance.signal, 100):
instance.signal.emit(13)
assert result == 13
def test_accessing_signal_on_class_results_in_our_signal():
"""qtrio._core.Signal instance accessible via class attribute."""
class NotQObject:
signal = qtrio.Signal(int)
assert isinstance(NotQObject.signal, qtrio.Signal)
def test_our_signal_object_method_returns_qobject():
"""qtrio._core.Signal instance provides access to signal-hosting QObject."""
class NotQObject:
signal = qtrio.Signal(int)
instance = NotQObject()
assert isinstance(NotQObject.signal.object(instance=instance), QtCore.QObject)
def test_connection_connects(qtbot: pytestqt.qtbot.QtBot) -> None:
"""qtrio._core.connection connects signal inside managed context."""
class MyQObject(QtCore.QObject):
signal = QtCore.Signal(int)
instance = MyQObject()
results = []
def collect_result(value):
results.append(value)
instance.signal.emit(1)
with qtrio._qt.connection(instance.signal, collect_result):
instance.signal.emit(2)
assert results == [2]
def test_connection_disconnects(qtbot: pytestqt.qtbot.QtBot) -> None:
"""qtrio._core.connection disconnects signal when exiting managed context."""
class MyQObject(QtCore.QObject):
signal = QtCore.Signal(int)
instance = MyQObject()
results = []
def collect_result(value):
results.append(value)
with qtrio._qt.connection(instance.signal, collect_result):
instance.signal.emit(1)
instance.signal.emit(2)
assert results == [1]
def test_connection_yield_can_be_disconnected(qtbot: pytestqt.qtbot.QtBot) -> None:
"""qtrio._core.connection result can be used to disconnect the signal early."""
class MyQObject(QtCore.QObject):
signal = QtCore.Signal(int)
instance = MyQObject()
results = []
def collect_result(value):
results.append(value)
with qtrio._qt.connection(instance.signal, collect_result) as connection:
instance.signal.emit(1)
instance.signal.disconnect(connection)
instance.signal.emit(2)
assert results == [1]
def test_failed_connection_raises():
"""qtrio._core.connection raises TypeError on failure to connect."""
class MyQObject(QtCore.QObject):
signal = QtCore.Signal()
instance = MyQObject()
# TODO: get more specific about the exception
with pytest.raises(TypeError):
with qtrio._qt.connection(instance.signal, 2): # type: ignore
            pass # pragma: no cover | qtrio/_tests/test_qt.py | 0.630912 | 0.657786 |
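The tests above document the behaviour of qtrio._qt.connection: it connects a slot to a Qt signal on entry, disconnects it on exit, and yields a handle that can be disconnected early. Below is a minimal standalone sketch of that pattern (an illustration only; it assumes qtrio and qtpy with a Qt binding are installed, and some bindings may want a QCoreApplication created first).

from qtpy import QtCore
import qtrio._qt

class Emitter(QtCore.QObject):
    signal = QtCore.Signal(int)

emitter = Emitter()
received = []

with qtrio._qt.connection(emitter.signal, received.append):
    emitter.signal.emit(42)  # delivered: the connection is live inside the block

emitter.signal.emit(99)      # dropped: the context manager disconnected on exit
print(received)              # expected output: [42]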
# [START docs_quickstart]
from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import sys
import converters.latex
import converters.markdown
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/documents.readonly']
def auth_and_download_body(doc_id, doc_pickle_file):
"""Shows basic usage of the Docs API.
Prints the title of a sample document.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_console()
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('docs', 'v1', credentials=creds)
    # Retrieve the document's contents from the Docs service.
document = service.documents().get(documentId=doc_id).execute()
print('The title of the document is: {}'.format(document.get('title')))
with open(doc_pickle_file, 'wb') as body_pickle:
pickle.dump(document.get('body'), body_pickle)
if __name__ == '__main__':
if len(sys.argv) != 4:
print("Usage: docker run [...] download-doc.py [doc-id] [latex,markdown] [out-file]")
sys.exit(1)
doc_id = sys.argv[1]
format = sys.argv[2]
out_file = sys.argv[3]
doc_pickle_file = 'document-body-' + doc_id + '.pickle'
if not os.path.exists(doc_pickle_file):
print("Downloading document ... ")
auth_and_download_body(doc_id, doc_pickle_file)
if format == "latex":
converters.latex.process_body(doc_pickle_file, out_file)
if format == "markdown":
converters.markdown.process_body(doc_pickle_file, out_file)
# [END docs_quickstart] | download-doc.py | 0.338186 | 0.172799 |
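For reference, a hedged sketch of invoking the script above outside Docker; the document id and output file are placeholders, and a valid credentials.json must be present for the OAuth flow to succeed.

# Illustration only: placeholder document id and output file name.
import subprocess

subprocess.run(
    ["python", "download-doc.py", "YOUR_DOC_ID", "markdown", "out.md"],
    check=True,
)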
import numpy as np
from enum import Enum
class BasePalette(Enum):
"""Palette Enum class with additional helper functions"""
def __len__(self):
"""Number of colors in palette"""
return len(self.value)
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n < len(self):
result = self.value[self.n]
self.n += 1
return result
else:
raise StopIteration
    @classmethod
    def list(cls):
        """List all available Palette names."""
        return [member.name for member in cls]
def from_hex(hex_list):
"""Generate Pal palette from list of #HEX color values"""
hex_list = [h.lstrip("#") for h in hex_list]
return np.array([[tuple(int(h[i:i+2], 16) for i in (0, 2, 4))] for h in hex_list], dtype=float) / 255.
def from_rgb(rgb_list):
"""Generate Pal palette from list of 0-255 [R, G, B] values"""
return np.array([[rgb] for rgb in rgb_list], dtype=float) / 255.
class Pal(BasePalette):
"""Enum of common palettes based on https://en.wikipedia.org/wiki/List_of_color_palettes"""
TELETEXT = [
[[0. , 0. , 0. ]],
[[1. , 0. , 0. ]],
[[0. , 0.50196078, 0. ]],
[[1. , 1. , 0. ]],
[[0. , 0. , 1. ]],
[[1. , 0. , 1. ]],
[[0. , 1. , 1. ]],
[[1. , 1. , 1. ]]
]
BBC_MICRO = TELETEXT
CGA_MODE4_PAL1 = [
[[0., 0., 0.]],
[[1., 1., 1.]],
[[0., 1., 1.]],
[[1., 0., 1.]]
]
CGA_MODE5_PAL1 = [
[[0. , 0. , 0. ]],
[[0.33333333, 1. , 1. ]],
[[1. , 0.33333333, 0.33333333]],
[[1. , 1. , 1. ]]
]
CGA_MODE4_PAL2 = [
[[0., 0., 0.]],
[[0.33333333, 1. , 0.33333333]],
[[1. , 0.33333333, 0.33333333]],
[[0.33333333, 1. , 0.33333333]]
]
ZX_SPECTRUM = [
[[0. , 0. , 0. ]],
[[0. , 0.15294118, 0.98431373]],
[[1. , 0.18823529, 0.08627451]],
[[1. , 0.24705882, 0.98823529]],
[[0. , 0.97647059, 0.17254902]],
[[0. , 0.98823529, 0.99607843]],
[[1. , 0.99215686, 0.2 ]],
[[1. , 1. , 1. ]]
]
APPLE_II_LO = [
[[0. , 0. , 0. ]],
[[0.52156863, 0.23137255, 0.31764706]],
[[0.31372549, 0.27843137, 0.5372549 ]],
[[0.91764706, 0.36470588, 0.94117647]],
[[0. , 0.40784314, 0.32156863]],
[[0.57254902, 0.57254902, 0.57254902]],
[[0. , 0.65882353, 0.94509804]],
[[0.79215686, 0.76470588, 0.97254902]],
[[0.31764706, 0.36078431, 0.05882353]],
[[0.92156863, 0.49803922, 0.1372549 ]],
[[0.57254902, 0.57254902, 0.57254902]],
[[0.96470588, 0.7254902 , 0.79215686]],
[[0. , 0.79215686, 0.16078431]],
[[0.79607843, 0.82745098, 0.60784314]],
[[0.60392157, 0.8627451 , 0.79607843]],
[[1. , 1. , 1. ]]
]
APPLE_II_HI = [
[[0. , 0. , 0. ]],
[[1. , 0. , 1. ]],
[[0. , 1. , 0. ]],
[[1. , 1. , 1. ]],
[[0. , 0.68627451, 1. ]],
[[1. , 0.31372549, 0. ]]
]
COMMODORE_64 = [
[[0. , 0. , 0. ]],
[[1. , 1. , 1. ]],
[[0.63137255, 0.30196078, 0.2627451 ]],
[[0.41568627, 0.75686275, 0.78431373]],
[[0.63529412, 0.34117647, 0.64705882]],
[[0.36078431, 0.67843137, 0.37254902]],
[[0.30980392, 0.26666667, 0.61176471]],
[[0.79607843, 0.83921569, 0.5372549 ]],
[[0.63921569, 0.40784314, 0.22745098]],
[[0.43137255, 0.32941176, 0.04313725]],
[[0.8 , 0.49803922, 0.4627451 ]],
[[0.38823529, 0.38823529, 0.38823529]],
[[0.54509804, 0.54509804, 0.54509804]],
[[0.60784314, 0.89019608, 0.61568627]],
[[0.54117647, 0.49803922, 0.80392157]],
[[0.68627451, 0.68627451, 0.68627451]]
]
GAMEBOY_COMBO_UP = [
[[0.01568627, 0.00784314, 0.01568627]],
[[0.51764706, 0.25882353, 0.01568627]],
[[0.9254902 , 0.60392157, 0.32941176]],
[[0.98823529, 0.98039216, 0.98823529]]
]
GAMEBOY_COMBO_DOWN = [
[[0.01568627, 0.00784314, 0.01568627]],
[[0.61176471, 0.57254902, 0.95686275]],
[[0.9254902 , 0.54117647, 0.54901961]],
[[0.98823529, 0.98039216, 0.6745098 ]]
]
GAMEBOY_COMBO_LEFT = [
[[0.01568627, 0.00784314, 0.01568627]],
[[0.01568627, 0.19607843, 0.98823529]],
[[0.48627451, 0.66666667, 0.98823529]],
[[0.98823529, 0.98039216, 0.98823529]],
[[0.6745098 , 0.14901961, 0.14117647]],
[[0.9254902 , 0.54117647, 0.54901961]],
[[0.29803922, 0.54117647, 0.01568627]],
[[0.01568627, 0.98039216, 0.01568627]]
]
GAMEBOY_COMBO_RIGHT = [
[[0.01568627, 0.00784314, 0.01568627]],
[[0.98823529, 0.19607843, 0.01568627]],
[[0.01568627, 0.98039216, 0.01568627]],
[[0.98823529, 0.98039216, 0.98823529]]
]
GAMEBOY_A_UP = [
[[0.01568627, 0.00784314, 0.01568627]],
[[0.6745098 , 0.14901961, 0.14117647]],
[[0.9254902 , 0.54117647, 0.54901961]],
[[0.98823529, 0.98039216, 0.98823529]],
[[0.29803922, 0.54117647, 0.01568627]],
[[0.01568627, 0.98039216, 0.01568627]],
[[0.01568627, 0.19607843, 0.98823529]],
[[0.48627451, 0.66666667, 0.98823529]]
]
GAMEBOY_A_DOWN = [
[[0.01568627, 0.00784314, 0.01568627]],
[[0.98823529, 0.19607843, 0.01568627]],
[[0.95686275, 0.99607843, 0.01568627]],
[[0.98823529, 0.98039216, 0.98823529]]
]
GAMEBOY_A_LEFT = [
[[0.01568627, 0.00784314, 0.01568627]],
[[0.04313725, 0.02745098, 0.08235294]],
[[0.55686275, 0.52156863, 0.87058824]],
[[0.98823529, 0.98039216, 0.98823529]],
[[0.6745098 , 0.14901961, 0.14117647]],
[[0.9254902 , 0.54117647, 0.54901961]],
[[0.51764706, 0.25882353, 0.01568627]],
[[0.9254902 , 0.60392157, 0.32941176]]
]
GAMEBOY_A_RIGHT = [
[[0.01568627, 0.00784314, 0.01568627]],
[[0.01568627, 0.19607843, 0.98823529]],
[[0.01568627, 0.98039216, 0.01568627]],
[[0.98823529, 0.98039216, 0.98823529]],
[[0.6745098 , 0.14901961, 0.14117647]],
[[0.9254902 , 0.54117647, 0.54901961]]
]
GAMEBOY_B_UP = [
[[0.29803922, 0.16470588, 0.01568627]],
[[0.58039216, 0.47843137, 0.29803922]],
[[0.76862745, 0.68235294, 0.58039216]],
[[0.98823529, 0.91764706, 0.89411765]],
[[0. , 0. , 0. ]],
[[0.51764706, 0.25882353, 0.01568627]],
[[0.9254902 , 0.60392157, 0.32941176]]
]
GAMEBOY_B_DOWN = [
[[0.01568627, 0.00784314, 0.01568627]],
[[0.51764706, 0.25882353, 0.01568627]],
[[0.95686275, 0.99607843, 0.01568627]],
[[0.98823529, 0.98039216, 0.98823529]],
[[0.01568627, 0.19607843, 0.98823529]],
[[0.48627451, 0.66666667, 0.98823529]],
[[0.29803922, 0.54117647, 0.01568627]],
[[0.01568627, 0.98039216, 0.01568627]]
]
GAMEBOY_B_LEFT = [
[[0.01568627, 0.00784314, 0.01568627]],
[[0.45490196, 0.44705882, 0.45490196]],
[[0.7372549 , 0.72941176, 0.7372549 ]],
[[0.98823529, 0.98039216, 0.98823529]]
]
GAMEBOY_B_RIGHT = [
[[0.98823529, 0.98039216, 0.98823529]],
[[0.95686275, 0.99607843, 0.01568627]],
[[0.01568627, 0.63529412, 0.64313725]],
[[0.01568627, 0.00784314, 0.01568627]]
]
GAMEBOY_ORIGINAL = [
[[0. , 0.24705882, 0. ]],
[[0.18039216, 0.45098039, 0.1254902 ]],
[[0.54901961, 0.74901961, 0.03921569]],
[[0.62745098, 0.81176471, 0.03921569]]
]
GAMEBOY_POCKET = [
[[0. , 0. , 0. ]],
[[0.33333333, 0.33333333, 0.33333333]],
[[0.66666667, 0.66666667, 0.66666667]],
[[1. , 1. , 1. ]]
]
GAMEBOY_VIRTUALBOY = [
[[0.9372549 , 0. , 0. ]],
[[0.64313725, 0. , 0. ]],
[[0.33333333, 0. , 0. ]],
[[0. , 0. , 0. ]]
]
MICROSOFT_WINDOWS_16 = [
[[0. , 0. , 0. ]],
[[0.50196078, 0. , 0. ]],
[[0. , 0.50196078, 0. ]],
[[0.50196078, 0.50196078, 0. ]],
[[0. , 0. , 0.50196078]],
[[0.50196078, 0. , 0.50196078]],
[[0. , 0.50196078, 0.50196078]],
[[0.75294118, 0.75294118, 0.75294118]],
[[0.50196078, 0.50196078, 0.50196078]],
[[1. , 0. , 0. ]],
[[0. , 1. , 0. ]],
[[1. , 1. , 0. ]],
[[0. , 0. , 1. ]],
[[1. , 0. , 1. ]],
[[0. , 1. , 1. ]],
[[1. , 1. , 1. ]]
]
MICROSOFT_WINDOWS_20 = [
[[0. , 0. , 0. ]],
[[0.50196078, 0. , 0. ]],
[[0. , 0.50196078, 0. ]],
[[0.50196078, 0.50196078, 0. ]],
[[0. , 0. , 0.50196078]],
[[0.50196078, 0. , 0.50196078]],
[[0. , 0.50196078, 0.50196078]],
[[0.75294118, 0.75294118, 0.75294118]],
[[0.75294118, 0.8627451 , 0.75294118]],
[[0.65098039, 0.79215686, 0.94117647]],
[[1. , 0.98431373, 0.94117647]],
[[0.62745098, 0.62745098, 0.64313725]],
[[0.50196078, 0.50196078, 0.50196078]],
[[1. , 0. , 0. ]],
[[0. , 1. , 0. ]],
[[1. , 1. , 0. ]],
[[0. , 0. , 1. ]],
[[1. , 0. , 1. ]],
[[0. , 1. , 1. ]],
[[1. , 1. , 1. ]]
]
MICROSOFT_WINDOWS_PAINT = [
[[0. , 0. , 0. ]],
[[1. , 1. , 1. ]],
[[0.48235294, 0.48235294, 0.48235294]],
[[0.74117647, 0.74117647, 0.74117647]],
[[0.48235294, 0.04705882, 0.00784314]],
[[1. , 0.14509804, 0. ]],
[[0.48235294, 0.48235294, 0.00392157]],
[[1. , 0.98431373, 0.00392157]],
[[0. , 0.48235294, 0.00784314]],
[[0.00784314, 0.97647059, 0.00392157]],
[[0. , 0.48235294, 0.47843137]],
[[0.00784314, 0.99215686, 0.99607843]],
[[0.00392157, 0.0745098 , 0.47843137]],
[[0.01568627, 0.19607843, 1. ]],
[[0.48235294, 0.09803922, 0.47843137]],
[[1. , 0.25098039, 0.99607843]],
[[0.47843137, 0.22352941, 0.00392157]],
[[1. , 0.47843137, 0.22352941]],
[[0.48235294, 0.48235294, 0.21960784]],
[[1. , 0.98823529, 0.47843137]],
[[0.00784314, 0.22352941, 0.22352941]],
[[0.01176471, 0.98039216, 0.48235294]],
[[0. , 0.48235294, 1. ]],
[[1. , 0.17254902, 0.48235294]]
]
PICO_8 = [
[[0. , 0. , 0. ]],
[[0.11372549, 0.16862745, 0.3254902 ]],
[[0.49411765, 0.14509804, 0.3254902 ]],
[[0. , 0.52941176, 0.31764706]],
[[0.67058824, 0.32156863, 0.21176471]],
[[0.37254902, 0.34117647, 0.30980392]],
[[0.76078431, 0.76470588, 0.78039216]],
[[1. , 0.94509804, 0.90980392]],
[[1. , 0. , 0.30196078]],
[[1. , 0.63921569, 0. ]],
[[1. , 0.9254902 , 0.15294118]],
[[0. , 0.89411765, 0.21176471]],
[[0.16078431, 0.67843137, 1. ]],
[[0.51372549, 0.4627451 , 0.61176471]],
[[1. , 0.46666667, 0.65882353]],
[[1. , 0.8 , 0.66666667]]
]
MSX = [
[[0. , 0. , 0. ]],
[[0.24313725, 0.72156863, 0.28627451]],
[[0.45490196, 0.81568627, 0.49019608]],
[[0.34901961, 0.33333333, 0.87843137]],
[[0.50196078, 0.4627451 , 0.94509804]],
[[0.7254902 , 0.36862745, 0.31764706]],
[[0.39607843, 0.85882353, 0.9372549 ]],
[[0.85882353, 0.39607843, 0.34901961]],
[[1. , 0.5372549 , 0.49019608]],
[[0.8 , 0.76470588, 0.36862745]],
[[0.87058824, 0.81568627, 0.52941176]],
[[0.22745098, 0.63529412, 0.25490196]],
[[0.71764706, 0.4 , 0.70980392]],
[[0.8 , 0.8 , 0.8 ]],
[[1. , 1. , 1. ]]
]
MONO_OBRADINN_IBM = [
[[0.18039216, 0.18823529, 0.21568627]],
[[0.92156863, 0.89803922, 0.80784314]]
]
MONO_OBRADINN_MAC = [
[[0.2 , 0.2 , 0.09803922]],
[[0.89803922, 1. , 1. ]]
]
MONO_BJG = [
[[0.9372549 , 1. , 0.95686275]],
[[0.17254902, 0.05882353, 0.2 ]]
]
MONO_BW = [
[[0., 0., 0.]],
[[1., 1., 1.]]
]
# https://superuser.com/questions/361297/what-colour-is-the-dark-green-on-old-fashioned-green-screen-computer-displays/1206781#1206781
MONO_PHOSPHOR_AMBER = [
[[0.15686275, 0.15686275, 0.15686275]],
[[1. , 0.69019608, 0. ]]
]
MONO_PHOSPHOR_LTAMBER = [
[[0.15686275, 0.15686275, 0.15686275]],
[[1. , 0.8 , 0. ]]
]
MONO_PHOSPHOR_GREEN1 = [
[[0.15686275, 0.15686275, 0.15686275]],
[[0.2 , 1. , 0. ]]
]
MONO_PHOSPHOR_GREEN2 = [
[[0.15686275, 0.15686275, 0.15686275]],
[[0 , 1. , 0.2 ]]
]
MONO_PHOSPHOR_GREEN3 = [
[[0.15686275, 0.15686275, 0.15686275]],
[[0 , 1. , 0.4 ]]
]
MONO_PHOSPHOR_APPLE = [
[[0.15686275, 0.15686275, 0.15686275]],
[[0.2 , 1. , 0.2 ]]
]
APPLE_II_MONO = MONO_PHOSPHOR_APPLE
MONO_PHOSPHOR_APPLEC = [
[[0.15686275, 0.15686275, 0.15686275]],
[[0.4 , 1. , 0.4 ]]
]
    APPLE_II_MONOC = MONO_PHOSPHOR_APPLEC | pyxelate/pal.py | 0.657209 | 0.359308 |
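A short usage sketch for the palette helpers above. The import path is an assumption based on the file's location (pyxelate/pal.py); the helper names and array shapes follow directly from the code.

import numpy as np
from pyxelate.pal import Pal, from_hex, from_rgb  # assumed import path

print(Pal.list())                               # names of all built-in palettes
gameboy = np.array(Pal.GAMEBOY_ORIGINAL.value)  # shape (4, 1, 3), floats in [0, 1]
print(len(Pal.GAMEBOY_ORIGINAL), gameboy.shape)

# Custom palettes from hex strings or 0-255 RGB triples give identical arrays.
custom_hex = from_hex(["#000000", "#ff8800", "#ffffff"])
custom_rgb = from_rgb([[0, 0, 0], [255, 136, 0], [255, 255, 255]])
assert np.allclose(custom_hex, custom_rgb)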
# Copyright 2006, <NAME> <<EMAIL>>
import logging
from twisted.internet import task, defer
from twisted.internet import reactor
import coherence.extern.louie as louie
from coherence.upnp.core.device import RootDevice
_log = logging.getLogger( "coherence.DeviceList" )
# this is a list of root devices!
class DeviceList(object):
def __init__(self):
self._devices = {} # keyed by udn
# self._devices = []
louie.connect( self.create_device, 'Coherence.UPnP.SSDP.new_device', louie.Any)
louie.connect( self.remove_device, 'Coherence.UPnP.SSDP.removed_device', louie.Any)
louie.connect( self.add_device, 'Coherence.UPnP.RootDevice.detection_completed', louie.Any)
#self.renew_service_subscription_loop = task.LoopingCall(self.check_devices)
# what if the device under question gives less than 20 seconds as the timeout.
#self.renew_service_subscription_loop.start(20.0, now=False)
reactor.addSystemEventTrigger( 'before', 'shutdown', self.shutdown)
def check_devices(self):
""" iterate over devices and their embedded ones and renew subscriptions """
for udn in self._devices:
root_device = self._devices[udn]
root_device.renew_service_subscriptions()
def get_device_by_host(self, host):
found = []
for udn in self._devices:
device = self._devices[udn]
if device.get_host() == host:
found.append(device)
return found
def get_device_with_usn(self, usn):
found = None
for ky in self._devices:
if self._devices[ky].get_usn() == usn:
found = self._devices[ky]
break
return found
def get_device_with_id(self, device_id):
# actually a udn.
found = None
        if device_id in self._devices:
found = self._devices[device_id]
# for device in self._devices:
# id = device.get_id()
# if device_id[:5] != 'uuid:':
# id = id[5:]
# if id == device_id:
# found = device
# break
_log.debug("get_device_with_id %s (%s)", device_id, found )
return found
def get_nonlocal_devices(self):
return [self._devices[d] for d in self._devices if self._devices[d].manifestation == 'remote']
def create_device(self, device_type, infos):
        # the louie event is only triggered for a root device.
root = RootDevice(infos)
        # add_device will be called after root detection completes
def add_device(self, device):
_log.debug("add_device %s (%s)", device.get_id(), device )
self._devices[device.get_id()]= device
def remove_device(self, device_type, infos):
_log.info("removed device %s %s", infos['ST'], infos['USN'] )
device = self.get_device_with_usn(infos['USN'])
if device:
del self._devices[device.get_id()]
if infos['ST'] == 'upnp:rootdevice':
louie.send('Coherence.UPnP.Device.removed', None, udn=device.get_id() )
device.remove()
def shutdown( self):
""" send service unsubscribe messages """
try:
self.renew_service_subscription_loop.stop()
except:
pass
l = []
for ky in self._devices:
root_device = self._devices[ky]
for device in root_device.get_devices():
d = device.unsubscribe_service_subscriptions()
l.append(d)
d.addCallback(device.remove)
d = root_device.unsubscribe_service_subscriptions()
l.append(d)
d.addCallback(root_device.remove)
dl = defer.DeferredList(l)
        return dl | WebBrickLibs/coherence/Save/DeviceList.py | 0.457137 | 0.056081 |
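The class above is driven entirely by louie signals rather than direct calls. Here is a small sketch of how a consumer might hook into the same signal bus (illustration only; it assumes the Coherence package with its bundled louie is importable and that an SSDP listener elsewhere emits the events).

import coherence.extern.louie as louie

def on_root_device_removed(udn=None):
    # DeviceList sends 'Coherence.UPnP.Device.removed' with the device's udn
    # when an 'upnp:rootdevice' disappears.
    print("root device removed:", udn)

louie.connect(on_root_device_removed, 'Coherence.UPnP.Device.removed', louie.Any)
device_list = DeviceList()  # connects itself to the SSDP new/removed signals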
from jinja2 import Environment, FileSystemLoader
from simplemap.html_render import SilentUndefined
import json
import os
import sys
import traceback
TEMPLATES_DIR = FileSystemLoader('simplemap/templates')
ZOOM_DEFAULT = 11
LINES_DEFAULT = []
class Map(object):
def __init__(self, title, center=None, zoom=11, markers=None, points=None, html_template='basic.html', config_file='config.json'):
self._env = Environment(loader=TEMPLATES_DIR, trim_blocks=True, undefined=SilentUndefined)
self.title = title
self.template = self._env.get_template(html_template)
self.center = center
self.zoom = zoom
self.config = config_file
self.markers = markers
# points for lines added in
self.points = points
def set_center(self, center_point):
self._center = '{{ lat:{}, lng:{}}}'.format(*center_point) if center_point else 'null'
def get_center(self):
return self._center
def set_zoom(self, zoom):
if zoom:
self._zoom = zoom
else:
            # Don't allow zoom to be null if a custom center is given
self._zoom = ZOOM_DEFAULT if self.center else 'null'
def get_zoom(self):
return self._zoom
def set_config(self, config_file):
try:
with open(config_file, "r") as config:
self._config = json.load(config)
except IOError:
print("Error, unable to open {0} config file.".format(config_file))
sys.exit()
except KeyError:
print("Error, `api_entry` not found in {0} config file.".format(config_file))
sys.exit()
except Exception:
print("An unknown error occured while attempting to read {0} config file.".format(config_file))
traceback.print_exc()
sys.exit()
def get_config(self):
return self._config
def set_markers(self, markers):
if markers:
for i in markers:
if len(i) == 2:
i.insert(0, '')
self._markers = markers
def get_markers(self):
return self._markers
# Points setter and getter
def set_points(self, points):
if points:
self._points = points
else:
self._points = LINES_DEFAULT
def get_points(self):
return self._points
config = property(get_config, set_config)
markers = property(get_markers, set_markers)
center = property(get_center, set_center)
zoom = property(get_zoom, set_zoom)
# Declare the names of the setter and getters
points = property(get_points, set_points)
def write(self, output_path):
try:
html = self.template.render(map_title = self.title, center=self.center,
zoom=self.zoom, markers=self.markers, points=self.points, api_key=self.config['api_key'])
with open(output_path, "w") as out_file:
out_file.write(html)
return 'file://' + os.path.join(os.path.abspath(os.curdir), output_path)
except IOError:
print("Error, unable to write {0}".format(output_path))
sys.exit()
except Exception:
print("Undefined error occured while writing generating {0}".format(output_path))
traceback.print_exc()
            sys.exit() | simplemap/map.py | 0.234231 | 0.084455 |
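A hedged usage sketch for the Map class above. It assumes the module is importable as simplemap.map, that config.json provides an api_key, and that the bundled basic.html template exists; coordinates and labels are illustrative.

from simplemap.map import Map  # assumed import path

office_map = Map(
    "Office locations",
    center=(37.7749, -122.4194),
    zoom=12,
    markers=[["HQ", 37.7793, -122.4192], [37.7680, -122.4312]],  # label is optional
    points=[[37.7749, -122.4194], [37.7793, -122.4192]],         # polyline vertices
)
print(office_map.write("office_map.html"))  # prints a file:// URL for the rendered page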
import argparse
import logging
from common import _utils
def main(argv=None):
  parser = argparse.ArgumentParser(description='SageMaker Endpoint Deployment Job')
parser.add_argument('--region', type=str.strip, required=True, help='The region where the cluster launches.')
parser.add_argument('--endpoint_config_name', type=str.strip, required=False, help='The name of the endpoint configuration.', default='')
parser.add_argument('--variant_name_1', type=str.strip, required=False, help='The name of the production variant.', default='variant-name-1')
parser.add_argument('--model_name_1', type=str.strip, required=True, help='The model name used for endpoint deployment.')
parser.add_argument('--initial_instance_count_1', type=_utils.str_to_int, required=False, help='Number of instances to launch initially.', default=1)
parser.add_argument('--instance_type_1', choices=['ml.m4.xlarge', 'ml.m4.2xlarge', 'ml.m4.4xlarge', 'ml.m4.10xlarge', 'ml.m4.16xlarge', 'ml.m5.large', 'ml.m5.xlarge', 'ml.m5.2xlarge', 'ml.m5.4xlarge',
'ml.m5.12xlarge', 'ml.m5.24xlarge', 'ml.c4.xlarge', 'ml.c4.2xlarge', 'ml.c4.4xlarge', 'ml.c4.8xlarge', 'ml.p2.xlarge', 'ml.p2.8xlarge', 'ml.p2.16xlarge', 'ml.p3.2xlarge', 'ml.p3.8xlarge', 'ml.p3.16xlarge',
'ml.c5.xlarge', 'ml.c5.2xlarge', 'ml.c5.4xlarge', 'ml.c5.9xlarge', 'ml.c5.18xlarge', ''], type=str.strip, required=False, help='The ML compute instance type.', default='ml.m4.xlarge')
parser.add_argument('--initial_variant_weight_1', type=_utils.str_to_float, required=False, help='Determines initial traffic distribution among all of the models that you specify in the endpoint configuration.', default=1.0)
parser.add_argument('--accelerator_type_1', choices=['ml.eia1.medium', 'ml.eia1.large', 'ml.eia1.xlarge', ''], type=str.strip, required=False, help='The size of the Elastic Inference (EI) instance to use for the production variant.', default='')
parser.add_argument('--variant_name_2', type=str.strip, required=False, help='The name of the production variant.', default='variant-name-2')
parser.add_argument('--model_name_2', type=str.strip, required=False, help='The model name used for endpoint deployment.', default='')
parser.add_argument('--initial_instance_count_2', type=_utils.str_to_int, required=False, help='Number of instances to launch initially.', default=1)
parser.add_argument('--instance_type_2', choices=['ml.m4.xlarge', 'ml.m4.2xlarge', 'ml.m4.4xlarge', 'ml.m4.10xlarge', 'ml.m4.16xlarge', 'ml.m5.large', 'ml.m5.xlarge', 'ml.m5.2xlarge', 'ml.m5.4xlarge',
'ml.m5.12xlarge', 'ml.m5.24xlarge', 'ml.c4.xlarge', 'ml.c4.2xlarge', 'ml.c4.4xlarge', 'ml.c4.8xlarge', 'ml.p2.xlarge', 'ml.p2.8xlarge', 'ml.p2.16xlarge', 'ml.p3.2xlarge', 'ml.p3.8xlarge', 'ml.p3.16xlarge',
'ml.c5.xlarge', 'ml.c5.2xlarge', 'ml.c5.4xlarge', 'ml.c5.9xlarge', 'ml.c5.18xlarge', ''], type=str.strip, required=False, help='The ML compute instance type.', default='ml.m4.xlarge')
parser.add_argument('--initial_variant_weight_2', type=_utils.str_to_float, required=False, help='Determines initial traffic distribution among all of the models that you specify in the endpoint configuration.', default=1.0)
parser.add_argument('--accelerator_type_2', choices=['ml.eia1.medium', 'ml.eia1.large', 'ml.eia1.xlarge', ''], type=str.strip, required=False, help='The size of the Elastic Inference (EI) instance to use for the production variant.', default='')
parser.add_argument('--variant_name_3', type=str.strip, required=False, help='The name of the production variant.', default='variant-name-3')
parser.add_argument('--model_name_3', type=str.strip, required=False, help='The model name used for endpoint deployment.', default='')
parser.add_argument('--initial_instance_count_3', type=_utils.str_to_int, required=False, help='Number of instances to launch initially.', default=1)
parser.add_argument('--instance_type_3', choices=['ml.m4.xlarge', 'ml.m4.2xlarge', 'ml.m4.4xlarge', 'ml.m4.10xlarge', 'ml.m4.16xlarge', 'ml.m5.large', 'ml.m5.xlarge', 'ml.m5.2xlarge', 'ml.m5.4xlarge',
'ml.m5.12xlarge', 'ml.m5.24xlarge', 'ml.c4.xlarge', 'ml.c4.2xlarge', 'ml.c4.4xlarge', 'ml.c4.8xlarge', 'ml.p2.xlarge', 'ml.p2.8xlarge', 'ml.p2.16xlarge', 'ml.p3.2xlarge', 'ml.p3.8xlarge', 'ml.p3.16xlarge',
'ml.c5.xlarge', 'ml.c5.2xlarge', 'ml.c5.4xlarge', 'ml.c5.9xlarge', 'ml.c5.18xlarge', ''], type=str.strip, required=False, help='The ML compute instance type.', default='ml.m4.xlarge')
parser.add_argument('--initial_variant_weight_3', type=_utils.str_to_float, required=False, help='Determines initial traffic distribution among all of the models that you specify in the endpoint configuration.', default=1.0)
parser.add_argument('--accelerator_type_3', choices=['ml.eia1.medium', 'ml.eia1.large', 'ml.eia1.xlarge', ''], type=str.strip, required=False, help='The size of the Elastic Inference (EI) instance to use for the production variant.', default='')
parser.add_argument('--resource_encryption_key', type=str.strip, required=False, help='The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).', default='')
parser.add_argument('--endpoint_config_tags', type=_utils.str_to_json_dict, required=False, help='An array of key-value pairs, to categorize AWS resources.', default='{}')
parser.add_argument('--endpoint_name', type=str.strip, required=False, help='The name of the endpoint.', default='')
parser.add_argument('--endpoint_tags', type=_utils.str_to_json_dict, required=False, help='An array of key-value pairs, to categorize AWS resources.', default='{}')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
client = _utils.get_client(args.region)
logging.info('Submitting Endpoint request to SageMaker...')
endpoint_name = _utils.deploy_model(client, vars(args))
logging.info('Endpoint creation request submitted. Waiting for completion...')
_utils.wait_for_endpoint_creation(client, endpoint_name)
with open('/tmp/endpoint_name.txt', 'w') as f:
f.write(endpoint_name)
logging.info('Endpoint creation completed.')
if __name__== "__main__":
  main() | components/aws/sagemaker/deploy/src/deploy.py | 0.591605 | 0.203312 |
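Only --region and --model_name_1 are required by the parser above. A hedged sketch of a minimal standalone invocation follows (values are placeholders; in Kubeflow Pipelines the script normally runs inside the component container, and AWS credentials must be available to the SageMaker client).

import subprocess

subprocess.run(
    [
        "python", "deploy.py",
        "--region", "us-east-1",
        "--model_name_1", "example-model",      # an existing SageMaker model name
        "--endpoint_name", "example-endpoint",  # optional; defaults to ''
    ],
    check=True,
)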
import pprint
import re # noqa: F401
import six
from aylien_news_api.configuration import Configuration
class RelatedStories(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'related_stories': 'list[Story]',
'story_body': 'str',
'story_language': 'str',
'story_title': 'str',
'published_at_end': 'datetime',
'published_at_start': 'datetime'
}
attribute_map = {
'related_stories': 'related_stories',
'story_body': 'story_body',
'story_language': 'story_language',
'story_title': 'story_title',
'published_at_end': 'published_at.end',
'published_at_start': 'published_at.start'
}
def __init__(self, related_stories=None, story_body=None, story_language=None, story_title=None, published_at_end=None, published_at_start=None, local_vars_configuration=None): # noqa: E501
"""RelatedStories - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._related_stories = None
self._story_body = None
self._story_language = None
self._story_title = None
self._published_at_end = None
self._published_at_start = None
self.discriminator = None
if related_stories is not None:
self.related_stories = related_stories
if story_body is not None:
self.story_body = story_body
if story_language is not None:
self.story_language = story_language
if story_title is not None:
self.story_title = story_title
if published_at_end is not None:
self.published_at_end = published_at_end
if published_at_start is not None:
self.published_at_start = published_at_start
@property
def related_stories(self):
"""Gets the related_stories of this RelatedStories. # noqa: E501
An array of related stories for the input story # noqa: E501
:return: The related_stories of this RelatedStories. # noqa: E501
:rtype: list[Story]
"""
return self._related_stories
@related_stories.setter
def related_stories(self, related_stories):
"""Sets the related_stories of this RelatedStories.
An array of related stories for the input story # noqa: E501
:param related_stories: The related_stories of this RelatedStories. # noqa: E501
:type related_stories: list[Story]
"""
self._related_stories = related_stories
@property
def story_body(self):
"""Gets the story_body of this RelatedStories. # noqa: E501
The input story body # noqa: E501
:return: The story_body of this RelatedStories. # noqa: E501
:rtype: str
"""
return self._story_body
@story_body.setter
def story_body(self, story_body):
"""Sets the story_body of this RelatedStories.
The input story body # noqa: E501
:param story_body: The story_body of this RelatedStories. # noqa: E501
:type story_body: str
"""
self._story_body = story_body
@property
def story_language(self):
"""Gets the story_language of this RelatedStories. # noqa: E501
The input story language # noqa: E501
:return: The story_language of this RelatedStories. # noqa: E501
:rtype: str
"""
return self._story_language
@story_language.setter
def story_language(self, story_language):
"""Sets the story_language of this RelatedStories.
The input story language # noqa: E501
:param story_language: The story_language of this RelatedStories. # noqa: E501
:type story_language: str
"""
self._story_language = story_language
@property
def story_title(self):
"""Gets the story_title of this RelatedStories. # noqa: E501
The input story title # noqa: E501
:return: The story_title of this RelatedStories. # noqa: E501
:rtype: str
"""
return self._story_title
@story_title.setter
def story_title(self, story_title):
"""Sets the story_title of this RelatedStories.
The input story title # noqa: E501
:param story_title: The story_title of this RelatedStories. # noqa: E501
:type story_title: str
"""
self._story_title = story_title
@property
def published_at_end(self):
"""Gets the published_at_end of this RelatedStories. # noqa: E501
The end of a period in which searched stories were published # noqa: E501
:return: The published_at_end of this RelatedStories. # noqa: E501
:rtype: datetime
"""
return self._published_at_end
@published_at_end.setter
def published_at_end(self, published_at_end):
"""Sets the published_at_end of this RelatedStories.
The end of a period in which searched stories were published # noqa: E501
:param published_at_end: The published_at_end of this RelatedStories. # noqa: E501
:type published_at_end: datetime
"""
self._published_at_end = published_at_end
@property
def published_at_start(self):
"""Gets the published_at_start of this RelatedStories. # noqa: E501
The start of a period in which searched stories were published # noqa: E501
:return: The published_at_start of this RelatedStories. # noqa: E501
:rtype: datetime
"""
return self._published_at_start
@published_at_start.setter
def published_at_start(self, published_at_start):
"""Sets the published_at_start of this RelatedStories.
The start of a period in which searched stories were published # noqa: E501
:param published_at_start: The published_at_start of this RelatedStories. # noqa: E501
:type published_at_start: datetime
"""
self._published_at_start = published_at_start
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RelatedStories):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, RelatedStories):
return True
        return self.to_dict() != other.to_dict()
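# Usage sketch for the auto-generated RelatedStories model above
# (aylien_news_api/models/related_stories.py). This block is illustrative only and
# not part of the generated client; the field values are arbitrary examples.
if __name__ == "__main__":
    example = RelatedStories(story_title="Example headline", story_language="en")
    # to_dict() returns a plain dict keyed by the attribute names declared above.
    print(example.to_dict())
    # Equality is defined via to_dict(), so an identical instance compares equal.
    print(example == RelatedStories(story_title="Example headline", story_language="en"))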
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as language_utils
from unittest.mock import patch
class LanguagesAddTestCase(BaseTestGenerator):
skip_on_database = ['gpdb']
scenarios = utils.generate_scenarios('create_language',
language_utils.test_cases)
def setUp(self):
super(LanguagesAddTestCase, self).setUp()
db_user = self.server['username']
self.data = self.test_data
self.data['name'] = "language_%s" % str(uuid.uuid4())[1:8]
self.data['lanowner'] = db_user
self.server_data = parent_node_dict["database"][-1]
self.server_id = self.server_data["server_id"]
self.db_id = self.server_data['db_id']
self.db_name = self.server_data["db_name"]
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
self.server_id,
self.db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database.")
def runTest(self):
"""This function will add language under test database."""
actual_status_code = ''
expected_status_code = ''
if self.is_positive_test:
response = self.create_language()
actual_status_code = response.status_code
expected_output = language_utils.verify_language(self)
expected_status_code = self.expected_data["status_code"]
self.assertDictEqual(expected_output, self.data)
else:
if hasattr(self, "missing_name"):
del self.data["name"]
response = self.create_language()
actual_status_code = response.status_code
expected_status_code = self.expected_data["status_code"]
if hasattr(self, "missing_lang_pack"):
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
response = self.create_language()
actual_status_code = response.status_code
expected_status_code = self.expected_data["status_code"]
if hasattr(self, "error_in_properties"):
with patch(self.mock_data["function_name"],
side_effect=[eval(self.mock_data["return_value"])]):
response = self.create_language()
actual_status_code = response.status_code
expected_status_code = self.expected_data["status_code"]
self.assertEqual(actual_status_code, expected_status_code)
def create_language(self):
"""This function will add language under test database."""
return self.tester.post(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' + str(self.db_id) + '/',
data=json.dumps(self.data),
content_type='html/json')
def tearDown(self):
"""This function delete added language and
disconnect the test database."""
if self.is_positive_test or hasattr(self, "error_in_properties"):
language_utils.delete_language(
self.server, self.db_name, self.data['name'])
database_utils.disconnect_database(self, self.server_id,
                                           self.db_id)
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Mapping, Optional, Sequence, Type, cast
from uuid import uuid4
from intelliflow.core.application.context.traversal import ContextVisitor
from intelliflow.core.platform.development import HostPlatform
from intelliflow.core.serialization import Serializable, dumps, loads
from intelliflow.core.signal_processing.definitions.metric_alarm_defs import (
ALARM_STATE_TRANSITION_DIMENSION_TYPE,
ALARM_TIME_DIMENSION_TYPE,
METRIC_NAME_DIMENSION_TYPE,
METRIC_PERIOD_DIMENSION_TYPE,
METRIC_STATISTIC_DIMENSION_TYPE,
METRIC_TIME_DIMENSION_TYPE,
AlarmDimension,
MetricDimension,
MetricSubDimensionMapType,
)
from intelliflow.core.signal_processing.dimension_constructs import DateVariant, Dimension, DimensionFilter, DimensionSpec
from intelliflow.core.signal_processing.signal import Signal, SignalDomainSpec, SignalType
from intelliflow.core.signal_processing.signal_source import (
AlarmSignalSourceAccessSpec,
CompositeAlarmSignalSourceAccessSpec,
MetricSignalSourceAccessSpec,
SignalSourceAccessSpec,
TimerSignalSourceAccessSpec,
)
from intelliflow.core.signal_processing.slot import Slot
class Node(Serializable["Node"], ABC):
"""Abstract class representing an atomic signal/slot provider within Application flow.
A Node generically represents a persistent action taken against an entity flowing through
    an Application. These entities are represented with the Signal abstraction. So basically a Node
is at least a transformation or a listener on a Signal. As a transformation, it should
definitely provide a Signal, and as a listener it should be taking some persistent action
and yielding new data in Platform::storage, which cannot be exposed as a Signal to the rest
of the flow.
So it should have at least one Slot and optionally expose a Signal. Because of this fact,
a Node should register its connections (input Signals and at least one Slot) during Activation
so that in runtime its Slot(s) can be executed, based on changes in its input Signal(s).
Obeys Serializable protocol to guarantee serialization at Context level.
"""
class QueryVisitor(ContextVisitor):
"""Node impls can (are supposed to) provide QueryVisitor impls to abstract high-level modules
from the details of how to build a query for a particular node type (and its attributes)"""
def __init__(self, node_type: Type["Node"], *args, **kwargs) -> None:
"""Impls should provide a pretty interface for more convenient high-level instantiation a query visitor"""
self._results: Dict[str, "Node"] = dict()
self._args: Sequence[Any] = list(args) if args else []
self._kwargs: Dict[str, Any] = dict(kwargs) if kwargs else {}
self._node_type = node_type if node_type else kwargs.get("node_type", None)
@property
def results(self) -> Mapping[str, "Node"]:
return self._results
# overrides
def visit(self, node: "Node") -> None:
if self.match(node):
self._results[node.node_id] = node
def match(self, node: "Node") -> bool:
return type(node) == self._node_type or isinstance(node, self._node_type)
# parent and child_nodes strong/direct references would normally mean memory leak here.
# currently this issue is intentionally ignored based on the particular use-cases that
# we are targeting for POC (as of 07/2020).
    # TODO Will move to 'weakref' or ignore it altogether, depending on our FUTURE evaluation.
def __init__(self, parent_node: "Node", child_nodes: List["Node"] = None, node_id: str = None) -> None:
"""
Create a Node.
Parameters
----------
parent_node: Node
Container Node. Owner/creator of this Node. Not to be confused with an upstream relationship.
That type of relationship will be up to sub-classes / Node implementations.
node_id: str
Unique ID for this Node. Within a Context this id is used to retrieve the handle for this Node.
*child_node : Node
Children.
"""
self._parent_node = parent_node
self._child_nodes = [] if child_nodes is None else child_nodes
# don't forget that our serializer should not call __init__ on new objects during deserialization.
# so this assignment is safe for now since we are using pickle. Pickle does not call __init__.
self._node_id = "{0}-{1}".format(self.__class__.__name__, uuid4()) if node_id is None else node_id
self._check_cylic_dependency()
def _check_cylic_dependency(self) -> None:
if self._parent_node == self or self in self._child_nodes:
raise TypeError("Node ({0}) has a cyclic relationship")
# TODO check the whole hierarchy
def __hash__(self) -> int:
return hash(self._node_id)
def __eq__(self, other: Any) -> bool:
try:
return self._node_id == cast("Node", other).node_id
except AttributeError:
return NotImplemented
@property
def parent_node(self) -> "Node":
return self._parent_node
@parent_node.setter
def parent_node(self, value: "Node") -> None:
self._parent_node = value
@property
def node_id(self) -> str:
return self._node_id
@abstractmethod
def _id(self) -> str:
pass
@property
def route_id(self) -> str:
return self._node_id
def is_root(self) -> bool:
return self._parent_node is None
@property
def child_nodes(self) -> Sequence["Node"]:
return self._child_nodes
def add_child(self, child: "Node") -> None:
child.parent_node = self
self._child_nodes.append(child)
@abstractmethod
def signal(self) -> Optional[Signal]:
"""What does this Node represent?
Emit it as a Signal during development and use the same during 'activation'
This method is particularly important while Nodes are referring each other.
While a Node binds to another one (like MarshalerNode to a DataNode) or when
a Node is implicitly used as an input for another Node (i.e Application::createDataset),
this method provides the necessary abstraction in terms of representing this particular
Node impl (a subclass) as a Signal object.
"""
pass
@abstractmethod
def _slots(self) -> Optional[List[Slot]]:
"""A Node is supposed to provide at least one Slot.
if a Slot is not going to be provided, then the existence of Node impl
should be questioned as it can only be a development-time entity such
as the views in "node.marshaling.filter_views" module. Currently only
exception to this logic is <ExternalDataNode>. That is why the return
type is still Optional. Otherwise this would not make sense.
The reason this method is supposed to be 'private' is that currently
we cannot envision a scenario where the slots would be accessed
        externally. So the sole purpose of this abstract method is to highlight
the above logic behind having Node impls.
"""
pass
def activate(self, platform: HostPlatform) -> None:
"""Do activation while the high-level Context (and Application) is being interpreted/activated.
Node activates itself and then scans its children.
This basic logic is what determines the order of route entries (i.e Code Segment)
within a DAG of Nodes (that represents an <Instruction>). High-level order of route entries
(aligned with the flow of Application code) is determined by :class:`InstructionChain` .
"""
self.do_activate(platform)
for child_node in self._child_nodes:
child_node.activate(platform)
@abstractmethod
def do_activate(self, platform: HostPlatform) -> None:
pass
def accept(self, visitor: ContextVisitor) -> None:
visitor.visit(self)
for child_node in self._child_nodes:
child_node.accept(visitor)
class DataNode(Node, ABC):
"""Base class for all Data (new Signal) provider Nodes.
It encapsulates bare-bones fields for the creation of a new Signal.
"""
class QueryVisitor(Node.QueryVisitor):
def __init__(self, data_id: str, *args, **kwargs) -> None:
            super().__init__(None, *args, **kwargs)
self._data_id: str = data_id if data_id else kwargs.get("data_id", None)
self._exact_match: bool = kwargs.get("exact_match", False)
# overrides
def match(self, node: "Node") -> bool:
if isinstance(node, DataNode):
if self._exact_match:
return self._data_id == node.data_id
else:
return self._data_id in node.data_id
return False
def __init__(
self,
data_id: str,
source_access_spec: SignalSourceAccessSpec,
domain_spec: SignalDomainSpec,
parent_node: Node = None,
child_nodes: List[Node] = None,
node_id: str = None,
) -> None:
"""
Create a DataNode.
Parameters
----------
data_id: str
Identifier for the logical data object represented by this Node.
        source_access_spec: SignalSourceAccessSpec
            Access spec describing the source of the Signal this data node provides.
domain_spec: SignalDomainSpec
See Signal::domain_spec
parent_node: Node
Container Node. Owner/creator of this Node. Not to be confused with an upstream relationship.
That type of relationship will be up to sub-classes / Node implementations.
node_id: str
Unique ID for this Node. Within a Context this id is used to retrieve the handle for this Node.
*child_node : Node
Children.
"""
super().__init__(parent_node, child_nodes, node_id)
self._data_id = data_id
self._source_access_spec = source_access_spec
self._domain_spec = domain_spec
# overrides
@property
def _id(self) -> str:
return self.data_id
@property
def data_id(self) -> str:
return self._data_id
# overrides
@property
def route_id(self) -> str:
# It is by design that we allow the same route_id generation for the same data_id.
# This allows the most recent node to overwrite / update the route (during activation).
return "{0}-{1}".format(self.__class__.__name__, self._data_id)
class TimerNode(Node):
class QueryVisitor(Node.QueryVisitor):
def __init__(self, timer_id: str, *args, **kwargs) -> None:
            super().__init__(None, *args, **kwargs)
self._timer_id: str = timer_id if timer_id else kwargs.get("timer_id", None)
self._exact_match: bool = kwargs.get("exact_match", False)
# overrides
def match(self, node: "Node") -> bool:
if isinstance(node, TimerNode):
if self._exact_match:
return self._timer_id == node.timer_id
else:
return self._timer_id in node.timer_id
return False
def __init__(
self,
timer_id: str,
schedule_expression: str,
date_dimension: DateVariant,
context_id: str,
parent_node: Node = None,
child_nodes: List[Node] = None,
node_id: str = None,
**kwargs,
) -> None:
super().__init__(parent_node, child_nodes, node_id)
self._timer_id = timer_id
self._schedule_expresssion = schedule_expression
spec: DimensionSpec = DimensionSpec()
spec.add_dimension(Dimension(date_dimension.name, date_dimension.type, date_dimension.params), None)
dim_filter: DimensionFilter = DimensionFilter()
dim_filter.add_dimension(date_dimension, None)
source_access_spec = TimerSignalSourceAccessSpec(timer_id, schedule_expression, context_id, **kwargs)
domain_spec = SignalDomainSpec(spec, dim_filter, None)
self._timer_signal = Signal(SignalType.TIMER_EVENT, source_access_spec, domain_spec, timer_id, False)
def signal(self) -> Optional[Signal]:
return self._timer_signal
def _slots(self) -> Optional[List[Slot]]:
return []
def do_activate(self, platform: HostPlatform) -> None:
platform.connect_internal_signal(self._timer_signal)
@property
def timer_id(self) -> str:
return self._timer_id
# overrides
@property
def _id(self) -> str:
return self.timer_id
class MetricNode(Node, ABC):
class QueryVisitor(Node.QueryVisitor):
def __init__(self, metric_id: str, sub_dimensions: MetricSubDimensionMapType, *args, **kwargs) -> None:
super().__init__(None, *args, **kwargs)
self._metric_id: str = metric_id if metric_id else kwargs.get("metric_id", None)
self._sub_dimensions = sub_dimensions
self._exact_match: bool = kwargs.get("exact_match", False)
# overrides
def match(self, node: "Node") -> bool:
if isinstance(node, MetricNode):
id_match = True
sub_dims_match = True
if self._metric_id:
if self._exact_match:
id_match = self._metric_id == node.metric_id
else:
id_match = self._metric_id in node.metric_id
if id_match:
if self._sub_dimensions:
sub_dims_match = all([node.sub_dimensions.get(key, None) == value for key, value in self._sub_dimensions.items()])
return id_match and sub_dims_match
return False
def __init__(
self,
signal_type: SignalType,
metric_id: str,
dimension_filter_spec: DimensionFilter,
source_access_spec: MetricSignalSourceAccessSpec,
parent_node: Node = None,
child_nodes: List[Node] = None,
node_id: str = None,
) -> None:
super().__init__(parent_node, child_nodes, node_id)
self._metric_id = metric_id
self._source_access_spec = source_access_spec
dimension_spec: DimensionSpec = DimensionSpec.load_from_pretty(
{
MetricDimension.NAME: {
type: METRIC_NAME_DIMENSION_TYPE,
MetricDimension.STATISTIC: {
type: METRIC_STATISTIC_DIMENSION_TYPE,
MetricDimension.PERIOD: {
type: METRIC_PERIOD_DIMENSION_TYPE,
MetricDimension.TIME: {type: METRIC_TIME_DIMENSION_TYPE, "format": "%Y-%m-%d %H:%M:%S"},
},
},
}
}
)
if not dimension_filter_spec.check_spec_match(dimension_spec):
raise ValueError(
f"Metric (id={metric_id}: Dimension filter {dimension_filter_spec!r} is not compatible "
f"with the expected dimension spec {dimension_spec!r}."
)
dimension_filter_spec.set_spec(dimension_spec)
domain_spec = SignalDomainSpec(dimension_spec, dimension_filter_spec, None)
self._metric_signal = Signal(signal_type, source_access_spec, domain_spec, metric_id)
def signal(self) -> Optional[Signal]:
return self._metric_signal
def _slots(self) -> Optional[List[Slot]]:
        raise NotImplementedError
@property
def metric_id(self) -> str:
return self._metric_id
@property
def sub_dimensions(self) -> MetricSubDimensionMapType:
return self._source_access_spec.sub_dimensions
# overrides
@property
def _id(self) -> str:
return self.metric_id
class AlarmNode(Node, ABC):
class QueryVisitor(Node.QueryVisitor):
def __init__(self, alarm_id: str, *args, **kwargs) -> None:
super().__init__(None, *args, **kwargs)
self._alarm_id: str = alarm_id if alarm_id else kwargs.get("alarm_id", None)
self._exact_match: bool = kwargs.get("exact_match", False)
# overrides
def match(self, node: "Node") -> bool:
if isinstance(node, AlarmNode):
if self._exact_match:
return self._alarm_id == node.alarm_id
else:
return self._alarm_id in node.alarm_id
return False
def __init__(
self,
signal_type: SignalType,
alarm_id: str,
dimension_filter_spec: DimensionFilter,
source_access_spec: AlarmSignalSourceAccessSpec,
parent_node: Node = None,
child_nodes: List[Node] = None,
node_id: str = None,
) -> None:
super().__init__(parent_node, child_nodes, node_id)
self._alarm_id = alarm_id
self._source_access_spec = source_access_spec
dimension_spec: DimensionSpec = DimensionSpec.load_from_pretty(
{
AlarmDimension.STATE_TRANSITION: {
type: ALARM_STATE_TRANSITION_DIMENSION_TYPE,
AlarmDimension.TIME: {type: ALARM_TIME_DIMENSION_TYPE, "format": "%Y-%m-%d %H:%M:%S"},
}
}
)
if not dimension_filter_spec.check_spec_match(dimension_spec):
raise ValueError(
f"Alarm (id={alarm_id}: Dimension filter {dimension_filter_spec!r} is not compatible "
f"with the expected dimension spec {dimension_spec!r}."
)
dimension_filter_spec.set_spec(dimension_spec)
domain_spec = SignalDomainSpec(dimension_spec, dimension_filter_spec, None)
self._alarm_signal = Signal(signal_type, source_access_spec, domain_spec, alarm_id)
def signal(self) -> Optional[Signal]:
return self._alarm_signal
def _slots(self) -> Optional[List[Slot]]:
        raise NotImplementedError
@property
def alarm_id(self) -> str:
return self._alarm_id
# overrides
@property
def _id(self) -> str:
return self.alarm_id
class CompositeAlarmNode(Node, ABC):
class QueryVisitor(Node.QueryVisitor):
def __init__(self, alarm_id: str, *args, **kwargs) -> None:
super().__init__(None, *args, **kwargs)
self._alarm_id: str = alarm_id if alarm_id else kwargs.get("alarm_id", None)
self._exact_match: bool = kwargs.get("exact_match", False)
# overrides
def match(self, node: "Node") -> bool:
if isinstance(node, CompositeAlarmNode):
if self._exact_match:
return self._alarm_id == node.alarm_id
else:
return self._alarm_id in node.alarm_id
return False
def __init__(
self,
signal_type: SignalType,
alarm_id: str,
dimension_filter_spec: DimensionFilter,
source_access_spec: CompositeAlarmSignalSourceAccessSpec,
parent_node: Node = None,
child_nodes: List[Node] = None,
node_id: str = None,
) -> None:
super().__init__(parent_node, child_nodes, node_id)
self._alarm_id = alarm_id
self._source_access_spec = source_access_spec
dimension_spec: DimensionSpec = DimensionSpec.load_from_pretty(
{
AlarmDimension.STATE_TRANSITION: {
type: ALARM_STATE_TRANSITION_DIMENSION_TYPE,
AlarmDimension.TIME: {type: ALARM_TIME_DIMENSION_TYPE, "format": "%Y-%m-%d %H:%M:%S"},
}
}
)
if not dimension_filter_spec.check_spec_match(dimension_spec):
raise ValueError(
f"Composite alarm (id={alarm_id}: Dimension filter {dimension_filter_spec!r} is not "
f"compatible with the expected dimension spec {dimension_spec!r}."
)
dimension_filter_spec.set_spec(dimension_spec)
domain_spec = SignalDomainSpec(dimension_spec, dimension_filter_spec, None)
self._alarm_signal = Signal(signal_type, source_access_spec, domain_spec, alarm_id)
def signal(self) -> Optional[Signal]:
return self._alarm_signal
def _slots(self) -> Optional[List[Slot]]:
raise NotImplemented
@property
def alarm_id(self) -> str:
return self._alarm_id
# overrides
@property
def _id(self) -> str:
        return self.alarm_id
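# Illustrative sketch, not part of src/intelliflow/core/application/context/node/base.py:
# a minimal concrete Node subclass showing which abstract members the base class
# expects. The class name is hypothetical, and this assumes Serializable adds no
# further abstract members beyond those declared on Node.
class ExampleNoOpNode(Node):
    """Toy Node that exposes no Signal and performs no work on activation."""
    def __init__(self, parent_node: Node = None, node_id: str = None) -> None:
        super().__init__(parent_node, None, node_id)
    # overrides
    @property
    def _id(self) -> str:
        return self.node_id
    # overrides
    def signal(self) -> Optional[Signal]:
        return None  # a real Node would emit the Signal it represents here
    # overrides
    def _slots(self) -> Optional[List[Slot]]:
        return []  # a real Node would return the Slot(s) to execute at runtime
    # overrides
    def do_activate(self, platform: HostPlatform) -> None:
        pass  # a real Node would register its connections on the platform here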
import os
import sys
import re
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# different source folders
version_info = sys.version_info[:2]
source_folder = {
(2, 5): 'py25',
(2, 6): 'py25',
(2, 7): 'py27',
(3, 4): 'py34',
(3, 5): 'py34',
(3, 6): 'py34',
(3, 7): 'py34',
}.get(version_info, None)
if not source_folder:
raise EnvironmentError("unsupported version of Python")
if not os.path.exists(source_folder):
raise EnvironmentError("broken distirbution, looking for " +
repr(source_folder) + " in " +
os.getcwd()
)
# load in the project metadata
init_py = open(os.path.join(source_folder, 'bacpypes', '__init__.py')).read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", init_py))
requirements = [
# no external requirements
]
setup_requirements = [
'pytest-runner',
]
test_requirements = [
'pytest',
'bacpypes',
]
setup(
name="bacpypes",
version=metadata['version'],
description="BACnet Communications Library",
long_description="BACpypes provides a BACnet application layer and network layer written in Python for daemons, scripting, and graphical interfaces.",
author=metadata['author'],
author_email=metadata['email'],
url="https://github.com/JoelBender/bacpypes",
packages=[
'bacpypes',
'bacpypes.local',
'bacpypes.service',
],
package_dir={
'': os.path.join(source_folder, ''),
},
include_package_data=True,
install_requires=requirements,
license="MIT",
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
)
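# Typical invocations for this setup.py (illustrative): install the package from
# the source folder selected above, or run the bundled tests via pytest-runner.
#
#   python setup.py install
#   python setup.py pytest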
import argparse
import os
import logging
import random
import numpy as np
import torch
from torchvision.datasets import MNIST
import torchvision.transforms as xforms
from torch.utils.data import DataLoader
import ttools
import ttools.interfaces
import pydiffvg
LOG = ttools.get_logger(__name__)
pydiffvg.render_pytorch.print_timing = False
torch.manual_seed(123)
np.random.seed(123)
torch.backends.cudnn.deterministic = True
latent_dim = 100
img_size = 32
num_paths = 8
num_segments = 8
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find("BatchNorm2d") != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
class VisdomImageCallback(ttools.callbacks.ImageDisplayCallback):
def visualized_image(self, batch, fwd_result):
return torch.cat([batch[0], fwd_result.cpu()], dim = 2)
# From https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/dcgan/dcgan.py
class Generator(torch.nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.fc = torch.nn.Sequential(
torch.nn.Linear(latent_dim, 128),
torch.nn.LeakyReLU(0.2, inplace=True),
torch.nn.Linear(128, 256),
torch.nn.LeakyReLU(0.2, inplace=True),
torch.nn.Linear(256, 512),
torch.nn.LeakyReLU(0.2, inplace=True),
torch.nn.Linear(512, 1024),
torch.nn.LeakyReLU(0.2, inplace=True),
torch.nn.Linear(1024, 2 * num_paths * (num_segments + 1) + num_paths + num_paths),
torch.nn.Sigmoid()
)
def forward(self, z):
out = self.fc(z)
# construct paths
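        # Layout of the fc output per batch element: for each of the num_paths paths,
        # 2 * (num_segments + 1) point coordinates, then one stroke width and one
        # stroke-color value per path, matching the last Linear layer's output size of
        # 2 * num_paths * (num_segments + 1) + num_paths + num_paths.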
imgs = []
for b in range(out.shape[0]):
index = 0
shapes = []
shape_groups = []
for i in range(num_paths):
points = img_size * out[b, index: index + 2 * (num_segments + 1)].view(-1, 2).cpu()
index += 2 * (num_segments + 1)
stroke_width = img_size * out[b, index].view(1).cpu()
index += 1
num_control_points = torch.zeros(num_segments, dtype = torch.int32) + 2
path = pydiffvg.Path(num_control_points = num_control_points,
points = points,
stroke_width = stroke_width,
is_closed = False)
shapes.append(path)
stroke_color = out[b, index].view(1).cpu()
index += 1
stroke_color = torch.cat([stroke_color, torch.tensor([0.0, 0.0, 1.0])])
path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([len(shapes) - 1]),
fill_color = None,
stroke_color = stroke_color)
shape_groups.append(path_group)
scene_args = pydiffvg.RenderFunction.serialize_scene(img_size, img_size, shapes, shape_groups)
render = pydiffvg.RenderFunction.apply
img = render(img_size, # width
img_size, # height
2, # num_samples_x
2, # num_samples_y
random.randint(0, 1048576), # seed
None,
*scene_args)
img = img[:, :, :1]
# HWC -> NCHW
img = img.unsqueeze(0)
img = img.permute(0, 3, 1, 2) # NHWC -> NCHW
imgs.append(img)
img = torch.cat(imgs, dim = 0)
return img
class Discriminator(torch.nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
def discriminator_block(in_filters, out_filters, bn=True):
block = [torch.nn.Conv2d(in_filters, out_filters, 3, 2, 1),
torch.nn.LeakyReLU(0.2, inplace=True),
torch.nn.Dropout2d(0.25)]
if bn:
block.append(torch.nn.BatchNorm2d(out_filters, 0.8))
return block
self.model = torch.nn.Sequential(
*discriminator_block(1, 16, bn=False),
*discriminator_block(16, 32),
*discriminator_block(32, 64),
*discriminator_block(64, 128),
)
# The height and width of downsampled image
ds_size = img_size // 2 ** 4
self.adv_layer = torch.nn.Sequential(
torch.nn.Linear(128 * ds_size ** 2, 1),
torch.nn.Sigmoid())
def forward(self, img):
out = self.model(img)
out = out.view(out.shape[0], -1)
validity = self.adv_layer(out)
return validity
class MNISTInterface(ttools.interfaces.SGANInterface):
"""An adapter to run or train a model."""
def __init__(self, gen, discrim, lr=2e-4):
super(MNISTInterface, self).__init__(gen, discrim, lr, opt = 'adam')
def forward(self, batch):
return self.gen(torch.zeros([batch[0].shape[0], latent_dim], device = self.device).normal_())
def _discriminator_input(self, batch, fwd_data, fake=False):
if fake:
return fwd_data
else:
return batch[0].to(self.device)
def train(args):
"""Train a MNIST classifier."""
# Setup train and val data
_xform = xforms.Compose([xforms.Resize([32, 32]), xforms.ToTensor()])
data = MNIST("data/mnist", train=True, download=True, transform=_xform)
# Initialize asynchronous dataloaders
loader = DataLoader(data, batch_size=args.bs, num_workers=2)
# Instantiate the models
gen = Generator()
discrim = Discriminator()
gen.apply(weights_init_normal)
discrim.apply(weights_init_normal)
# Checkpointer to save/recall model parameters
checkpointer_gen = ttools.Checkpointer(os.path.join(args.out, "checkpoints"), model=gen, prefix="gen_")
checkpointer_discrim = ttools.Checkpointer(os.path.join(args.out, "checkpoints"), model=discrim, prefix="discrim_")
# resume from a previous checkpoint, if any
checkpointer_gen.load_latest()
checkpointer_discrim.load_latest()
# Setup a training interface for the model
interface = MNISTInterface(gen, discrim, lr=args.lr)
# Create a training looper with the interface we defined
trainer = ttools.Trainer(interface)
# Adds several callbacks, that will be called by the trainer --------------
# A periodic checkpointing operation
trainer.add_callback(ttools.callbacks.CheckpointingCallback(checkpointer_gen))
trainer.add_callback(ttools.callbacks.CheckpointingCallback(checkpointer_discrim))
# A simple progress bar
trainer.add_callback(ttools.callbacks.ProgressBarCallback(
keys=["loss_g", "loss_d", "loss"]))
# A volatile logging using visdom
trainer.add_callback(ttools.callbacks.VisdomLoggingCallback(
keys=["loss_g", "loss_d", "loss"],
port=8080, env="mnist_demo"))
# Image
trainer.add_callback(VisdomImageCallback(port=8080, env="mnist_demo"))
# -------------------------------------------------------------------------
# Start the training
LOG.info("Training started, press Ctrl-C to interrupt.")
trainer.train(loader, num_epochs=args.epochs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# TODO: subparsers
parser.add_argument("data", help="directory where we download and store the MNIST dataset.")
parser.add_argument("out", help="directory where we write the checkpoints and visualizations.")
parser.add_argument("--lr", type=float, default=1e-4, help="learning rate for the optimizer.")
parser.add_argument("--epochs", type=int, default=500, help="number of epochs to train for.")
parser.add_argument("--bs", type=int, default=64, help="number of elements per batch.")
args = parser.parse_args()
ttools.set_logger(True) # activate debug prints
    train(args) | apps/sketch_gan.py | 0.842701 | 0.442215 |
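A minimal sketch of how the flat generator output above is laid out, mirroring the slicing in Generator.forward; the helper name decode_paths is hypothetical and not part of the original script.

import torch

def decode_paths(out_row, num_paths=8, num_segments=8, img_size=32):
    # Illustrative helper (assumption): per path the generator emits
    # (num_segments + 1) 2-D points, one stroke width and one stroke
    # intensity, which together give the Linear output size
    # 2 * num_paths * (num_segments + 1) + num_paths + num_paths.
    index = 0
    paths = []
    for _ in range(num_paths):
        points = img_size * out_row[index:index + 2 * (num_segments + 1)].view(-1, 2)
        index += 2 * (num_segments + 1)
        width = img_size * out_row[index]
        index += 1
        intensity = out_row[index]
        index += 1
        paths.append((points, width, intensity))
    return paths

row = torch.rand(2 * 8 * (8 + 1) + 8 + 8)  # same size as the last Linear layer
assert len(decode_paths(row)) == 8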
import opensearchpy
import curator
import os
import json
import string, random, tempfile
import time
from click import testing as clicktest
from mock import patch, Mock
import unittest
from . import CuratorTestCase
from . import testvars as testvars
import logging
logger = logging.getLogger(__name__)
host, port = os.environ.get('TEST_ES_SERVER', 'localhost:9200').split(':')
port = int(port) if port else 9200
global_client = opensearchpy.OpenSearch(hosts=[{'host': host, 'port': port}])
EMPTY710ROUTING = {'allocation': {'include': {'_tier_preference': 'data_content'}}}
delete_count_pattern = ('---\n'
'actions:\n'
' 1:\n'
' description: "Delete indices as filtered"\n'
' action: delete_indices\n'
' options:\n'
' continue_if_exception: False\n'
' disable_action: False\n'
' filters:\n'
' - filtertype: count\n'
' pattern: {0}\n'
' use_age: {1}\n'
' source: {2}\n'
' timestring: {3}\n'
' reverse: {4}\n'
' count: {5}\n')
class TestCLICountPattern(CuratorTestCase):
def test_match_proper_indices(self):
for i in range(1, 4):
self.create_index('a-{0}'.format(i))
for i in range(4, 7):
self.create_index('b-{0}'.format(i))
for i in range(5, 9):
self.create_index('c-{0}'.format(i))
self.create_index('not_a_match')
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(
self.args['actionfile'],
delete_count_pattern.format(
'\'^(a|b|c)-\d$\'', 'false', 'name', '\'%Y.%m.%d\'', 'true', 1
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
self.args['actionfile']
],
)
indices = sorted(list(self.client.indices.get('_all')))
self.assertEquals(['a-3', 'b-6', 'c-8', 'not_a_match'], indices)
def test_match_proper_indices_by_age(self):
self.create_index('a-2017.10.01')
self.create_index('a-2017.10.02')
self.create_index('a-2017.10.03')
self.create_index('b-2017.09.01')
self.create_index('b-2017.09.02')
self.create_index('b-2017.09.03')
self.create_index('not_a_match')
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(
self.args['actionfile'],
delete_count_pattern.format(
'\'^(a|b)-\d{4}\.\d{2}\.\d{2}$\'', 'true', 'name', '\'%Y.%m.%d\'', 'true', 1
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
self.args['actionfile']
],
)
indices = sorted(list(self.client.indices.get('_all')))
self.assertEquals(['a-2017.10.03', 'b-2017.09.03', 'not_a_match'], indices)
def test_count_indices_by_age_same_age(self):
key = 'tag'
value = 'value'
at = 'include'
ver = curator.get_version(self.client)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.allocation_count_test.format(key, value, at, False))
self.create_index('c-2017.10.01')
self.create_index('c-2017.10.02')
self.create_index('c-2017.10.03')
self.create_index('a-2017.10.01')
self.create_index('a-2017.10.02')
self.create_index('a-2017.10.03')
self.create_index('b-2017.10.01')
self.create_index('b-2017.10.02')
self.create_index('b-2017.10.03')
self.create_index('d-2017.10.01')
self.create_index('d-2017.10.02')
self.create_index('d-2017.10.03')
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
self.args['actionfile']
],
)
self.assertEquals(value,
self.client.indices.get_settings(index='c-2017.10.03')['c-2017.10.03']['settings']['index']['routing']['allocation'][at][key])
self.assertEquals(value,
self.client.indices.get_settings(index='d-2017.10.03')['d-2017.10.03']['settings']['index']['routing']['allocation'][at][key])
idxlist = [
'a-2017.10.01', 'a-2017.10.02', 'a-2017.10.03',
'b-2017.10.01', 'b-2017.10.02', 'b-2017.10.03',
'c-2017.10.01', 'c-2017.10.02',
'd-2017.10.01', 'd-2017.10.02'
]
for idx in idxlist:
if ver >= (7, 10, 0):
self.assertEquals(
EMPTY710ROUTING,
self.client.indices.get_settings(index=idx)[idx]['settings']['index']['routing']
)
else:
self.assertNotIn('routing',
self.client.indices.get_settings(
index=idx)[idx]['settings']['index']
            ) | test/integration/test_count_pattern.py | 0.365004 | 0.089654 |
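The outcome asserted by test_match_proper_indices can be summarised by the grouping rule sketched below. This is an illustration of the tests' expected result only, not Curator's actual count-filter implementation, and expected_survivors is a hypothetical helper.

import re

def expected_survivors(indices, pattern, count):
    # Sketch (assumption): with use_age false, indices are grouped by the first
    # capture group of `pattern`, each group is sorted by name in reverse, and
    # only the newest `count` per group escape deletion; indices that do not
    # match the pattern are never touched.
    keep = {name for name in indices if not re.match(pattern, name)}
    groups = {}
    for name in indices:
        match = re.match(pattern, name)
        if match:
            groups.setdefault(match.group(1), []).append(name)
    for members in groups.values():
        keep.update(sorted(members, reverse=True)[:count])
    return sorted(keep)

assert expected_survivors(
    ['a-1', 'a-2', 'a-3', 'b-4', 'b-5', 'b-6', 'c-5', 'c-6', 'c-7', 'c-8', 'not_a_match'],
    r'^(a|b|c)-\d$', 1) == ['a-3', 'b-6', 'c-8', 'not_a_match']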
from unittest import TestCase
from mkdocs.structure.nav import Section
from ...meta import Meta
from ...navigation import AwesomeNavigation
class TestSetTitle(TestCase):
def setUp(self):
self.section = Section('Section', [])
def test(self):
AwesomeNavigation._set_title(self.section, Meta(title='Section Title'))
self.assertEqual(self.section.title, 'Section Title')
def test_none_value(self):
AwesomeNavigation._set_title(self.section, Meta())
self.assertEqual(self.section.title, 'Section')
def test_empty_string_value(self):
AwesomeNavigation._set_title(self.section, Meta(title=''))
self.assertEqual(self.section.title, '')
class TestCollapse(TestCase):
def setUp(self):
self.child = Section('Child', [])
self.parent = Section('Parent', [
self.child
])
def test_default(self):
self.assertEqual(
AwesomeNavigation._collapse(self.parent, collapse=None, collapse_recursive=False),
self.parent
)
def test_local_false(self):
self.assertEqual(
AwesomeNavigation._collapse(self.parent, collapse=False, collapse_recursive=False),
self.parent
)
def test_explicit(self):
self.assertEqual(
AwesomeNavigation._collapse(self.parent, collapse=True, collapse_recursive=False),
self.child
)
def test_explicit_and_recursive(self):
self.assertEqual(
AwesomeNavigation._collapse(self.parent, collapse=True, collapse_recursive=True),
self.child
)
def test_recursive(self):
self.assertEqual(
AwesomeNavigation._collapse(self.parent, collapse=None, collapse_recursive=True),
self.child
)
def test_local_override_false(self):
self.assertEqual(
AwesomeNavigation._collapse(self.parent, collapse=False, collapse_recursive=True),
self.parent
)
def test_multiple_children(self):
section = Section('Parent', [
Section('Child 1', []),
Section('Child 2', [])
])
self.assertEqual(AwesomeNavigation._collapse(section, None, False), section)
self.assertEqual(AwesomeNavigation._collapse(section, None, True), section)
self.assertEqual(AwesomeNavigation._collapse(section, False, False), section)
self.assertEqual(AwesomeNavigation._collapse(section, True, False), section)
self.assertEqual(AwesomeNavigation._collapse(section, True, True), section)
        self.assertEqual(AwesomeNavigation._collapse(section, False, True), section) | mkdocs_awesome_pages_plugin/tests/navigation/test_static.py | 0.563618 | 0.355523 |
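The behaviour encoded by TestCollapse condenses to the rule below; this is a sketch inferred from the assertions, not the plugin's actual code, and collapse_single_child is a hypothetical name.

def collapse_single_child(children, collapse, collapse_recursive):
    # Assumption inferred from the tests above: a section with exactly one
    # child is replaced by that child when its local collapse flag is True, or
    # when the flag is unset (None) and recursive collapsing is enabled; an
    # explicit local False always keeps the parent.
    effective = collapse if collapse is not None else collapse_recursive
    if len(children) == 1 and effective:
        return children[0]
    return children

assert collapse_single_child(['child'], None, True) == 'child'
assert collapse_single_child(['child'], False, True) == ['child']
assert collapse_single_child(['c1', 'c2'], True, True) == ['c1', 'c2']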
import csv
import io
import typing
import audeer
import audfactory.core.api as audfactory
# Skip doctests until we have public lookup tables
__doctest_skip__ = ['*']
LOOKUP_EXT = 'csv'
class Lookup:
r"""Lookup table for managing artifact flavors on Artifactory.
It creates one row for every flavor,
and assigns a unique ID to it.
The columns are parameters associated with the flavor.
The parameter names are stored as column headers.
The column values can be of type
:class:`bool`,
:class:`float`,
:class:`int`,
:class:`NoneType`,
:class:`str`.
You cannot use strings
that would be converted to any of the other types like
``'None'``,
``'True'``,
``'False'``,
``'4.0'``,
and ``'4'``.
The following code converts an :class:`audfactory.Lookup` object
into a :class:`pandas.DataFrame`:
.. code-block:: python
index, data = [], []
if len(lookup.table) > 1:
index = [entry[0] for entry in lookup.table[1:]]
data = [entry[1:] for entry in lookup.table[1:]]
df = pd.DataFrame(data=data, index=index, columns=lookup.columns)
Args:
server: URL of Artifactory server,
e.g. https://audeering.jfrog.io/artifactory
repository: repository of lookup table
group_id: group ID of lookup table
name: name of lookup table
version: version of lookup table
Raises:
RuntimeError: if no lookup tables or no lookup
table with the specified version can be found
Example:
>>> lookup = Lookup(
... 'https://artifactory.audeering.com/artifactory',
... 'models-public-local',
... 'com.audeering.models.gender.voxcnn',
... version='0.2.0',
... )
>>> lookup
id purpose sampling_rate train-db
3bb24968-759a-11ea-ab25-309c2364e602 prod 16000 voxceleb1
>>> lookup.table
[['id', 'purpose', 'sampling_rate', 'train-db'],
['3bb24968-759a-11ea-ab25-309c2364e602', 'prod', 16000, 'voxceleb1']]
>>> lookup['3bb24968-759a-11ea-ab25-309c2364e602']
{'purpose': 'prod', 'sampling_rate': 16000, 'train-db': 'voxceleb1'}
"""
def __init__(
self,
server,
repository: str,
group_id: str,
*,
name: str = 'lookup',
version: str = None,
):
self.server = server
"""server URL"""
self.group_id = group_id
"""group ID of lookup table"""
self.name = name
"""name of lookup table"""
self.repository = repository
"""repository of lookup table"""
if version is None:
version = Lookup.latest_version(
server,
repository,
group_id,
name=name,
)
if version is None:
url = audfactory.url(
server,
repository=repository,
group_id=group_id,
name=name,
)
raise RuntimeError(
f"No lookup tables available under '{url}'"
)
elif not Lookup.exists(
server, repository, group_id, version, name=name
):
url = audfactory.url(
server,
repository=repository,
group_id=group_id,
name=name,
version=version,
)
raise RuntimeError(
f"Lookup table '{url}/"
f"{name}-{version}.{LOOKUP_EXT}' does not exist yet."
)
self.version = version
"""version of lookup table"""
self.url = _url_table(server, repository, group_id, name, version)
"""Artifactory URL of lookup table"""
def __getitem__(self, uid: str) -> typing.Dict:
r"""Get lookup table entry by ID.
Args:
uid: ID of lookup table entry
Returns:
lookup table entry
"""
table = self.table
columns = _columns(table)
item = {}
for row in table[1:]:
if row[0] == uid:
item = {c: p for c, p in zip(columns, row[1:])}
break
return item
def __repr__(self):
r"""String representation of lokkup table."""
table = self.table
padding = 2
# Longest string in each column
transposed_table = [list(x) for x in zip(*table)]
col_width = [
len(max([str(word) for word in row], key=len)) + padding
for row in transposed_table
]
# Don't pad the last column
col_width[-1] -= padding
row = [
''.join(
str(word).ljust(width) for word, width in zip(row, col_width)
)
for row in table
]
return '\n'.join(row)
@property
def columns(self) -> typing.List:
r"""Lookup table column names."""
table = _download(self.url)
return _columns(table)
@property
def ids(self) -> typing.List:
r"""Lookup table ids."""
table = _download(self.url)
return _ids(table)
@property
def table(self) -> typing.List[typing.List]:
r"""Lookup table."""
return _download(self.url)
def append(self, params: typing.Dict[str, typing.Any]) -> str:
r"""Append entry to lookup table.
The lookup table entry gets a unique ID
from ``params``,
:attr:`self.name`,
:attr:`self.group_id`,
:attr:`self.version`,
and :attr:`self.repository`.
Args:
params: lookup table entry in the form of ``{column: parameter}``
Returns:
ID of added lookup table entry
Raises:
RuntimeError: if entry for given ``params`` exists already,
or the columns ``params`` do not match the columns
of the lookup
ValueError: if ``params`` contain unsupported data types
"""
table = self.table
columns = _columns(table)
_check_params_type(params)
params = dict(sorted(params.items()))
if self.contains(params):
raise RuntimeError(f"Entry for '{params}' already exists.")
if list(params.keys()) != columns:
raise RuntimeError(
f"Table columns '{columns}' do not match parameters '{params}'"
)
        # Add a UID to the new row and append it to the table
uid = self.generate_uid(
params=str(params),
group_id=self.group_id,
name=self.name,
version=self.version,
repository=self.repository,
)
new_row = [uid] + list(params.values())
table.append(new_row)
_upload(table, self.url)
return uid
def clear(self) -> None:
r"""Clear lookup table."""
table = self.table
table = [table[0]] # empty table with header
_upload(table, self.url)
def contains(self, params: typing.Dict[str, typing.Any]) -> bool:
r"""Check if lookup table contains entry.
Args:
params: lookup table entry in the form of ``{column: parameter}``
Returns:
``True`` if lookup table contains entry
"""
try:
self.find(params)
except RuntimeError:
return False
return True
def extend(
self,
params: typing.Union[
str,
typing.Sequence[str],
typing.Dict[str, typing.Any],
],
) -> typing.List[typing.List]:
r"""Extend columns of lookup table.
If no parameter values are given for the new columns,
they are set to ``None``.
Args:
params: lookup table entry in the form of ``{column: parameter}``
or ``[column]`` or ``column``
Returns:
lookup table
Raises:
ValueError: if ``params`` contain unsupported data types
"""
if isinstance(params, str):
params = [params]
if isinstance(params, (tuple, list)):
params = {param: None for param in params}
_check_params_type(params)
table = self.table
columns = _columns(table)
for param, value in params.items():
if param not in columns:
# Append param key to columns
table[0] += [param]
# FIXME: the following code seems ugly to me
if len(table) == 1 and value is not None:
# Start from empty table, by first updating the columns
_upload(table, self.url)
original_params = {p: None for p in columns}
self.append({**original_params, **{param: value}})
table = self.table
else:
for n in range(len(table[1:])):
# Append param value to every row
table[n + 1] += [value]
table = _sort(table)
_upload(table, self.url)
return table
def find(self, params: typing.Dict[str, typing.Any]) -> str:
r"""Find entry in lookup table.
Args:
params: lookup table entry in the form of ``{column: parameter}``
Returns:
ID of lookup table entry
Raises:
RuntimeError: if lookup table entry cannot be found
"""
table = self.table
params = dict(sorted(params.items()))
for row in table[1:]:
uid = row[0]
entries = row[1:]
if entries == list(params.values()):
return uid
raise RuntimeError(
f"Could not find requested entry '{params}' "
f"in version {self.version}:\n\n{table}"
)
def remove(self, params: typing.Dict[str, typing.Any]) -> str:
r"""Remove entry from lookup table.
Args:
params: lookup table entry in the form of ``{column: parameter}``
Returns:
ID of removed entry
"""
table = self.table
uid = self.find(params)
for n in range(len(table)):
if table[n][0] == uid:
table.pop(n)
break
_upload(table, self.url)
return uid
@staticmethod
def create(
server: str,
repository: str,
group_id: str,
version: str,
params: typing.Sequence[str] = (),
*,
name: str = 'lookup',
force: bool = False
) -> str:
r"""Create lookup table on server.
Args:
server: URL of Artifactory server,
e.g. https://audeering.jfrog.io/artifactory
repository: repository of lookup table
group_id: group ID of lookup table
version: version of lookup table
params: lookup table column names
name: name of lookup table
force: if ``True`` an existing lookup table is overwritten
Returns:
URL of lookup table
Raises:
RuntimeError: if lookup table exists already
and ``force=False``
"""
ex = Lookup.exists(server, repository, group_id, version, name=name)
url = _url_table(server, repository, group_id, name, version)
if force or not ex:
table = [['id'] + sorted(params)]
_upload(table, url)
else:
raise RuntimeError(
f"Lookup table '{name}-{version}' exists already."
)
return url
@staticmethod
def delete(
server: str,
repository: str,
group_id: str,
version: str,
*,
name: str = 'lookup',
force: bool = True,
) -> None:
r"""Delete lookup table on server.
Args:
server: URL of Artifactory server,
e.g. https://audeering.jfrog.io/artifactory
repository: repository of lookup table
group_id: group ID of lookup table
version: version of lookup table
name: name of lookup table
force: if ``True`` removes lookup table even if not empty
Raises:
RuntimeError: if lookup table is not empty
and ``force=False``
"""
lookup = Lookup(
server,
repository,
group_id,
name=name,
version=version,
)
if len(lookup.table) > 1:
if not force:
raise RuntimeError(
f"Cannot remove lookup table '{name}-{version}' "
f"if it is not empty.")
lookup.clear()
audfactory.path(lookup.url).parent.rmdir()
@staticmethod
def exists(
server: str,
repository: str,
group_id: str,
version: str,
*,
name: str = 'lookup',
) -> bool:
r"""Check if lookup table exists on server.
Args:
server: URL of Artifactory server,
e.g. https://audeering.jfrog.io/artifactory
repository: repository of lookup table
group_id: group ID of lookup table
version: version of lookup table
name: name of lookup table
Returns:
``True`` if lookup table exists
Example:
>>> Lookup.exists(
... 'https://artifactory.audeering.com/artifactory',
... 'models-public-local',
... 'com.audeering.models.gender.voxcnn',
... '0.1.0',
... )
True
"""
versions = audfactory.versions(server, repository, group_id, name)
return version in versions
@staticmethod
def latest_version(
server: str,
repository: str,
group_id: str,
*,
params: typing.Dict[str, typing.Any] = None,
name: str = 'lookup',
) -> typing.Optional[str]:
r"""Latest version of lookup table on server.
Args:
server: URL of Artifactory server,
e.g. https://audeering.jfrog.io/artifactory
repository: repository of lookup table
group_id: group ID of lookup table
params: lookup table entry in the form of ``{column: parameter}``
name: name of lookup table
Returns:
latest version of lookup table
Example:
>>> Lookup.latest_version(
... 'https://artifactory.audeering.com/artifactory',
... 'models-public-local',
... 'com.audeering.models.gender.voxcnn',
... )
'0.2.0'
"""
v = Lookup.versions(server, repository, group_id, params, name=name)
if len(v) > 0:
return v[-1]
else:
return None
@staticmethod
def generate_uid(
*,
params: typing.Dict[str, typing.Any],
name: str,
group_id: str,
version: str,
repository: str,
) -> str:
r"""Generate unique ID.
It converts ``params`` to a string,
and concatenates it with ``name``, ``group_id``, ``version``,
and ``repository``.
From that concatenated string a unique ID is derived.
Args:
params: params in the form of ``{column: parameter}``
group_id: group ID of lookup table
name: name of lookup table
version: version of lookup table
repository: repository of lookup table
Returns:
unique identifier of length 36
Example:
>>> Lookup.generate_uid(
... params={0: None},
... name='name',
... group_id='group.id',
... version='1.0.0',
... repository='models-public-local',
... )
'afe694a5-8bad-4dbc-73ba-636f18340615'
"""
unique_string = (
str(params)
+ group_id
+ name
+ version
+ repository
)
uid = audeer.uid(from_string=unique_string)
return uid
@staticmethod
def versions(
server: str,
repository: str,
group_id: str,
params: typing.Dict[str, typing.Any] = None,
*,
name: str = 'lookup',
) -> list:
r"""Available versions of lookup table on server.
Args:
server: URL of Artifactory server,
e.g. https://audeering.jfrog.io/artifactory
repository: repository of lookup table
group_id: group ID of lookup table
params: lookup table entry in the form of ``{column: parameter}``
name: name of lookup table
Returns:
available versions of lookup table
Example:
>>> Lookup.versions(
... 'https://artifactory.audeering.com/artifactory',
... 'models-public-local',
... 'com.audeering.models.gender.voxcnn',
... )
['0.1.0', '0.2.0']
"""
versions = audfactory.versions(server, repository, group_id, name)
if params is not None:
filtered_versions = []
for version in versions:
lookup = Lookup(
server,
repository,
group_id,
name=name,
version=version,
)
if lookup.contains(params):
filtered_versions.append(version)
versions = filtered_versions
return versions
def _check_params_type(params):
r"""Raise error if params includes wrong data types."""
for value in params.values():
if not isinstance(value, (bool, float, int, type(None), str)):
raise ValueError(
"'params' can only contain values of type: "
"bool, float, int, NoneType, str. "
f"Yours includes {type(value)}"
)
if isinstance(value, str):
# Forbid strings that are converted to other types
try:
int(value)
except ValueError:
pass
else:
raise ValueError(
f"'{value}' is forbidden, use the int {value} instead"
)
try:
float(value)
except ValueError:
pass
else:
raise ValueError(
f"'{value}' is forbidden, use the float {value} instead"
)
if value in ['True', 'False']:
raise ValueError(
f"'{value}' is forbidden, use the bool {value} instead"
)
if value == 'None':
raise ValueError(
f"'{value}' is forbidden, use the NoneType {value} instead"
)
def _columns(table: typing.List[typing.List]) -> typing.List:
return table[0][1:]
def _ids(table: typing.List[typing.List]) -> typing.List:
return [row[0] for row in table[1:]]
def _import_csv(s):
r"""Convert strings to int, float, and None.
    The built-in CSV reader returns only strings.
"""
if s == '':
return None
if s == 'True':
return True
if s == 'False':
return False
try:
s = int(s)
except ValueError:
try:
s = float(s)
except ValueError:
pass
return s
def _download(url: str) -> typing.List[typing.List]:
r = audfactory.rest_api_get(url)
code = r.status_code
if code in [403, 404]: # pragma: no cover
raise RuntimeError(
f"{code}, URL not found or no access rights: '{url}'"
)
elif code != 200: # pragma: no cover
raise RuntimeError(
f"{code}, problem downloading '{url}'.\n{audfactory.REPORT_ISSUE}"
)
r.encoding = 'utf-8'
table = []
csvreader = csv.reader(r.text.splitlines(), delimiter=',')
for row in csvreader:
# Convert '' to None
row = [_import_csv(r) for r in row]
table.append(row)
return table
def _sort(table: typing.List[typing.List]) -> typing.List[typing.List]:
# Get index to sort each row, excluding 'id'
idx = sorted(range(len(table[0][1:])), key=lambda k: table[0][k + 1])
idx = [0] + [i + 1 for i in idx]
for n in range(len(table)):
table[n] = [table[n][i] for i in idx]
return table
def _upload(
table: typing.List[typing.List],
url: str,
) -> str:
r"""Upload table to a CSV file on Artifactory without using a tmp file."""
fobj = io.StringIO()
writer = csv.writer(fobj, delimiter=',')
writer.writerows(table)
    # Seek to beginning of file, otherwise an empty CSV file will be written
fobj.seek(0)
artifactory_path = audfactory.path(url)
if not artifactory_path.parent.exists():
artifactory_path.parent.mkdir()
artifactory_path.deploy(fobj)
return url
def _url_table(
server: str,
repository: str,
group_id: str,
name: str,
version: str,
) -> str:
url = audfactory.url(
server,
repository=repository,
group_id=group_id,
name=name,
version=version,
)
    return f'{url}/{name}-{version}.{LOOKUP_EXT}' | audfactory/core/lookup.py | 0.836955 | 0.382516 |
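A runnable sketch of the in-memory table layout the Lookup class manipulates; the values are taken from the class doctest above, everything else is illustrative.

# Illustrative only: row 0 holds the column headers ('id' first), every other
# row is one flavor entry, mirroring what _columns, _ids and __getitem__ read.
table = [
    ['id', 'purpose', 'sampling_rate', 'train-db'],
    ['3bb24968-759a-11ea-ab25-309c2364e602', 'prod', 16000, 'voxceleb1'],
]
columns = table[0][1:]                    # what Lookup.columns returns
ids = [row[0] for row in table[1:]]       # what Lookup.ids returns
entry = dict(zip(columns, table[1][1:]))  # what lookup[uid] returns
assert entry == {'purpose': 'prod', 'sampling_rate': 16000, 'train-db': 'voxceleb1'}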
import collections
import functools
import itertools
import json
import operator
import pathlib
import subprocess
import sys
import pytest
PATH = pathlib.Path(".pytest-deps")
DEPS = set()
TEST_FILES = set()
def pytest_addoption(parser):
group = parser.getgroup("run-changed")
group.addoption(
"--changed-only",
action="store_true",
dest="changed_only",
help="",
)
def pytest_unconfigure():
key = operator.itemgetter(0)
indexed = {
caller: set(c for _, c in called)
for caller, called in itertools.groupby(sorted(DEPS, key=key), key=key)
}
result = {}
for tf in TEST_FILES:
mod_set = {tf}
new = True
while new:
            # Follow one level of the call graph for every file gathered so far.
            new = [indexed.get(fname, set()) for fname in mod_set]
new = functools.reduce(lambda x, y: x.union(y), new, set())
new = new.difference(mod_set)
mod_set = mod_set.union(new)
result[tf] = mod_set
del indexed
result = {tf: list(mod_set) for tf, mod_set in result.items()}
if PATH.exists():
with open(PATH, "r") as fp:
old_index = json.load(fp)
for tf, mod_set in result.items():
old_index[tf] = mod_set
result = old_index
with open(PATH, "w") as fp:
json.dump(result, fp, sort_keys=True, indent=4)
def pytest_runtest_makereport(item, call):
pass
def pytest_collection_modifyitems(session, config, items):
changed_only = config.getoption("changed_only")
if changed_only and PATH.exists():
with open(PATH, "r") as fp:
inverse_file_index = json.load(fp)
file_index = collections.defaultdict(set)
for tf, mod_set in inverse_file_index.items():
for mod in mod_set:
file_index[mod].add(tf)
dirty = get_dirty_files()
selected_test_files = [set(file_index.get(fname, [])) for fname in dirty]
selected_test_files = functools.reduce(
lambda x, y: x.union(y), selected_test_files, set()
)
unknown_test_files = {
item.module.__file__
for item in items
if item.module.__file__ not in file_index
}
items[:] = [
item
for item in items
if item.module.__file__ in selected_test_files
or item.module.__file__ in unknown_test_files
]
@pytest.fixture(scope="function", autouse=True)
def profile(request):
if request.config.getoption("changed_only"):
TEST_FILES.add(request.node.module.__file__)
def trace_calls(frame, event, arg):
if event != "call":
return
func_filename = frame.f_code.co_filename
caller_filename = frame.f_back.f_code.co_filename
if func_filename != caller_filename:
DEPS.add((caller_filename, func_filename))
return
sys.settrace(trace_calls)
yield
sys.settrace(None)
else:
yield
def get_dirty_files():
proc = subprocess.run(["git", "status", "--porcelain"], stdout=subprocess.PIPE)
proc.check_returncode()
modified = set()
untracked = set()
for line in proc.stdout.decode().splitlines():
action, fname = line[0:3], line[3:]
if action == " M ":
modified.add(fname)
elif action == " D ":
modified.add(fname)
elif action == "?? ":
fname = pathlib.Path(fname)
if fname.is_dir():
for ffname in fname.glob("*"):
untracked.add(str(ffname))
else:
untracked.add(str(fname))
    return {str(pathlib.Path(fname).absolute()) for fname in modified.union(untracked)} | pytest_run_changed/__init__.py | 0.27914 | 0.158109
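A small self-contained sketch of the transitive dependency closure that pytest_unconfigure builds from DEPS above; the file names are hypothetical and only illustrate the fixed-point loop:
indexed = {  # hypothetical caller -> called-files index, as built from DEPS
    "tests/test_a.py": {"pkg/a.py"},
    "pkg/a.py": {"pkg/b.py"},
}
mod_set = {"tests/test_a.py"}
new = True
while new:
    # files reachable in one more hop, keeping only the ones not seen yet
    new = set().union(*(indexed.get(f, set()) for f in mod_set)) - mod_set
    mod_set |= new
assert mod_set == {"tests/test_a.py", "pkg/a.py", "pkg/b.py"}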
import argparse
from gym.spaces import Dict, Discrete, Tuple, MultiDiscrete
import logging
import os
import ray
from ray import tune
from ray.tune import register_env
from ray.rllib.algorithms.qmix import QMixConfig
from ray.rllib.env.multi_agent_env import ENV_STATE
from ray.rllib.examples.env.two_step_game import TwoStepGame
from ray.rllib.policy.policy import PolicySpec
from ray.rllib.utils.test_utils import check_learning_achieved
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument(
"--run", type=str, default="PG", help="The RLlib-registered algorithm to use."
)
parser.add_argument(
"--framework",
choices=["tf", "tf2", "tfe", "torch"],
default="tf",
help="The DL framework specifier.",
)
parser.add_argument("--num-cpus", type=int, default=0)
parser.add_argument(
"--mixer",
type=str,
default="qmix",
choices=["qmix", "vdn", "none"],
help="The mixer model to use.",
)
parser.add_argument(
"--as-test",
action="store_true",
help="Whether this script should be run as a test: --stop-reward must "
"be achieved within --stop-timesteps AND --stop-iters.",
)
parser.add_argument(
"--stop-iters", type=int, default=200, help="Number of iterations to train."
)
parser.add_argument(
"--stop-timesteps", type=int, default=70000, help="Number of timesteps to train."
)
parser.add_argument(
"--stop-reward", type=float, default=8.0, help="Reward at which we stop training."
)
parser.add_argument(
"--local-mode",
action="store_true",
help="Init Ray in local mode for easier debugging.",
)
if __name__ == "__main__":
args = parser.parse_args()
ray.init(num_cpus=args.num_cpus or None, local_mode=args.local_mode)
if args.run == "contrib/MADDPG":
        logger.warning(
            "`contrib/MADDPG` is no longer a valid algorithm descriptor! "
"Use `MADDPG` instead."
)
args.run = "MADDPG"
grouping = {
"group_1": [0, 1],
}
obs_space = Tuple(
[
Dict(
{
"obs": MultiDiscrete([2, 2, 2, 3]),
ENV_STATE: MultiDiscrete([2, 2, 2]),
}
),
Dict(
{
"obs": MultiDiscrete([2, 2, 2, 3]),
ENV_STATE: MultiDiscrete([2, 2, 2]),
}
),
]
)
act_space = Tuple(
[
TwoStepGame.action_space,
TwoStepGame.action_space,
]
)
register_env(
"grouped_twostep",
lambda config: TwoStepGame(config).with_agent_groups(
grouping, obs_space=obs_space, act_space=act_space
),
)
if args.run == "MADDPG":
obs_space = Discrete(6)
act_space = TwoStepGame.action_space
config = {
"env": TwoStepGame,
"env_config": {
"actions_are_logits": True,
},
"replay_buffer_config": {"learning_starts": 100},
"multiagent": {
"policies": {
"pol1": PolicySpec(
observation_space=obs_space,
action_space=act_space,
config={"agent_id": 0},
),
"pol2": PolicySpec(
observation_space=obs_space,
action_space=act_space,
config={"agent_id": 1},
),
},
"policy_mapping_fn": (lambda aid, **kwargs: "pol2" if aid else "pol1"),
},
"framework": args.framework,
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
}
elif args.run == "QMIX":
config = (
QMixConfig()
.training(mixer=args.mixer, train_batch_size=32)
.rollouts(num_rollout_workers=0, rollout_fragment_length=4)
.exploration(
exploration_config={
"final_epsilon": 0.0,
}
)
.environment(
env="grouped_twostep",
env_config={
"separate_state_space": True,
"one_hot_state_encoding": True,
},
)
.resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0")))
)
config = config.to_dict()
else:
config = {
"env": TwoStepGame,
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
"framework": args.framework,
}
stop = {
"episode_reward_mean": args.stop_reward,
"timesteps_total": args.stop_timesteps,
"training_iteration": args.stop_iters,
}
results = tune.run(args.run, stop=stop, config=config, verbose=2)
if args.as_test:
check_learning_achieved(results, args.stop_reward)
    ray.shutdown() | rllib/examples/two_step_game.py | 0.712832 | 0.22465
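A minimal standalone check of the MADDPG policy mapping defined above (agent id 0 goes to "pol1", agent id 1 to "pol2"); no RLlib installation is needed for this snippet:
policy_mapping_fn = lambda aid, **kwargs: "pol2" if aid else "pol1"  # same lambda as above
assert policy_mapping_fn(0) == "pol1"
assert policy_mapping_fn(1) == "pol2"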
import unittest
import sys
import itertools
import time
import os
from streamsx.topology.topology import *
from streamsx.topology.tester import Tester
from streamsx.topology import schema
import streamsx.topology.context
import streamsx.spl.op as op
class TestPending(unittest.TestCase):
    """ Test pending connections.
    """
    _multiprocess_can_split_ = True
def setUp(self):
Tester.setup_distributed(self)
def test_simple_map(self):
"""Test pending connection simple case.
"""
data = ['A1','B1', 'A2', 'A3', 'C1', 'C1']
expected = [ e + "PC" for e in data ]
topo = Topology()
pending = PendingStream(topo)
ap = pending.stream.map(lambda s : s + "PC")
self.assertFalse(pending.is_complete())
pending.complete(topo.source(data))
self.assertTrue(pending.is_complete())
tester = Tester(topo)
tester.contents(ap, expected)
tester.test(self.test_ctxtype, self.test_config)
def test_simple_filter(self):
"""Test pending connection simple case.
"""
data = ['A1','B1', 'A2', 'A3', 'C1', 'C1']
expected = ['A3']
topo = Topology()
pending = PendingStream(topo)
ap = pending.stream.filter(lambda s : s.startswith('A'))
ap = ap.filter(lambda s : s.endswith('3'))
self.assertFalse(pending.is_complete())
pending.complete(topo.source(data))
self.assertTrue(pending.is_complete())
tester = Tester(topo)
tester.contents(ap, expected)
tester.test(self.test_ctxtype, self.test_config)
def test_fan_in_out(self):
"""Test pending connection fan in/out.
"""
data1 = ['A1','B1', 'A2', 'A3', 'C1', 'C1']
data2 = ['X','Y', 'Z', 'Q', 'T', 'X']
all_data = data1 + data2
expected_pc = [ e + "PC" for e in all_data ]
expected_cp = [ "CP" + e for e in all_data ]
expected_su = [ "SU" + e + "US" for e in all_data ]
topo = Topology()
pending = PendingStream(topo)
apc = pending.stream.map(lambda s : s + "PC")
acp = pending.stream.map(lambda s : 'CP' + s)
self.assertFalse(pending.is_complete())
s1 = topo.source(data1)
s2 = topo.source(data2)
su = s1.union({s2})
asu = su.map(lambda s : 'SU' + s + 'US')
pending.complete(su)
self.assertTrue(pending.is_complete())
tester = Tester(topo)
tester.contents(apc, expected_pc, ordered=False)
tester.contents(acp, expected_cp, ordered=False)
tester.contents(asu, expected_su, ordered=False)
tester.test(self.test_ctxtype, self.test_config)
def test_feedback_loop(self):
topo = Topology()
data = ['A','B', 'A', 'A', 'X', 'C', 'C', 'D', 'A', 'A', 'E']
expected = ['B', 'X', 'C', 'C', 'D', 'A', 'A', 'E']
s = topo.source(data)
s = s.filter(lambda t : time.sleep(1) or True).as_string();
feedback = PendingStream(topo)
df = op.Invoke(topo, 'spl.utility::DynamicFilter',
inputs = [s, feedback.stream],
schemas= [schema.CommonSchema.String])
df.params['key'] = df.attribute(s, 'string')
df.params['addKey'] = df.attribute(feedback.stream, 'string')
delayed_out = op.Map('spl.utility::Delay', df.outputs[0], params={'delay': 0.05}).stream
x = delayed_out.filter(lambda s : s == 'X').map(lambda s : 'A').as_string()
i = topo.source(['B', 'X', 'C', 'D', 'E']).as_string()
x = x.union({i})
feedback.complete(x)
result = delayed_out
result.print()
#streamsx.topology.context.submit('TOOLKIT', topo)
tester = Tester(topo)
tester.contents(result, expected)
tester.test(self.test_ctxtype, self.test_config)
class TestSasPending(TestPending):
def setUp(self):
Tester.setup_streaming_analytics(self, force_remote_build=True)
class TestPendingCompileOnly(unittest.TestCase):
@unittest.skipIf("STREAMS_INSTALL" not in os.environ, "STREAMS_INSTALL not set")
def test_pure_loop(self):
topo = Topology()
feedback = PendingStream(topo)
df = op.Map('spl.utility::Custom',
feedback.stream,
schema=schema.CommonSchema.String)
delayed_out = op.Map('spl.utility::Delay', df.stream, params={'delay': 0.05}).stream
feedback.complete(delayed_out)
sr = streamsx.topology.context.submit('BUNDLE', topo)
self.assertEqual(0, sr['return_code'])
        os.remove(sr.bundlePath) | test/python/topology/test2_pending.py | 0.380183 | 0.39257
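A minimal sketch of the PendingStream pattern these tests exercise: downstream logic is attached before the real source exists and the loop is closed later with complete(). It assumes the streamsx package and the wildcard import used above; the data is hypothetical:
topo = Topology()
pending = PendingStream(topo)
upper = pending.stream.map(lambda s: s.upper())  # consume the stream before it is fed
assert not pending.is_complete()
pending.complete(topo.source(["a", "b"]))  # later, supply the real source
assert pending.is_complete()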
from urllib.parse import urlparse
from google.cloud import storage
import numpy as np
import resampy
import tensorflow as tf
import urllib.request
import itertools
import io
import jsonlines
from pydub import AudioSegment
import yamnet.params as yamnet_params
import yamnet.yamnet as yamnet_model
import os
from argparse import ArgumentParser
import config
import logging
logger = logging.getLogger(__name__)
def main():
config = parse_arguments()
setup_logging(config)
run_inference(config)
def parse_arguments():
parser = ArgumentParser("Run YAMNET on a set of audio files.")
parser.add_argument("-i", "--input-path", default="gs://the-peoples-speech-west-europe/forced-aligner/cuda-forced-aligner/output_work_dir_5b/output_work_dir_5b/training_set", help="Path to yamnet dataset.")
parser.add_argument("-o", "--output-path", default="results.jsonl", help="The path to save the results.")
parser.add_argument("-c", "--config-file-path", default=".json", help="The path to the config file.")
parser.add_argument("-v", "--verbose", default=False, action="store_true", help="Print out debug messages.")
parser.add_argument("-vi", "--verbose-info", default=False, action="store_true", help="Print out info messages.")
args = parser.parse_args()
arguments = vars(args)
config = setup_config(arguments)
return config
def run_inference(config):
model, classes, params = load_model(config)
dataset, filenames = get_dataset(config)
run_model_on_dataset(model, classes, params, dataset, filenames, config)
def get_dataset(config):
logger.debug("Getting file paths")
files, filenames = list_files(config["input_path"], config)
logger.debug("Making dataset")
ds = files.map(tf.io.read_file)
#ds = ds.map(tf.audio.decode_wav)
ds = ds.map(lambda file : tf.py_function(func=decode_mp3, inp=[file], Tout=tf.float32))
ds = ds.prefetch(buffer_size=tf.data.AUTOTUNE)
return ds, filenames
def decode_mp3(mp3_tensor):
mp3_data = mp3_tensor.numpy()
mp3_file = io.BytesIO(mp3_data)
mp3_audio = AudioSegment.from_file(mp3_file, format="mp3")
logger.debug("duration: " + str(mp3_audio.duration_seconds) + ", channels: " +
str(mp3_audio.channels) + ", sampling_width: " + str(mp3_audio.sample_width) +
", sampling rate: " + str(mp3_audio.frame_rate) + ", dbfs: " + str(mp3_audio.dBFS) )
mp3_audio = mp3_audio.set_channels(1)
mp3_audio = mp3_audio.set_sample_width(2)
seconds = mp3_audio.duration_seconds
max_duration = 600
if seconds > max_duration:
mp3_audio = mp3_audio[0:max_duration*1000]
array = mp3_audio.get_array_of_samples()
array = np.append(array, [int(mp3_audio.frame_rate)])
return array
def list_files(path, config):
    if "max_files" not in config:
        filenames = tf.data.Dataset.list_files(os.path.join(path, "*/*.wav"), shuffle=False)
return filenames, [filename.numpy().decode("utf-8") for filename in filenames]
return list_blobs(path, int(config["max_files"]))
def list_blobs(path, max_files):
result = urlparse(path, allow_fragments=False)
logger.debug("Path: " + str(result))
files = list_blobs_with_prefix(result.netloc, result.path.lstrip("/"))
files = list(itertools.islice(files, max_files))
logger.debug("Found matching files under " + path + ": " + str(files))
filenames = [os.path.join("gs://" + result.netloc, filename) for filename in files]
return tf.data.Dataset.list_files(filenames, shuffle=False), filenames
def list_blobs_with_prefix(bucket_name, prefix, delimiter=None):
"""Lists all the blobs in the bucket that begin with the prefix.
This can be used to list all blobs in a "folder", e.g. "public/".
The delimiter argument can be used to restrict the results to only the
"files" in the given "folder". Without the delimiter, the entire tree under
the prefix is returned. For example, given these blobs:
a/1.txt
a/b/2.txt
If you specify prefix ='a/', without a delimiter, you'll get back:
a/1.txt
a/b/2.txt
However, if you specify prefix='a/' and delimiter='/', you'll get back
only the file directly under 'a/':
a/1.txt
As part of the response, you'll also get back a blobs.prefixes entity
that lists the "subfolders" under `a/`:
a/b/
"""
storage_client = storage.Client()
# Note: Client.list_blobs requires at least package version 1.17.0.
blobs = storage_client.list_blobs(bucket_name, prefix=prefix, delimiter=delimiter)
for blob in blobs:
logger.debug("Got file: " + str(blob.name))
if is_audio(blob.name):
yield blob.name
if delimiter:
for prefix in blobs.prefixes:
yield prefix
def is_audio(path):
base, extension = os.path.splitext(path)
if extension.strip() == ".mp3":
return True
return False
def load_model(config):
logger.debug("Loading model...")
weights = load_weights(config)
params = yamnet_params.Params()
yamnet = yamnet_model.yamnet_frames_model(params)
yamnet.load_weights(weights)
yamnet_classes = yamnet_model.class_names(os.path.join(os.path.dirname(__file__), "yamnet", "yamnet_class_map.csv"))
return yamnet, yamnet_classes, params
def load_weights(config):
download_path = "https://storage.googleapis.com/audioset/yamnet.h5"
target_path = "/tmp/yamnet/yamnet.h5"
download(download_path, target_path)
return target_path
def download(url, path):
logger.debug("Downloading from " + url + " to " + path)
directory = os.path.dirname(path)
os.makedirs(directory, exist_ok=True)
if not os.path.exists(path):
urllib.request.urlretrieve(url, path)
logger.debug("Download success")
def run_model_on_dataset(yamnet, classes, params, dataset, filenames, config):
with jsonlines.open(config["output_path"], mode='w') as writer:
for batch, filename in zip(dataset, filenames):
logger.debug(filename)
items = split_into_items(batch, config)
logger.debug("chunks" + str(len(items)))
for index, item in enumerate(items):
results = run_model_on_batch(yamnet, classes, params, item)
print_results(writer, filename, results, classes, index, config)
def split_into_items(pair, config):
batch = pair[:-1]
sr = int(pair[-1])
chunk_size = int(float(config["seconds_per_chunk"]) * sr)
array = batch.numpy()
sample_count = array.shape[-1]
logger.debug("total samples " + str(array.shape))
items = []
chunks = (sample_count + chunk_size - 1) // chunk_size
for chunk in range(chunks):
start = chunk * chunk_size
end = min((chunk+1) * chunk_size, sample_count)
items.append((array[start:end], sr))
return items
def print_results(writer, filename, results, yamnet_classes, index, config):
top, prediction = results
seconds = index * float(config["seconds_per_chunk"])
print(str(int(seconds // 60)) + ":" + str(int(seconds) % 60) + '\n'.join(' {:12s}: {:.3f}'.format(yamnet_classes[i], prediction[i])
for i in top[0:1]))
result = { "path" : filename, "seconds" : seconds }
for i in top:
result[yamnet_classes[i]] = float(prediction[i])
writer.write(result)
def run_model_on_batch(yamnet, classes, params, pair):
batch, sr = pair
waveform = batch / 32768.0 # Convert to [-1.0, +1.0]
# Convert to mono and the sample rate expected by YAMNet.
if len(waveform.shape) > 1:
waveform = np.mean(waveform, axis=1)
if sr != params.sample_rate:
waveform = resampy.resample(waveform, sr, params.sample_rate)
# Predict YAMNet classes.
scores, embeddings, spectrogram = yamnet(waveform)
# Scores is a matrix of (time_frames, num_classes) classifier scores.
# Average them along time to get an overall classifier output for the clip.
prediction = np.mean(scores, axis=0)
# Report the highest-scoring classes and their scores.
top = np.argsort(prediction)[::-1][:5]
return top, prediction
def setup_config(dictionary):
return config.ConfigurationSet(
config.config_from_env(prefix="MLCOMMONS"),
config.config_from_yaml(config_path(), read_from_file=True),
config.config_from_dict(dictionary),
)
def config_path():
home = os.path.expanduser("~")
home_config_path = os.path.join(home, ".mlcommons", "config.yaml")
if os.path.exists(home_config_path):
return home_config_path
return os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "config", "default.yaml")
def setup_logging(arguments):
logging_format = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
if arguments["verbose"]:
logging.basicConfig(level=logging.DEBUG, format=logging_format)
elif arguments["verbose_info"]:
logging.basicConfig(level=logging.INFO, format=logging_format)
else:
logging.basicConfig(level=logging.WARNING, format=logging_format)
root_logger = logging.getLogger()
if arguments["verbose"]:
root_logger.setLevel(logging.DEBUG)
elif arguments["verbose_info"]:
root_logger.setLevel(logging.INFO)
else:
root_logger.setLevel(logging.WARNING)
logging.getLogger("numba.core.ssa").setLevel(logging.CRITICAL)
logging.getLogger("numba.core.byteflow").setLevel(logging.CRITICAL)
logging.getLogger("numba.core.interpreter").setLevel(logging.CRITICAL)
if __name__ == '__main__':
    main() | galvasr2/yamnet/inference.py | 0.52902 | 0.145479
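A worked example of the chunking arithmetic in split_into_items above; the sample rate and chunk length are hypothetical values:
sample_count = 160_000  # e.g. 10 s of audio at 16 kHz (hypothetical)
sr, seconds_per_chunk = 16_000, 3.0
chunk_size = int(seconds_per_chunk * sr)  # 48_000 samples per chunk
chunks = (sample_count + chunk_size - 1) // chunk_size  # ceiling division
assert chunks == 4  # three full 3 s chunks plus one shorter remainder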
import time
from urllib.parse import urlparse
from Jumpscale import j
from .ZeroOSClient import ZeroOSClient
class ZeroOSFactory(j.application.JSBaseConfigsClass):
"""
"""
_CHILDCLASS = ZeroOSClient
__jslocation__ = "j.clients.zos"
def get_by_id(self, node_id):
directory = j.clients.threefold_directory.get()
node, resp = directory.api.GetCapacity(node_id)
resp.raise_for_status()
u = urlparse(node.robot_address)
node = self.get(node_id)
if node.host != u.hostname:
node.host = u.hostname
node.save()
return self.get(node_id)
def zero_node_ovh_install(self, OVHHostName, OVHClient, zerotierNetworkID, zerotierClient):
"""
OVHHostName is server name as known by OVH
get clients as follows:
- zerotierClient = j.clients.zerotier.get(instance='main', data={'data': ZT_API_TOKEN})
- OVHClient = j.clients.ovh.get(...)
"""
cl = OVHClient
self._log_debug("booting server {} to zero-os".format(OVHHostName))
task = cl.zero_os_boot(target=OVHHostName, zerotierNetworkID=zerotierNetworkID)
        self._log_debug("waiting for {} to reboot".format(OVHHostName))
cl.server_wait_reboot(OVHHostName, task["taskId"])
ip_pub = cl.server_detail_get(OVHHostName)["ip"]
self._log_info("ip addr is:%s" % ip_pub)
while True:
try:
network = zerotierClient.get_network(network_id=zerotierNetworkID)
member = network.member_get(public_ip=ip_pub)
ipaddr_priv = member.private_ip
break
except RuntimeError as e:
# case where we don't find the member in zerotier
self._log_error(e)
time.sleep(1)
except IndexError as e:
                # case where the member doesn't have a private ip
self._log_error("please authorize the server with the public ip %s in the zerotier network" % ip_pub)
time.sleep(1)
self._log_debug("server found: %s" % member["id"])
self._log_debug("zerotier IP: %s" % ipaddr_priv)
return ip_pub, ipaddr_priv
def zero_node_packetnet_install(
self,
packetnetClient,
zerotierClient,
project_name,
plan_type,
location,
server_name,
zerotierNetworkID,
ipxe_base="https://bootstrap.grid.tf/ipxe/master",
):
"""
packetnetClient = j.clients.packetnet.get('TOKEN')
zerotierClient = j.clients.zerotier.get(instance='main', data={'token': 'TOKEN'})
project_name = packet.net project
plan_type: one of "Type 0", "Type 1", "Type 2" ,"Type 2A", "Type 3", "Type S"
location: one of "Amsterdam", "Tokyo", "Synnuvale", "Parsippany"
server_name: name of the server that is going to be created
zerotierNetworkID: zertotier network id
ipxe_base: change this to the version you want, use master branch by default
"""
valid_plan_types = ("Type 0", "Type 1", "Type 2", "Type 2A", "Type 3", "Type S") # FIXME
if plan_type not in valid_plan_types:
            raise j.exceptions.Input("bad plan type %s. Valid plan types are %s" % (plan_type, ",".join(valid_plan_types)))
if zerotierNetworkID:
ipxe_url = "%s/%s" % (ipxe_base, zerotierNetworkID)
else:
ipxe_url = None
hostname = server_name
# find project id
project_ids = [project.id for project in packetnetClient.projects if project.name == project_name]
if not project_ids:
raise j.exceptions.NotFound("No projects found with name %s" % project_name)
project_id = project_ids[0]
packetnetClient.project_id = project_id
packetnetClient.startDevice(
hostname=server_name,
plan=plan_type,
facility=location,
os="ubuntu_17_04",
ipxeUrl=ipxe_url,
wait=True,
remove=False,
)
device = packetnetClient.getDevice(server_name)
ip_pub = [
netinfo["address"]
for netinfo in device.ip_addresses
if netinfo["public"] and netinfo["address_family"] == 4
]
while True:
try:
network = zerotierClient.get_network(network_id=zerotierNetworkID)
member = network.member_get(public_ip=ip_pub[0])
ipaddr_priv = member.private_ip
break
except RuntimeError as e:
# case where we don't find the member in zerotier
self._log_error(e)
time.sleep(1)
except IndexError as e:
                # case where the member doesn't have a private ip
self._log_error("please authorize the server with the public ip %s in the zerotier network" % ip_pub[0])
time.sleep(1)
self._log_debug("server found: %s" % device.id)
self._log_debug("zerotier IP: %s" % ipaddr_priv)
return ip_pub, ipaddr_priv
def get_from_itsyouonline(
self,
name="default",
iyo_instance="default",
iyo_organization=None,
host="localhost",
port=6379,
reset=False,
save=True,
):
"""
:param name: name for this instance
:param iyo_instance: the instance name of the IYO client you will use
:param iyo_organization: the organization in IYO to connect
:param host: addr of the zos
        :param port: port of the zos (for the redis protocol)
        :param reset: to refresh your jwt
:param save: if the resulting client will be stored on your bcdb
:return:
"""
if iyo_organization is None:
raise RuntimeError("need to specify name of organization.")
jwt_name = j.core.text.strip_to_ascii_dense("zos_%s" % iyo_organization)
iyo = j.clients.itsyouonline.get(name=iyo_instance)
jwt = iyo.jwt_get(
name=jwt_name, scope="user:memberof:%s" % iyo_organization, reset=reset
) # there should be enough protection in here to refresh
# print(jwt)
cl = self.get(name=name, host=host, port=port, password=<PASSWORD>, ssl=True)
print(cl)
cl.ping()
if save:
cl.save()
return cl
def test(self):
"""
kosmos 'j.clients.zos.test()'
:return:
"""
cl = j.clients.zos.get_from_itsyouonline(
name="default", host="10.102.90.219", port=6379, iyo_organization="tf-production", reset=True
)
print(cl)
print(cl.ping())
# use j.clients.zoscmd... to start a local zos
        # connect client to zos do quite some tests | Jumpscale/clients/zero_os/ZeroOSClientFactory.py | 0.305905 | 0.112381
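A hypothetical usage sketch based on the get_from_itsyouonline docstring above; the organization, host and port are placeholders, and a configured Jumpscale environment with an ItsYou.online client is assumed:
zos = j.clients.zos.get_from_itsyouonline(  # assumes a running Jumpscale shell (j is its global)
    name="demo",
    iyo_instance="default",
    iyo_organization="example-org",  # placeholder organization
    host="10.0.0.10",  # placeholder node address
    port=6379,
)
zos.ping()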
import matplotlib.pyplot as plt
import numpy as np
from getopt import gnu_getopt as getopt
import sys
# parse command-line arguments
try:
optlist,args = getopt(sys.argv[1:], ':', ['verbose'])
assert len(args) < 2
except (AssertionError):
sys.exit(__doc__)
if len(args) > 0:
fname = args[0]
else:
fname = 'STDOUT.0000'
numbername, varname='time_tsnumber', 'dynstat_eta_min'
#numbername, varname='seaice_tsnumber', 'seaice_heff_max'
#numbername, varname='seaice_tsnumber', 'seaice_hsnow_max'
niter0=464592
nsteps=48
tsnumber = range(niter0+nsteps,niter0,-1)
def get_parms (fname):
with open(fname) as f:
for line in f:
if 'nIter0 = /* Run starting timestep number */' in line:
nIter0 = int(next(f).strip().split()[-1])
elif 'nTimeSteps = /* Number of timesteps */' in line:
nTimeSteps = int(next(f).strip().split()[-1])
elif 'Model clock timestep' in line:
deltaT = float((next(f).strip().split()[-1]).replace('E','e'))
elif 'Monitor output interval' in line:
interval = float((next(f).strip().split()[-1]).replace('E','e'))
return nIter0, nTimeSteps, max(int(interval/deltaT),1)
niter0,nsteps,interval=get_parms(fname)
tsnumber = range(niter0+nsteps,niter0,-interval)
vars=[]
for k,mynumber in enumerate(tsnumber):
print(k,mynumber)
var = []
with open(fname) as fo:
for line in fo:
if numbername in line and str(mynumber) in line:
# print(line)
while True:
ll=fo.readline()
# print(ll)
if varname in ll:
var.append(float(
ll.strip().split()[-1].replace('E','e')))
break
# # skip two lines ...
# next(fo)
# next(fo)
# # ... and read the third, which is dynstat_eta_min
# ll=fo.readline().strip().split()
# var.append(float(ll[-1].replace('E','e')))
if len(var) < 4: break
vars.append(var)
# convert to numpy array
etamin = np.asarray(vars)
plt.clf();
plt.plot(tsnumber[:len(vars)],etamin[:,2],'+-',label='chklev 2')
plt.plot(tsnumber[:len(vars)],etamin[:,3],'-.',label='chklev 1')
plt.xlabel('timestep')
plt.ylabel(varname)
plt.legend()
plt.show()
#grep -A6 "(PID.TID 0000.0001) %MON time_tsnumber = 472740" STDOUT.0000 | grep eta_min | readtapemonitor.py | 0.1069 | 0.349783
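A worked example of the monitor-interval arithmetic in get_parms above; the timestep and monitor interval are hypothetical values:
deltaT = 1200.0  # model clock timestep in seconds (hypothetical)
monitor_interval = 43200.0  # monitor output interval in seconds (hypothetical)
interval = max(int(monitor_interval / deltaT), 1)
assert interval == 36  # the script then walks tsnumber backwards in steps of 36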
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import random
import numpy as np
from tensorflow.keras.layers import *
from tensorflow.keras.activations import *
from tensorflow.keras.models import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.initializers import *
from tensorflow.keras.callbacks import *
from DogsCats_Dataset_class import DOGSCATS
data = DOGSCATS()
data.data_augmentation(augment_size=5000)
data.data_preprocessing(preprocess_mode="MinMax")
x_train_splitted, x_val, y_train_splitted, y_val = data.get_splitted_train_validation_set()
x_train, y_train = data.get_train_set()
x_test, y_test = data.get_test_set()
num_classes = data.num_classes
# Define the CNN
def model_fn(optimizer, learning_rate, filter_block1, kernel_size_block1, filter_block2,
kernel_size_block2, filter_block3, kernel_size_block3, dense_layer_size,
kernel_initializer, bias_initializer):
# Input
input_img = Input(shape=x_train.shape[1:])
# Conv Block 1
x = Conv2D(filters=filter_block1, kernel_size=kernel_size_block1, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)(input_img)
x = Activation("relu")(x)
x = Conv2D(filters=filter_block1, kernel_size=kernel_size_block1, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)(x)
x = Activation("relu")(x)
x = MaxPool2D()(x)
# Conv Block 2
x = Conv2D(filters=filter_block2, kernel_size=kernel_size_block2, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)(x)
x = Activation("relu")(x)
x = Conv2D(filters=filter_block2, kernel_size=kernel_size_block2, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)(x)
x = Activation("relu")(x)
x = MaxPool2D()(x)
# Conv Block 3
x = Conv2D(filters=filter_block3, kernel_size=kernel_size_block3, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)(x)
x = Activation("relu")(x)
x = Conv2D(filters=filter_block3, kernel_size=kernel_size_block3, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)(x)
x = Activation("relu")(x)
x = MaxPool2D()(x)
# Dense Part
x = Flatten()(x)
x = Dense(units=dense_layer_size)(x)
x = Activation("relu")(x)
x = Dense(units=num_classes)(x)
y_pred = Activation("softmax")(x)
# Build the model
model = Model(inputs=[input_img], outputs=[y_pred])
opt = optimizer(learning_rate=learning_rate)
model.compile(
loss="categorical_crossentropy",
optimizer=opt,
metrics=["accuracy"])
return model
# Global params
epochs = 20
batch_size = 256
optimizer = Adam
learning_rate = 0.001
filter_block1 = 32
kernel_size_block1 = 3
filter_block2 = 64
kernel_size_block2 = 3
filter_block3 = 128
kernel_size_block3 = 3
dense_layer_size = 512
# GlorotUniform, GlorotNormal, RandomNormal, RandomUniform, VarianceScaling
kernel_initializer = 'GlorotUniform' # default value
bias_initializer = 'zeros' # default value
rand_model = model_fn(optimizer, learning_rate, filter_block1, kernel_size_block1, filter_block2,
kernel_size_block2, filter_block3, kernel_size_block3, dense_layer_size,
kernel_initializer, bias_initializer)
rand_model.fit(
x=x_train_splitted,
y=y_train_splitted,
verbose=1,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_val, y_val))
score = rand_model.evaluate(
x_test,
y_test,
verbose=0,
batch_size=batch_size)
print("Test performance: ", score) | 4_DeepLearning-Advanced/1-Gewichte_initialisieren.py | 0.765681 | 0.459137
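A standalone sketch of how the initializer strings passed to model_fn above resolve to Keras initializer objects; it only needs TensorFlow and is independent of the DogsCats data pipeline:
import tensorflow as tf

init = tf.keras.initializers.get("GlorotUniform")  # same identifier string as used above
weights = init(shape=(3, 3, 3, 32))  # e.g. a 3x3 conv kernel with 32 filters (hypothetical shape)
print(type(init).__name__, weights.shape)  # GlorotUniform (3, 3, 3, 32)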
veiculo_lv = {}
class Veiculo:
    # Vehicle registration
def __init__(self,tipo_veiculo:str='c',modelo_veiculo:str='s',cor_veiculo:str='pt',placa_veiculo:str='r',tipo_combustivel:str='gc',potencia_motor:str='',ano_veiculo:str='2015',reserva:bool=False,nao_cadastrar:bool=False):
        '''
        ***nao_cadastrar -> Pass "True" if you want to call the constructor without registering any vehicle***\n\n
        tipo_veiculo -> Car: "c" / Motorcycle: "m"\n
        modelo_veiculo -> Car: Sports: "e", Crossover: "c", Jeep: "j", Sedan: "s", Van: "v" / Motorcycle: Sports: "e", Trail: "t", Scooter: "s", Off-Road: "or"\n
        cor_veiculo -> Black: "pt", White: "br", Red: "vm", Green: "vd", Blue: "az", Yellow: "am", Gray: "cz"\n
        placa_veiculo -> E.g.: 1234-ABCD \ "r" to randomize a plate\n
        tipo_combustivel -> Regular gasoline: "gc", Gasoline with additives: "ga", Premium gasoline: "gp", Formulated gasoline: "gf", Regular ethanol: "ec", Ethanol with additives: "ea", Regular diesel: "dc", Diesel with additives: "da", Biodiesel: "bd"\n
        ano_veiculo -> Vehicle year, given as int()\n
        potencia_motor -> Engine power in horsepower, int()
        '''
if nao_cadastrar == False:
import random as rd
import datetime as dt
_erro = False
__tipo_veiculo = {'c':'Carro','m':'Motocicleta'}
__mod_veic_carro = {'e':'Esportivo','c':'Crossover','j':'Jeep','s':'Sedan','v':'Van'}
__mod_veic_moto = {'e':'Esportivo','t':'Trail','s':'Scooter','or':'Off-Road'}
__cor_veiculo = {'pt':'Preto','br':'Branco','vm':'Vermelho','vd':'Verde','az':'Azul','am':'Amarelo','cz':'Cinza'}
__tipo_combustivel = {'gc':'Gasolina Comum','ga':'Gasolina aditivada','gp':'Gasolina Premium','gf':'Gasolina Formulada','ec':'Etanol Comum','ea':'Etanol Aditivado','dc':'Diesel Comum','da':'Diesel Aditivado','bd':'Biodiesel'}
self.t_v = tipo_veiculo.lower()
self.m_v = modelo_veiculo
self.c_v = cor_veiculo
self.p_v = placa_veiculo.upper()
self.t_c = tipo_combustivel
self.a_v = ano_veiculo
self.p_m = potencia_motor
            # Check that the vehicle type is valid
if self.t_v != '':
if self.t_v not in __tipo_veiculo.keys():
print('\033[1;31mTipo de veiculo não encontrado\033[m')
_erro = True
else:
self.t_v = __tipo_veiculo[self.t_v]
else:
self.t_v = __tipo_veiculo['c']
            # Check that the vehicle model is valid
if self.t_v == 'Carro':
if self.m_v != '':
if self.m_v not in __mod_veic_carro:
print('\033[1;31mModelo de carro não encontrado\033[m')
_erro = True
else:
self.m_v = __mod_veic_carro[self.m_v]
else:
self.m_v = __mod_veic_carro['s']
else:
if self.m_v != '':
if self.m_v not in __mod_veic_moto:
print('\033[1;31mModelo de moto não encontrado\033[m')
_erro = True
else:
self.m_v = __mod_veic_moto[self.m_v]
else:
self.m_v = __mod_veic_moto['t']
            # Check that the vehicle color is valid
if self.c_v != '':
if self.c_v not in __cor_veiculo.keys():
print('\033[1;31mCor inválida\033[m')
_erro = True
else:
self.c_v = __cor_veiculo[self.c_v]
else:
self.c_v = __cor_veiculo['pt']
            # Generate a random plate in the "####-####" format with numeric and alphabetic characters
if self.p_v == 'R' or self.p_v == '':
rand_placa = []
while len(rand_placa) < 9:
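                    # Each character is a digit or an uppercase letter with equal probability; a dash is appended after the fourth character to give the ####-#### format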
nxt_v = rd.randint(0,1)
if nxt_v == 0:
rand_char = rd.randint(0,9)
else:
rand_char = rd.choice(['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z'])
if len(rand_placa) == 3:
rand_placa.append(str(rand_char))
rand_placa.append('-')
else:
rand_placa.append(str(rand_char))
self.p_v = ''.join(rand_placa)
            # Check that the plate provided is valid
else:
if len(self.p_v) == 9:
if '-' in self.p_v and self.p_v.count('-') == 1:
for c in self.p_v:
if c.isalnum() == False and c != '-':
print('\033[1;31mPlaca inválida\033[m')
_erro = True
else:
print('\033[1;31mPlaca inválida\033[m')
_erro = True
else:
print('\033[1;31mPlaca inválida\033[m')
_erro = True
            # Check that the fuel type is valid
if self.t_c != '':
if self.t_c not in __tipo_combustivel.keys():
print('\033[1;31mTipo de gasolina inválida\033[m')
_erro = True
else:
self.t_c = __tipo_combustivel[self.t_c]
else:
self.t_c = __tipo_combustivel['gc']
            # Check that the vehicle year is valid
if self.a_v != '':
if str(self.a_v).isnumeric() != True:
print('\033[1;31mAno do veiculo inválido\033[m')
_erro = True
else:
self.a_v = int(self.a_v)
if dt.date.today().year < self.a_v:
print('\033[1;31mAno do veiculo inválido\033[m')
_erro = True
else:
self.a_v = 2015
            # Check that the engine power is valid
if self.p_m != '':
if str(self.p_m).isnumeric() != True:
print('\033[1;31mPotência do motor inválida\033[m')
_erro = True
else:
self.p_m = int(self.p_m)
if self.p_m <= 0:
print('\033[1;31mPotência do motor inválida\033[m')
_erro = True
else:
self.p_m = 110
            # Add the vehicle to the dict of available vehicles if it is not registered yet
if _erro == False:
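                # "__" stays True only while no registered vehicle has the same plate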
__ = True
for plc in veiculo_lv.keys():
if plc == self.p_v:
__ = False
break
if __ != True:
print('\033[1;31mVeiculo já cadastrado\033[m')
else:
veiculo_lv[self.p_v] = (self.t_v,self.m_v,self.c_v,self.p_m,self.t_c,self.a_v)
else:
print('Erro no cadastro')
def estoque_veiculo(self):
        '''
        Shows the information of every vehicle\n
        Reports which vehicles are available
        '''
        return veiculo_lv | CODE/Veiculo.py | 0.086579 | 0.169543
from pathlib import Path
from pickle import dump, load, HIGHEST_PROTOCOL
from os.path import exists
from googleapiclient.discovery import build, HttpError
from src.spacy_utils import PatternNotFoundException
from src.semantic_sequence import SemanticSequence
SEARCH_REQ_FOLDER = '.searchRequests' # Savefolder of search results
class AllKeysReachLimit(Exception):
'''
Exception for daily search request limit is reached for all keys.
'''
class WebRequester:
'''
Abstract class for a textual search api.
'''
def search_for_patter(self, pattern: SemanticSequence, qualia_theorem: str) -> [str]:
'''
        Place the theorem into the clue used by the semantic sequence and
        execute the search request.
:param pattern: semantic_seq with clue to search
:param qualia_theorem: theorem that will be placed in clue of semantic_seq
:return: search requests for clue of semantic_seq with theorem
'''
raise NotImplementedError('Abstract Class WebRequester has been initiated')
def num_results(self, search_request: str) -> int:
'''
Return number of results for search_request.
:param search_request request
:return: number of results for request
'''
raise NotImplementedError('Abstract Class WebRequester has been initiated')
def num_results_near(self, word_1: str, word_2: str) -> int:
'''
Return number of results in which word_1 is near to word_2.
:param word_1: first word
:param word_2: second word
:return: number of results for word_1 near word_2
'''
raise NotImplementedError('Abstract Class WebRequester has been initiated')
def num_search_and(self, word_1: str, word_2: str) -> int:
'''
Return number of results containing word_1 and word_2.
:param word_1: first word
:param word_2: second word
:return: number of results containing word_1 and word_2
'''
raise NotImplementedError('Abstract Class WebRequester has been initiated')
class GoogleRequester(WebRequester):
'''
Implementation for the google json api. keys is a list of
(API key, Custom Search ID) tuples. key_idx reference current
key pair in list.
'''
def __init__(self, keys: [(str, str)]):
self.keys = keys
self.key_idx = 0
self.api_key = self.keys[self.key_idx][0]
self.cse_key = self.keys[self.key_idx][1]
self.service = build('customsearch', 'v1', developerKey=self.api_key)
def search_for_patter(self, pattern: SemanticSequence, qualia_theorem: str) -> [str]:
search_string = "\"{}\"".format(pattern.get_search_requests(qualia_theorem))
res = self._get_search_result(search_string)
found_items = []
if 'items' in res.keys():
for item in res['items']:
for column in ['snippet']:
if column in item:
found_items.append(item[column])
return found_items
def num_results(self, search_request: str):
res = self._get_search_result(search_request)
hits = int(res['searchInformation']['totalResults'])
return hits
def num_results_near(self, word_1: str, word_2: str):
return self.num_results('{} AROUND(10) {}'.format(word_1, word_2))
def num_search_and(self, word_1: str, word_2: str):
return self.num_results('{} {}'.format(word_1, word_2))
def _get_search_result(self, search_string: str):
        '''
        Load the search result from SEARCH_REQ_FOLDER if the same request was
        executed in the past; otherwise execute the request and cache the result
        in SEARCH_REQ_FOLDER. Automatically switches to the next key from the
        key file when the daily limit is reached.
        :param search_string: search request
        :raise AllKeysReachLimit: if every key pair has reached the daily limit of 100 requests
        :return: search results
        '''
path = SEARCH_REQ_FOLDER + '/' + search_string
if exists(path):
with open(path, 'rb') as file:
res = load(file)
else:
res = None
while res is None:
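            # Any HttpError switches to the next key pair; AllKeysReachLimit is raised once every pair has been tried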
try:
res = self.service.cse().list(q=search_string, cx=self.cse_key).execute()
with open(path, 'wb') as output:
dump(res, output, HIGHEST_PROTOCOL)
except HttpError as http_error:
self._change_key(http_error)
return res
def _change_key(self, http_error: HttpError):
'''
Increase key_idx to use next key.
:param http_error: raised by google api client
:raise AllKeysReachLimit if all keys reached limit
:return: None
'''
self.key_idx += 1
if self.key_idx > len(self.keys) - 1:
raise AllKeysReachLimit(http_error)
self.api_key = self.keys[self.key_idx][0]
self.cse_key = self.keys[self.key_idx][1]
self.service = build('customsearch', 'v1', developerKey=self.api_key)
def read_key_file(filepath: Path) -> [(str, str)]:
'''
Load key pairs (API key, Custom Search ID) from text file referenced
by filepath.
:param filepath: filepath of text file
:return: list of (API key, Custom Search ID) pairs
'''
keys = []
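    # Skip blank lines and '#' comments; each remaining line is split on a space into an (API key, Custom Search ID) tuple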
with open(filepath) as file:
keys += [tuple(line.split(' ')) for line in file.read().splitlines() if
line and not line.startswith('#')]
if len(keys) == 0:
raise AttributeError('File {} does not contain keys'.format(filepath))
    return keys | src/requester.py | 0.566738 | 0.296565
import pandas as pd
import re
from jinja2 import Environment, FileSystemLoader, select_autoescape
env = Environment(
loader=FileSystemLoader('templates'),
autoescape=select_autoescape(['html', 'xml'])
)
template = env.get_template("index.html")
the_index = pd.read_csv('annotation_index.csv').dropna()
class_names = list(the_index['class'])
groups = []
for group, group_index in the_index.groupby('group'):
rows = []
for i in group_index.itertuples():
claz = i[2] # "class"
png_href = i.image
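        # Derive the per-class HTML page name (.png -> .html) and the JPEG thumbnail path under images/<class>/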
page_href = re.sub(r'\.png','.html',png_href)
jpg_file = re.sub(r'.*/(\w+)\.png',r'\1.jpg',png_href)
jpg_path = f'images/{claz}/{jpg_file}'
rows.append({
'class_name': claz,
'img_src': jpg_path
})
group_class_names = [row['class_name'] for row in rows]
n = 4
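    # Chunk the thumbnails into rows of 4 for the gallery template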
rows = [rows[i:i + n] for i in range(0, len(rows), n)]
groups.append({
'name': group,
'class_names': group_class_names,
'rows': rows,
})
group_names = [group['name'] for group in groups]
context = {
'is_gallery': True,
'group_names': group_names,
'class_names': class_names,
'groups': groups
}
with open('html/index.html','w') as fout:
print(template.render(context), file=fout)
the_pages = pd.read_csv('wiki.csv')
for class_name, sdf in the_pages.groupby('class'):
with open(f'html/{class_name}.html', 'w') as fout:
images = []
for png_href in sdf['image']:
html_href = re.sub(r'\.png','.html',png_href)
pid = re.sub(r'.*/(\w+)\.png',r'\1',png_href)
jpg_file = f'images/{class_name}/{pid}.jpg'
images.append({
'pid': pid,
'src': jpg_file,
'href': html_href,
})
context = {
'is_gallery': False,
'class_name': class_name,
'group_names': group_names,
'class_names': class_names,
'images': images,
}
        print(template.render(context), file=fout) | index.py | 0.157752 | 0.154567
from unittest import mock
import io
import pytest
from django.core.management import call_command
from great_components.janitor.management.commands import helpers, settings_shake
@pytest.fixture(autouse=True)
def mock_get_settings_source_code():
patched = mock.patch.object(
helpers,
'get_settings_source_code',
return_value='EXAMPLE_A = env.bool("EXAMPLE_A")'
)
yield patched.start()
patched.stop()
@pytest.fixture(autouse=True)
def mock_client():
patched = mock.patch('hvac.Client', mock.Mock(is_authenticated=True))
yield patched.start()
patched.stop()
@pytest.fixture(autouse=True)
def mock_get_secrets():
patched = mock.patch.object(
helpers,
'get_secrets',
return_value={'EXAMPLE_A': True, 'EXAMPLE_B': False}
)
yield patched.start()
patched.stop()
@pytest.fixture(autouse=True)
def mock_vulture():
mock_vulture = mock.Mock()
mock_vulture().report.return_value = ['FOO_BAR']
patched = mock.patch.object(helpers, 'Vulture', mock_vulture)
yield patched.start()
patched.stop()
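# Each test below runs the settings_shake management command and asserts on the report text it writes to stdout.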
def test_settings_shake_obsolete(settings):
out = io.StringIO()
call_command(
'settings_shake',
project='example-project',
environment='example-environment',
token='secret-token',
domain='example.com',
stdout=out
)
out.seek(0)
result = out.read()
assert 'EXAMPLE_A' not in result
assert 'EXAMPLE_B' in result
def test_settings_shake_redundant(settings):
settings.ADMINS = []
out = io.StringIO()
call_command(
'settings_shake',
project='example-project',
environment='example-environment',
token='secret-token',
domain='example.com',
stdout=out
)
out.seek(0)
result = out.read()
assert 'ADMINS' in result
def test_settings_shake_unused(settings):
out = io.StringIO()
call_command(
'settings_shake',
project='example-project',
environment='example-environment',
token='secret-token',
domain='example.com',
stdout=out
)
out.seek(0)
result = out.read()
assert 'FOO_BAR' in result
@mock.patch.object(helpers, 'get_secrets_wizard')
def test_obsolete_wizard(mock_get_secrets_wizard, settings):
settings.EXAMPLE_A = True
mock_get_secrets_wizard.return_value = {'EXAMPLE_A': True, 'EXAMPLE_B': False}
out = io.StringIO()
call_command(
'settings_shake',
wizard=True,
token='secret-token',
domain='example.com',
stdout=out
)
out.seek(0)
result = out.read()
assert 'EXAMPLE_A' not in result
assert 'EXAMPLE_B' in result
@mock.patch.object(settings_shake.Command, 'report_obsolete_vault_entries', mock.Mock(return_value=[]))
@mock.patch.object(settings_shake.Command, 'report_unused_settings', mock.Mock(return_value=[]))
@mock.patch.object(settings_shake.Command, 'report_redundant_settings', mock.Mock(return_value=[]))
def test_settings_shake_ok(settings):
out = io.StringIO()
call_command(
'settings_shake',
project='example-project',
environment='example-environment',
token='secret-token',
domain='example.com',
stdout=out
)
out.seek(0)
result = out.read()
assert 'No obsolete vault entries found.' in result
assert 'No unused settings found.' in result
    assert 'No redundant settings found.' in result | tests/janitor/management/commands/test_settings_shake.py | 0.641647 | 0.359027
from django import VERSION
from django.db.models.signals import post_save, pre_save
def reset_instance(instance, *args, **kwargs):
"""
Called on the post_save signal. Calls the instance's _reset_state method
"""
instance._reset_state()
class DirtyFieldsMixin(object):
"""
Gives dirty field tracking ability to models, also implements a save_dirty
method which updates only the dirty fields using QuerySet.update - useful
for multi-process or multi-worker setups where save() will actually update
all fields, potentially overriding changes by other workers while the
current worker has the object open.
"""
def __init__(self, *args, **kwargs):
super(DirtyFieldsMixin, self).__init__(*args, **kwargs)
dispatch_uid = '%s-DirtyFieldsMixin-sweeper' % self.__class__.__name__
post_save.connect(reset_instance, sender=self.__class__,
dispatch_uid=dispatch_uid)
self._reset_state()
def _reset_state(self, *args, **kwargs):
self._original_state = self._as_dict()
def _as_dict(self):
# For relations, saves all fk values too so that we can update fk by
# id, e.g. obj.foreignkey_id = 4
if self._deferred:
return {}
return dict([(f.name, f.to_python(getattr(self, f.attname))) for f in self._meta.local_fields])
def get_changed_values(self):
return dict([(field, getattr(self, field)) for field in self.dirty_fields])
@property
def dirty_fields(self):
"""
Returns a list of keys that have changed
"""
if self._deferred:
raise TypeError('Cant be used with deferred objects')
new_state = self._as_dict()
        return tuple(k for k, v in self._original_state.items() if v != new_state[k])
@property
def is_dirty(self):
if self._state.adding:
return True
return bool(self.dirty_fields)
def save_dirty(self):
"""
An alternative to save, instead writing every field again, only updates
the dirty fields via QuerySet.update
"""
if not self.pk:
self.save()
updated = 1
else:
changed_values = self.get_changed_values()
if len(changed_values.keys()) == 0:
return False
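            # QuerySet.update() bypasses Model.save(), so pre_save/post_save signals are emitted manually around the update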
pre_save.send(sender=self.__class__, instance=self)
# Detect if updating relationship field_ids directly
# If related field object itself has changed then the field_id
# also changes, in which case we detect and ignore the field_id
# change, otherwise we'll reload the object again later unnecessarily
rel_fields = dict([(f.column, f) for f in self._meta.fields if f.rel])
updated_rel_ids = []
            for field_name in list(changed_values.keys()):  # copy the keys: entries are deleted inside the loop
if field_name in rel_fields.keys():
rel_field = rel_fields[field_name]
value = changed_values[rel_field.column]
obj_value = getattr(self, rel_field.name).pk
del changed_values[rel_field.column]
changed_values[rel_field.name] = value
if value != obj_value:
updated_rel_ids.append(rel_field.column)
# Maps db column names back to field names if they differ
field_map = dict([(f.column, f.name) for f in self._meta.fields if f.db_column])
            for field_from, field_to in field_map.items():
if field_from in changed_values:
changed_values[field_to] = changed_values[field_from]
del changed_values[field_from]
updated = self.__class__.objects.filter(pk=self.pk).update(**changed_values)
# Reload updated relationships
for field_name in updated_rel_ids:
field = rel_fields[field_name]
field_pk = getattr(self, field_name)
rel_obj = field.related.parent_model.objects.get(pk=field_pk)
setattr(self, field.name, rel_obj)
self._reset_state()
post_save.send(sender=self.__class__, instance=self, created=False)
return updated == 1
# Django 1.5 added support for updating only specified fields, this fails in
# older versions.
if VERSION >= (1, 5):
def save(self, *args, **kwargs):
if not self._state.adding:
kwargs['update_fields'] = self.dirty_fields
return super(DirtyFieldsMixin, self).save(*args, **kwargs)
    DirtyFieldsMixin.save = save | dirtyfields/dirtyfields.py | 0.757256 | 0.176246
from osmclient.common import utils
from osmclient.common import wait as WaitForStatus
from osmclient.common.exceptions import ClientException
from osmclient.common.exceptions import NotFound
import json
class SdnController(object):
def __init__(self, http=None, client=None):
self._http = http
self._client = client
self._apiName = '/admin'
self._apiVersion = '/v1'
self._apiResource = '/sdns'
self._apiBase = '{}{}{}'.format(self._apiName,
self._apiVersion, self._apiResource)
# SDNC '--wait' option
def _wait(self, id, deleteFlag=False):
# Endpoint to get operation status
apiUrlStatus = '{}{}{}'.format(self._apiName, self._apiVersion, '/sdns')
# Wait for status for SDN instance creation/update/deletion
WaitForStatus.wait_for_status(
'SDNC',
str(id),
WaitForStatus.TIMEOUT_SDNC_OPERATION,
apiUrlStatus,
self._http.get2_cmd,
deleteFlag=deleteFlag)
def _get_id_for_wait(self, name):
# Returns id of name, or the id itself if given as argument
for sdnc in self.list():
if name == sdnc['_id']:
return sdnc['_id']
for sdnc in self.list():
if name == sdnc['name']:
return sdnc['_id']
return ''
def create(self, name, sdn_controller, wait=False):
http_code, resp = self._http.post_cmd(endpoint=self._apiBase,
postfields_dict=sdn_controller)
#print 'HTTP CODE: {}'.format(http_code)
#print 'RESP: {}'.format(resp)
if http_code in (200, 201, 202, 204):
if resp:
resp = json.loads(resp)
if not resp or 'id' not in resp:
raise ClientException('unexpected response from server - {}'.format(
resp))
if wait:
# Wait for status for SDNC instance creation
self._wait(resp.get('id'))
print(resp['id'])
else:
msg = ""
if resp:
try:
msg = json.loads(resp)
except ValueError:
msg = resp
raise ClientException("failed to create SDN controller {} - {}".format(name, msg))
def update(self, name, sdn_controller, wait=False):
sdnc = self.get(name)
sdnc_id_for_wait = self._get_id_for_wait(name)
http_code, resp = self._http.put_cmd(endpoint='{}/{}'.format(self._apiBase,sdnc['_id']),
postfields_dict=sdn_controller)
# print 'HTTP CODE: {}'.format(http_code)
# print 'RESP: {}'.format(resp)
if http_code in (200, 201, 202, 204):
if wait:
# In this case, 'resp' always returns None, so 'resp['id']' cannot be used.
# Use the previously obtained id instead.
wait_id = sdnc_id_for_wait
                # Wait for status of the SDNC instance update
self._wait(wait_id)
else:
pass
else:
msg = ""
if resp:
try:
msg = json.loads(resp)
except ValueError:
msg = resp
raise ClientException("failed to update SDN controller {} - {}".format(name, msg))
def delete(self, name, force=False, wait=False):
sdn_controller = self.get(name)
sdnc_id_for_wait = self._get_id_for_wait(name)
querystring = ''
if force:
querystring = '?FORCE=True'
http_code, resp = self._http.delete_cmd('{}/{}{}'.format(self._apiBase,
sdn_controller['_id'], querystring))
#print 'HTTP CODE: {}'.format(http_code)
#print 'RESP: {}'.format(resp)
if http_code == 202:
if wait:
# Wait for status for SDNC instance deletion
self._wait(sdnc_id_for_wait, deleteFlag=True)
else:
print('Deletion in progress')
elif http_code == 204:
print('Deleted')
elif resp and 'result' in resp:
print('Deleted')
else:
msg = ""
if resp:
try:
msg = json.loads(resp)
except ValueError:
msg = resp
raise ClientException("failed to delete SDN controller {} - {}".format(name, msg))
def list(self, filter=None):
"""Returns a list of SDN controllers
"""
filter_string = ''
if filter:
filter_string = '?{}'.format(filter)
resp = self._http.get_cmd('{}{}'.format(self._apiBase,filter_string))
#print 'RESP: {}'.format(resp)
if resp:
return resp
return list()
def get(self, name):
"""Returns an SDN controller based on name or id
"""
if utils.validate_uuid4(name):
for sdnc in self.list():
if name == sdnc['_id']:
return sdnc
else:
for sdnc in self.list():
if name == sdnc['name']:
return sdnc
        raise NotFound("SDN controller {} not found".format(name)) | src/osmclient/sol005/sdncontroller.py | 0.352759 | 0.040846
from nltk.stem import WordNetLemmatizer
import fractions
import operator
from fuzzywuzzy import fuzz, process
import requests
import re
from ..models import FoodDescription, Tag
from . import fpdb
lemmer = WordNetLemmatizer()
def tokenize(text):
"""
    First splits on r',|\.(?![0-9])|and', strips the result, and removes empty tokens
:param text: the text to tokenize
:return: list of string tokens
"""
tokens = re.split(r',|\.(?![0-9])|and', text)
tokens = [t.strip() for t in tokens]
    tokens = [t for t in tokens if t != '']
return tokens
def ask_google_for_ndb_no(query):
text = requests.get('http://www.google.com/search?q=%s+food' % query).text
if 'USDA' not in text:
text = requests.get('http://www.google.com/search?q=%s' % query).text
if 'USDA' not in text:
return None
# limit between these two (see html of that url for reference)
first = text.find('Sources include:')
second = text.find('USDA')
if first < 0 or second < 0:
return None
text = text[first:second]
resp = re.search('qlookup%3D[0-9]+', text)
if resp is None:
return None
return int(resp.group(0).replace("qlookup%3D", ""))
def realtime_parse(query):
"""
returns a dictionary of parsed entry
dictionary has food item, quantity, and measure
"""
parts = re.split("\s+", query)
parts = [lemmer.lemmatize(part) for part in parts]
quantity, parts = get_quantity(parts)
item = FoodDescription.query.get(9040)
measure = item.measurements[0]
return {'item': {'id': item.id, 'desc': item.long_desc},
'measure': {'id': measure.id, 'desc': measure.description},
'quantity': quantity}
def get_quantity(parts):
""" returns a tuple (quantity, parts without quantity) """
quantity = 1
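    # Default to a quantity of 1 when no numeric or fractional token is found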
for i, part in enumerate(parts):
if re.match(r'[0-9]*\.*[0-9]', part) is not None:
try:
quantity = float(fractions.Fraction(part))
del parts[i]
            except (ValueError, ZeroDivisionError):
                pass
return (quantity, parts)
def realtime_parse_autocomplete(db, query):
parts = re.split("\s+", query)
parts = [lemmer.lemmatize(part) for part in parts]
quantity, parts = get_quantity(parts)
db_query = ' '.join(parts)
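    # Full-text search over food descriptions; each match yields one suggestion per known measurement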
foods = fpdb.desc_fts(db, db_query, 15)
res = []
for f in foods:
for ms in f.measurements:
res.append({'value': str(quantity) + " " + ms.description + " x " + f.long_desc,
'data': {
'food-id': f.id,
'measure-id': ms.id,
'quantity': quantity
}})
return {"suggestions": res}
def food_parse(db, query):
"""
returns a jsonable dict for the autocomplete js library we are using
for a text field used to select a food item
:param db:
:param query:
:return:
"""
parts = re.split("\s+", query)
parts = [lemmer.lemmatize(part) for part in parts]
db_query = ' '.join(parts)
foods = fpdb.desc_fts(db, db_query, 15)
res = []
for f in foods:
res.append({'value': f.long_desc,
'data': {
'food-id': f.id
}})
    return {"suggestions": res} | app/fplib/nlp.py | 0.511473 | 0.257427
import gzip
import itertools
import os
import re
import struct
import tempfile
import typing as ty
import urllib.request
from fastnumbers import int
from filefetcher.manager import BuildTask
import lmdb
import msgpack
import pysam
RSID_CAPTURE = re.compile(r'RS=(\d+);?')
# The new dbSNP format uses refseq identifiers. Building a lookup based on human friendly chrom/pos/ref/alt
# requires converting human identifiers to and from refseq names
VERSIONLESS_CHROMS = {
'NC_000001': '1',
'NC_000002': '2',
'NC_000003': '3',
'NC_000004': '4',
'NC_000005': '5',
'NC_000006': '6',
'NC_000007': '7',
'NC_000008': '8',
'NC_000009': '9',
'NC_000010': '10',
'NC_000011': '11',
'NC_000012': '12',
'NC_000013': '13',
'NC_000014': '14',
'NC_000015': '15',
'NC_000016': '16',
'NC_000017': '17',
'NC_000018': '18',
'NC_000019': '19',
'NC_000020': '20',
'NC_000021': '21',
'NC_000022': '22',
'NC_000023': 'X',
'NC_000024': 'Y',
'NC_012920': 'MT',
}
def make_chrom_to_contigs(tabix_file: pysam.TabixFile) -> dict:
"""
In order to address a tabix file by position, we need the exact contig name. dbSNP builds use a versioned
identifier that changes across builds and versions.
This generates a lookup for converting human friendly chromosome names to things that can be used by a
particular dbSNP file. It automatically adjusts for small build differences and excludes non-chrom contigs
"""
return {
# Eg '1': 'NC_000001.10',
VERSIONLESS_CHROMS[c.split('.', 1)[0]]: c
for c in tabix_file.contigs if c.startswith('NC')
}
def fetch_regions_sequentially(source_fn: str, sample_regions: ty.Tuple[str, int, int]) -> ty.Iterator[str]:
"""Instead of loading an entire gzip file, create an iterator over a set of regions"""
source = pysam.TabixFile(source_fn)
human_chrom_to_tabix = make_chrom_to_contigs(source)
for region in sample_regions:
chrom, start, end = region
dbsnp_chrom = human_chrom_to_tabix[chrom]
for line in source.fetch(dbsnp_chrom, start, end):
yield line
def line_parser(row) -> ty.Tuple[str, int, str, str, int]:
"""For new dbSNP format, builds 152+"""
fields = row.split()
# the new dbSNP format uses refseq ids + version; convert these to human-readable chromosome names
chrom = VERSIONLESS_CHROMS[fields[0].split('.')[0]]
pos = int(fields[1])
ref = fields[3]
alt = fields[4]
# Get the RSID from the VCF info field, in case the id column is ambiguous for some reason
rsid = int(RSID_CAPTURE.search(fields[7]).group(1))
return (chrom, pos, ref, alt, rsid)
def make_file_iterator(handle: ty.Iterable) -> ty.Iterator[ty.Tuple[str, int, str, str, int]]:
"""
Parse the set of lines for some iterator (eg over contents of an open text, gz, etc file), and only give back the
ones relevant to our chrom/pos/ref/alt use case
"""
for row in handle:
if row.startswith('#'):
# Skip header rows
continue
if not row.startswith('NC_'):
# After that, only parse the variants labeled "NC", not the "unplaced scaffold" items
            return  # PEP 479: use return instead of raising StopIteration inside a generator
yield line_parser(row)
def make_group_iterator(file_iterator) -> ty.Iterable[ty.Tuple[str, int, dict]]:
"""Create an iterator that returns all possible ref/alt : rsid pairs for that position"""
chrom = None
for position, position_records in itertools.groupby(file_iterator, key=lambda x: x[1]):
# assumes that position is the only thing that will change, else use chr/pos: (x[0], x[1])):
position_contents = {}
# Then push final lookup for that chr:pos into the database
for record in position_records:
# The same line can indicate one or more ref/alts
chrom, pos, ref, alt, rsid = record
ref_alts = ['{}/{}'.format(ref_option, alt_option)
for alt_option in alt.split(',')
for ref_option in ref.split('.')]
for key in ref_alts:
position_contents[key] = rsid
yield chrom, position, position_contents
def make_databases(env) -> dict:
known = [
'1', '2', '3', '4', '5', '6', '7', '8', '9', '10',
'11', '12', '13', '14', '15', '16', '17', '18', '19', '20',
'21', '22', 'X', 'Y', 'MT'
]
return {k: env.open_db(bytes(k, "utf8"), integerkey=True) for k in known}
def main(source_fn: str, out_fn: str, n_chroms=25, sample_regions=None):
"""Perform this task in isolation for any dbsnp file"""
if os.path.exists(out_fn):
# LMDB would happily append / replace keys, but we want to create these files once and keep file size small
raise Exception('The requested output file already exists.')
# To reduce disk usage, each chromosome is stored internally as a separate database (the same chrom key info isn't
# stored redundantly)
    env = lmdb.open(out_fn, subdir=False, max_dbs=n_chroms, map_size=int(25e9))  # map_size as an int (~25 GB)
db_handles = make_databases(env)
if sample_regions:
iterator = fetch_regions_sequentially(source_fn, sample_regions)
else:
iterator = gzip.open(source_fn, "rt")
with env.begin(write=True) as txn:
all_lines = make_file_iterator(iterator)
group_iterator = make_group_iterator(all_lines)
for chrom, position, position_contents in group_iterator:
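            # Keys are positions packed as 4-byte unsigned ints so each per-chromosome database can use lmdb integerkey ordering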
# Value is not a primitive; serialize it efficiently
key = struct.pack('I', position)
value = msgpack.packb(position_contents, use_bin_type=True)
txn.put(key, value, db=db_handles[chrom])
class MakeSnpToRsid(BuildTask):
"""A packaged filefetcher build task that also downloads the necessary input files"""
def __init__(self, genome_build, sample_regions=None):
self.genome_build = genome_build
self.regions = sample_regions or None
def get_assets(self):
# Download the appropriate dbsnp file for a given genome build if does not exist
# Uses a system temp directory rather than the build folder in case file is needed between runs
# FIXME: The directory structure of new dbSNP is still too new to know what will change between releases, so
# we don't yet have a generic way to get build information without downloading the files. For
# now this is a hardcoded reference.
if self.genome_build == 'GRCh37':
source_url = 'ftp://ftp.ncbi.nih.gov/snp/redesign/latest_release/VCF/GCF_000001405.25.gz'
elif self.genome_build == 'GRCh38':
source_url = 'ftp://ftp.ncbi.nih.gov/snp/redesign/latest_release/VCF/GCF_000001405.38.gz'
else:
raise Exception('Unknown build')
dest_fn = os.path.join(tempfile.gettempdir(), os.path.basename(source_url))
if not os.path.exists(dest_fn):
# Download assets to a tempfile directory
urllib.request.urlretrieve(
source_url,
dest_fn
)
# And also Tabix index
urllib.request.urlretrieve(
source_url + '.tbi',
dest_fn + '.tbi'
)
return dest_fn
def build(self, manager, item_type: str, build_folder: str, **kwargs):
# FIXME: Hardcode dbsnp build until we build a more reliable way to get this info.
dbsnp_build = 'b153'
source_fn = self.get_assets()
print('Building to: ', build_folder)
dest_fn = os.path.join(
build_folder,
'{}_{}_{}.lmdb'.format(item_type, self.genome_build, dbsnp_build)
)
main(source_fn, dest_fn, sample_regions=self.regions)
return dest_fn, {'dbsnp_build': dbsnp_build} | zorp/loaders/make_rsid_lookup.py | import gzip
import itertools
import os
import re
import struct
import tempfile
import typing as ty
import urllib.request
from fastnumbers import int
from filefetcher.manager import BuildTask
import lmdb
import msgpack
import pysam
RSID_CAPTURE = re.compile(r'RS=(\d+);?')
# The new dbSNP format uses refseq identifiers. Building a lookup based on human friendly chrom/pos/ref/alt
# requires converting human identifiers to and from refseq names
VERSIONLESS_CHROMS = {
'NC_000001': '1',
'NC_000002': '2',
'NC_000003': '3',
'NC_000004': '4',
'NC_000005': '5',
'NC_000006': '6',
'NC_000007': '7',
'NC_000008': '8',
'NC_000009': '9',
'NC_000010': '10',
'NC_000011': '11',
'NC_000012': '12',
'NC_000013': '13',
'NC_000014': '14',
'NC_000015': '15',
'NC_000016': '16',
'NC_000017': '17',
'NC_000018': '18',
'NC_000019': '19',
'NC_000020': '20',
'NC_000021': '21',
'NC_000022': '22',
'NC_000023': 'X',
'NC_000024': 'Y',
'NC_012920': 'MT',
}
def make_chrom_to_contigs(tabix_file: pysam.TabixFile) -> dict:
"""
In order to address a tabix file by position, we need the exact contig name. dbSNP builds use a versioned
identifier that changes across builds and versions.
This generates a lookup for converting human friendly chromosome names to things that can be used by a
particular dbSNP file. It automatically adjusts for small build differences and excludes non-chrom contigs
"""
return {
# Eg '1': 'NC_000001.10',
VERSIONLESS_CHROMS[c.split('.', 1)[0]]: c
for c in tabix_file.contigs if c.startswith('NC')
}
def fetch_regions_sequentially(source_fn: str, sample_regions: ty.Tuple[str, int, int]) -> ty.Iterator[str]:
"""Instead of loading an entire gzip file, create an iterator over a set of regions"""
source = pysam.TabixFile(source_fn)
human_chrom_to_tabix = make_chrom_to_contigs(source)
for region in sample_regions:
chrom, start, end = region
dbsnp_chrom = human_chrom_to_tabix[chrom]
for line in source.fetch(dbsnp_chrom, start, end):
yield line
def line_parser(row) -> ty.Tuple[str, int, str, str, int]:
"""For new dbSNP format, builds 152+"""
fields = row.split()
# the new dbSNP format uses refseq ids + version; convert these to human-readable chromosome names
chrom = VERSIONLESS_CHROMS[fields[0].split('.')[0]]
pos = int(fields[1])
ref = fields[3]
alt = fields[4]
# Get the RSID from the VCF info field, in case the id column is ambiguous for some reason
rsid = int(RSID_CAPTURE.search(fields[7]).group(1))
return (chrom, pos, ref, alt, rsid)
def make_file_iterator(handle: ty.Iterable) -> ty.Iterator[ty.Tuple[str, int, str, str, int]]:
"""
Parse the set of lines for some iterator (eg over contents of an open text, gz, etc file), and only give back the
ones relevant to our chrom/pos/ref/alt use case
"""
for row in handle:
if row.startswith('#'):
# Skip header rows
continue
if not row.startswith('NC_'):
# After that, only parse the variants labeled "NC", not the "unplaced scaffold" items
raise StopIteration
yield line_parser(row)
def make_group_iterator(file_iterator) -> ty.Iterable[ty.Tuple[str, int, dict]]:
"""Create an iterator that returns all possible ref/alt : rsid pairs for that position"""
chrom = None
for position, position_records in itertools.groupby(file_iterator, key=lambda x: x[1]):
# assumes that position is the only thing that will change, else use chr/pos: (x[0], x[1])):
position_contents = {}
# Then push final lookup for that chr:pos into the database
for record in position_records:
# The same line can indicate one or more ref/alts
chrom, pos, ref, alt, rsid = record
ref_alts = ['{}/{}'.format(ref_option, alt_option)
for alt_option in alt.split(',')
for ref_option in ref.split('.')]
for key in ref_alts:
position_contents[key] = rsid
yield chrom, position, position_contents
def make_databases(env) -> dict:
known = [
'1', '2', '3', '4', '5', '6', '7', '8', '9', '10',
'11', '12', '13', '14', '15', '16', '17', '18', '19', '20',
'21', '22', 'X', 'Y', 'MT'
]
return {k: env.open_db(bytes(k, "utf8"), integerkey=True) for k in known}
def main(source_fn: str, out_fn: str, n_chroms=25, sample_regions=None):
"""Perform this task in isolation for any dbsnp file"""
if os.path.exists(out_fn):
# LMDB would happily append / replace keys, but we want to create these files once and keep file size small
raise Exception('The requested output file already exists.')
# To reduce disk usage, each chromosome is stored internally as a separate database (the same chrom key info isn't
# stored redundantly)
env = lmdb.open(out_fn, subdir=False, max_dbs=n_chroms, map_size=25e9)
db_handles = make_databases(env)
if sample_regions:
iterator = fetch_regions_sequentially(source_fn, sample_regions)
else:
iterator = gzip.open(source_fn, "rt")
with env.begin(write=True) as txn:
all_lines = make_file_iterator(iterator)
group_iterator = make_group_iterator(all_lines)
for chrom, position, position_contents in group_iterator:
# Value is not a primitive; serialize it efficiently
key = struct.pack('I', position)
value = msgpack.packb(position_contents, use_bin_type=True)
txn.put(key, value, db=db_handles[chrom])
class MakeSnpToRsid(BuildTask):
"""A packaged filefetcher build task that also downloads the necessary input files"""
def __init__(self, genome_build, sample_regions=None):
self.genome_build = genome_build
self.regions = sample_regions or None
def get_assets(self):
# Download the appropriate dbsnp file for a given genome build if does not exist
# Uses a system temp directory rather than the build folder in case file is needed between runs
# FIXME: The directory structure of new dbSNP is still too new to know what will change between releases, so
# we don't yet have a generic way to get build information without downloading the files. For
# now this is a hardcoded reference.
if self.genome_build == 'GRCh37':
source_url = 'ftp://ftp.ncbi.nih.gov/snp/redesign/latest_release/VCF/GCF_000001405.25.gz'
elif self.genome_build == 'GRCh38':
source_url = 'ftp://ftp.ncbi.nih.gov/snp/redesign/latest_release/VCF/GCF_000001405.38.gz'
else:
raise Exception('Unknown build')
dest_fn = os.path.join(tempfile.gettempdir(), os.path.basename(source_url))
if not os.path.exists(dest_fn):
# Download assets to a tempfile directory
urllib.request.urlretrieve(
source_url,
dest_fn
)
# And also Tabix index
urllib.request.urlretrieve(
source_url + '.tbi',
dest_fn + '.tbi'
)
return dest_fn
def build(self, manager, item_type: str, build_folder: str, **kwargs):
# FIXME: Hardcode dbsnp build until we build a more reliable way to get this info.
dbsnp_build = 'b153'
source_fn = self.get_assets()
print('Building to: ', build_folder)
dest_fn = os.path.join(
build_folder,
'{}_{}_{}.lmdb'.format(item_type, self.genome_build, dbsnp_build)
)
main(source_fn, dest_fn, sample_regions=self.regions)
return dest_fn, {'dbsnp_build': dbsnp_build} | 0.474631 | 0.211539 |
import os
import gzip
import tarfile
import pickle
import numpy as np
import matplotlib.pyplot as plt
from dezero.utils import get_file, cache_dir
from dezero.transforms import Compose, Flatten, ToFloat, Normalize
# =============================================================================
# Base class
# =============================================================================
class Dataset():
def __init__(self, train=True, transform=None, target_transform=None):
self.train = train
self.transform = transform
self.target_transform = target_transform
if self.transform is None:
self.transform = lambda x: x
if self.target_transform is None:
self.target_transform = lambda x: x
self.data = None
self.label = None
self.prepare()
def __getitem__(self, index):
assert np.isscalar(index)
if self.label is None:
return self.transform(self.data[index]), None
else:
return self.transform(self.data[index]), self.target_transform(
self.label[index])
def __len__(self):
return len(self.data)
def prepare(self):
pass
# =============================================================================
# Toy datasets
# =============================================================================
def get_spiral(train=True):
seed = 1984 if train else 2020
np.random.seed(seed=seed)
num_data, num_class, input_dim = 100, 3, 2
data_size = num_class * num_data
x = np.zeros((data_size, input_dim), dtype=np.float32)
t = np.zeros(data_size, dtype=np.int)
for j in range(num_class):
for i in range(num_data):
rate = i / num_data
radius = 1.0 * rate
theta = j * 4.0 + 4.0 * rate + np.random.randn() * 0.2
ix = num_data * j + i
x[ix] = np.array([radius * np.sin(theta),
radius * np.cos(theta)]).flatten()
t[ix] = j
# Shuffle
indices = np.random.permutation(num_data * num_class)
x = x[indices]
t = t[indices]
return x, t
class Spiral(Dataset):
def prepare(self):
self.data, self.label = get_spiral(self.train)
# =============================================================================
# MNIST-like dataset: MNIST / CIFAR /
# =============================================================================
class MNIST(Dataset):
def __init__(self,
train=True,
transform=Compose([Flatten(),
ToFloat(),
Normalize(0., 255.)]),
target_transform=None):
super().__init__(train, transform, target_transform)
def prepare(self):
url = 'http://yann.lecun.com/exdb/mnist/'
train_files = {
'target': 'train-images-idx3-ubyte.gz',
'label': 'train-labels-idx1-ubyte.gz'
}
test_files = {
'target': 't10k-images-idx3-ubyte.gz',
'label': 't10k-labels-idx1-ubyte.gz'
}
files = train_files if self.train else test_files
data_path = get_file(url + files['target'])
label_path = get_file(url + files['label'])
self.data = self._load_data(data_path)
self.label = self._load_label(label_path)
def _load_label(self, filepath):
with gzip.open(filepath, 'rb') as f:
labels = np.frombuffer(f.read(), np.uint8, offset=8)
return labels
def _load_data(self, filepath):
with gzip.open(filepath, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
data = data.reshape(-1, 1, 28, 28)
return data
def show(self, row=10, col=10):
H, W = 28, 28
img = np.zeros((H * row, W * col))
for r in range(row):
for c in range(col):
img[r * H:(r + 1) * H,
c * W:(c + 1) * W] = self.data[np.random.randint(
0,
len(self.data) - 1)].reshape(H, W)
plt.imshow(img, cmap='gray', interpolation='nearest')
plt.axis('off')
plt.show()
@staticmethod
def labels():
return {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9'
}
# =============================================================================
# Big datasets
# =============================================================================
class ImageNet(Dataset):
def __init__(self):
NotImplemented
@staticmethod
def labels():
url = 'https://gist.githubusercontent.com/yrevar/942d3a0ac09ec9e5eb3a/raw/238f720ff059c1f82f368259d1ca4ffa5dd8f9f5/imagenet1000_clsidx_to_labels.txt'
path = get_file(url)
with open(path, 'r') as f:
labels = eval(f.read())
return labels
# =============================================================================
# Sequential datasets: SinCurve, Shapekspare
# =============================================================================
class SinCurve(Dataset):
def prepare(self):
num_data = 1000
dtype = np.float64
x = np.linspace(0, 2 * np.pi, num_data)
noise_range = (-0.05, 0.05)
noise = np.random.uniform(noise_range[0], noise_range[1], size=x.shape)
if self.train:
y = np.sin(x) + noise
else:
y = np.cos(x)
y = y.astype(dtype)
self.data = y[:-1][:, np.newaxis]
self.label = y[1:][:, np.newaxis] | dezero/datasets.py | import os
import gzip
import tarfile
import pickle
import numpy as np
import matplotlib.pyplot as plt
from dezero.utils import get_file, cache_dir
from dezero.transforms import Compose, Flatten, ToFloat, Normalize
# =============================================================================
# Base class
# =============================================================================
class Dataset():
def __init__(self, train=True, transform=None, target_transform=None):
self.train = train
self.transform = transform
self.target_transform = target_transform
if self.transform is None:
self.transform = lambda x: x
if self.target_transform is None:
self.target_transform = lambda x: x
self.data = None
self.label = None
self.prepare()
def __getitem__(self, index):
assert np.isscalar(index)
if self.label is None:
return self.transform(self.data[index]), None
else:
return self.transform(self.data[index]), self.target_transform(
self.label[index])
def __len__(self):
return len(self.data)
def prepare(self):
pass
# =============================================================================
# Toy datasets
# =============================================================================
def get_spiral(train=True):
seed = 1984 if train else 2020
np.random.seed(seed=seed)
num_data, num_class, input_dim = 100, 3, 2
data_size = num_class * num_data
x = np.zeros((data_size, input_dim), dtype=np.float32)
t = np.zeros(data_size, dtype=np.int)
for j in range(num_class):
for i in range(num_data):
rate = i / num_data
radius = 1.0 * rate
theta = j * 4.0 + 4.0 * rate + np.random.randn() * 0.2
ix = num_data * j + i
x[ix] = np.array([radius * np.sin(theta),
radius * np.cos(theta)]).flatten()
t[ix] = j
# Shuffle
indices = np.random.permutation(num_data * num_class)
x = x[indices]
t = t[indices]
return x, t
class Spiral(Dataset):
def prepare(self):
self.data, self.label = get_spiral(self.train)
# =============================================================================
# MNIST-like dataset: MNIST / CIFAR /
# =============================================================================
class MNIST(Dataset):
def __init__(self,
train=True,
transform=Compose([Flatten(),
ToFloat(),
Normalize(0., 255.)]),
target_transform=None):
super().__init__(train, transform, target_transform)
def prepare(self):
url = 'http://yann.lecun.com/exdb/mnist/'
train_files = {
'target': 'train-images-idx3-ubyte.gz',
'label': 'train-labels-idx1-ubyte.gz'
}
test_files = {
'target': 't10k-images-idx3-ubyte.gz',
'label': 't10k-labels-idx1-ubyte.gz'
}
files = train_files if self.train else test_files
data_path = get_file(url + files['target'])
label_path = get_file(url + files['label'])
self.data = self._load_data(data_path)
self.label = self._load_label(label_path)
def _load_label(self, filepath):
with gzip.open(filepath, 'rb') as f:
labels = np.frombuffer(f.read(), np.uint8, offset=8)
return labels
def _load_data(self, filepath):
with gzip.open(filepath, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
data = data.reshape(-1, 1, 28, 28)
return data
def show(self, row=10, col=10):
H, W = 28, 28
img = np.zeros((H * row, W * col))
for r in range(row):
for c in range(col):
img[r * H:(r + 1) * H,
c * W:(c + 1) * W] = self.data[np.random.randint(
0,
len(self.data) - 1)].reshape(H, W)
plt.imshow(img, cmap='gray', interpolation='nearest')
plt.axis('off')
plt.show()
@staticmethod
def labels():
return {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9'
}
# =============================================================================
# Big datasets
# =============================================================================
class ImageNet(Dataset):
def __init__(self):
NotImplemented
@staticmethod
def labels():
url = 'https://gist.githubusercontent.com/yrevar/942d3a0ac09ec9e5eb3a/raw/238f720ff059c1f82f368259d1ca4ffa5dd8f9f5/imagenet1000_clsidx_to_labels.txt'
path = get_file(url)
with open(path, 'r') as f:
labels = eval(f.read())
return labels
# =============================================================================
# Sequential datasets: SinCurve, Shapekspare
# =============================================================================
class SinCurve(Dataset):
def prepare(self):
num_data = 1000
dtype = np.float64
x = np.linspace(0, 2 * np.pi, num_data)
noise_range = (-0.05, 0.05)
noise = np.random.uniform(noise_range[0], noise_range[1], size=x.shape)
if self.train:
y = np.sin(x) + noise
else:
y = np.cos(x)
y = y.astype(dtype)
self.data = y[:-1][:, np.newaxis]
self.label = y[1:][:, np.newaxis] | 0.599016 | 0.431165 |
from typing import Dict
from base_collectors import JenkinsPluginSourceUpToDatenessCollector, SourceCollector
from collector_utilities.type import URL
from source_model import Entity, SourceMeasurement, SourceResponses
class OWASPDependencyCheckJenkinsPluginSecurityWarnings(SourceCollector):
"""OWASP Dependency Check Jenkins plugin security warnings collector."""
async def _api_url(self) -> URL:
api_url = await super()._api_url()
return URL(f"{api_url}/lastSuccessfulBuild/dependency-check-jenkins-pluginResult/api/json?depth=1")
async def _landing_url(self, responses: SourceResponses) -> URL:
return URL(f"{await super()._api_url()}/lastSuccessfulBuild/dependency-check-jenkins-pluginResult")
async def _parse_source_responses(self, responses: SourceResponses) -> SourceMeasurement:
json = await responses[0].json()
severities = self._parameter("severities")
warnings = [warning for warning in json.get("warnings", []) if warning["priority"].lower() in severities]
entities: Dict[str, Entity] = {}
for warning in warnings:
priority = warning["priority"].lower()
file_path = warning["fileName"]
if file_path in entities:
entities[file_path]["nr_vulnerabilities"] = str(int(entities[file_path]["nr_vulnerabilities"]) + 1)
entities[file_path]["highest_severity"] = \
self.__highest_severity(str(entities[file_path]["highest_severity"]).lower(), priority).capitalize()
else:
entities[file_path] = Entity(
key=file_path, file_path=file_path, highest_severity=priority.capitalize(), nr_vulnerabilities="1")
return SourceMeasurement(entities=list(entities.values()))
def __highest_severity(self, severity1: str, severity2: str) -> str:
"""Return the highest of the two severities."""
severities = self._data_model["sources"][self.source_type]["parameters"]["severities"]["values"]
return severity1 if severities.index(severity1) >= severities.index(severity2) else severity2
class OWASPDependencyCheckJenkinsPluginSourceUpToDateness(JenkinsPluginSourceUpToDatenessCollector):
"""Collector to get the age of the OWASP Dependency Check Jenkins plugin report.""" | components/collector/src/source_collectors/api_source_collectors/owasp_dependency_check_jenkins_plugin.py |
from typing import Dict
from base_collectors import JenkinsPluginSourceUpToDatenessCollector, SourceCollector
from collector_utilities.type import URL
from source_model import Entity, SourceMeasurement, SourceResponses
class OWASPDependencyCheckJenkinsPluginSecurityWarnings(SourceCollector):
"""OWASP Dependency Check Jenkins plugin security warnings collector."""
async def _api_url(self) -> URL:
api_url = await super()._api_url()
return URL(f"{api_url}/lastSuccessfulBuild/dependency-check-jenkins-pluginResult/api/json?depth=1")
async def _landing_url(self, responses: SourceResponses) -> URL:
return URL(f"{await super()._api_url()}/lastSuccessfulBuild/dependency-check-jenkins-pluginResult")
async def _parse_source_responses(self, responses: SourceResponses) -> SourceMeasurement:
json = await responses[0].json()
severities = self._parameter("severities")
warnings = [warning for warning in json.get("warnings", []) if warning["priority"].lower() in severities]
entities: Dict[str, Entity] = {}
for warning in warnings:
priority = warning["priority"].lower()
file_path = warning["fileName"]
if file_path in entities:
entities[file_path]["nr_vulnerabilities"] = str(int(entities[file_path]["nr_vulnerabilities"]) + 1)
entities[file_path]["highest_severity"] = \
self.__highest_severity(str(entities[file_path]["highest_severity"]).lower(), priority).capitalize()
else:
entities[file_path] = Entity(
key=file_path, file_path=file_path, highest_severity=priority.capitalize(), nr_vulnerabilities="1")
return SourceMeasurement(entities=list(entities.values()))
def __highest_severity(self, severity1: str, severity2: str) -> str:
"""Return the highest of the two severities."""
severities = self._data_model["sources"][self.source_type]["parameters"]["severities"]["values"]
return severity1 if severities.index(severity1) >= severities.index(severity2) else severity2
class OWASPDependencyCheckJenkinsPluginSourceUpToDateness(JenkinsPluginSourceUpToDatenessCollector):
"""Collector to get the age of the OWASP Dependency Check Jenkins plugin report.""" | 0.875628 | 0.110351 |
from cliff import lister
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from osc_lib.utils import tags as _tag
from oslo_utils import uuidutils
from octaviaclient.osc.v2 import constants as const
from octaviaclient.osc.v2 import utils as v2_utils
HTTP_METHODS = ['GET', 'POST', 'DELETE', 'PUT', 'HEAD', 'OPTIONS', 'PATCH',
'CONNECT', 'TRACE']
HTTP_VERSIONS = [1.0, 1.1]
TYPE_CHOICES = ['PING', 'HTTP', 'TCP', 'HTTPS', 'TLS-HELLO',
'UDP-CONNECT', 'SCTP']
class CreateHealthMonitor(command.ShowOne):
"""Create a health monitor"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'pool',
metavar='<pool>',
help="Set the pool for the health monitor (name or ID)."
)
parser.add_argument(
'--name',
metavar='<name>',
help="Set the health monitor name."
)
parser.add_argument(
'--delay',
metavar='<delay>',
required=True,
help="Set the time in seconds, between sending probes to members."
)
parser.add_argument(
'--domain-name',
metavar='<domain_name>',
help=("Set the domain name, which be injected into the HTTP Host "
"Header to the backend server for HTTP health check.")
)
parser.add_argument(
'--expected-codes',
metavar='<codes>',
help="Set the list of HTTP status codes expected in response from "
"the member to declare it healthy."
)
parser.add_argument(
'--http-method',
metavar='{' + ','.join(HTTP_METHODS) + '}',
choices=HTTP_METHODS,
type=lambda s: s.upper(), # case insensitive
help="Set the HTTP method that the health monitor uses for "
"requests."
)
parser.add_argument(
'--http-version',
metavar='<http_version>',
choices=HTTP_VERSIONS,
type=float,
help="Set the HTTP version."
)
parser.add_argument(
'--timeout',
metavar='<timeout>',
required=True,
help="Set the maximum time, in seconds, that a monitor waits to "
"connect before it times out. This value must be less than "
"the delay value."
)
parser.add_argument(
'--max-retries',
metavar='<max_retries>',
type=int,
choices=range(1, 10),
required=True,
help="The number of successful checks before changing the "
"operating status of the member to ONLINE."
)
parser.add_argument(
'--url-path',
metavar='<url_path>',
help="Set the HTTP URL path of the request sent by the monitor to "
"test the health of a backend member."
)
parser.add_argument(
'--type',
metavar='{' + ','.join(TYPE_CHOICES) + '}',
required=True,
choices=TYPE_CHOICES,
type=lambda s: s.upper(), # case insensitive
help="Set the health monitor type."
)
parser.add_argument(
'--max-retries-down',
metavar='<max_retries_down>',
type=int,
choices=range(1, 10),
help="Set the number of allowed check failures before changing "
"the operating status of the member to ERROR."
)
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
'--enable',
action='store_true',
default=True,
help="Enable health monitor (default)."
)
admin_group.add_argument(
'--disable',
action='store_true',
default=None,
help="Disable health monitor."
)
parser.add_argument(
'--wait',
action='store_true',
help='Wait for action to complete.',
)
_tag.add_tag_option_to_parser_for_create(
parser, 'health monitor')
return parser
def take_action(self, parsed_args):
rows = const.MONITOR_ROWS
attrs = v2_utils.get_health_monitor_attrs(self.app.client_manager,
parsed_args)
body = {"healthmonitor": attrs}
data = self.app.client_manager.load_balancer.health_monitor_create(
json=body)
if parsed_args.wait:
pool = self.app.client_manager.load_balancer.pool_show(
data['healthmonitor']['pools'][0]['id'])
v2_utils.wait_for_active(
status_f=(self.app.client_manager.load_balancer.
load_balancer_show),
res_id=pool['loadbalancers'][0]['id']
)
data = {
'healthmonitor': (
self.app.client_manager.load_balancer.health_monitor_show(
data['healthmonitor']['id']))
}
formatters = {'pools': v2_utils.format_list,
'tags': v2_utils.format_list_flat}
return (rows,
(utils.get_dict_properties(data['healthmonitor'],
rows,
formatters=formatters)))
class DeleteHealthMonitor(command.Command):
"""Delete a health monitor"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'health_monitor',
metavar='<health_monitor>',
help="Health monitor to delete (name or ID)."
)
parser.add_argument(
'--wait',
action='store_true',
help='Wait for action to complete.',
)
return parser
def take_action(self, parsed_args):
attrs = v2_utils.get_health_monitor_attrs(self.app.client_manager,
parsed_args)
health_monitor_id = attrs.pop('health_monitor_id')
self.app.client_manager.load_balancer.health_monitor_delete(
health_monitor_id=health_monitor_id)
if parsed_args.wait:
v2_utils.wait_for_delete(
status_f=(self.app.client_manager.load_balancer.
health_monitor_show),
res_id=health_monitor_id
)
class ListHealthMonitor(lister.Lister):
"""List health monitors"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
_tag.add_tag_filtering_option_to_parser(parser, 'health monitor')
return parser
def take_action(self, parsed_args):
columns = const.MONITOR_COLUMNS
attrs = v2_utils.get_health_monitor_attrs(self.app.client_manager,
parsed_args)
data = self.app.client_manager.load_balancer.health_monitor_list(
**attrs)
formatters = {'pools': v2_utils.format_list}
return (columns,
(utils.get_dict_properties(s, columns, formatters=formatters)
for s in data['healthmonitors']))
class ShowHealthMonitor(command.ShowOne):
"""Show the details of a single health monitor"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'health_monitor',
metavar='<health_monitor>',
help='Name or UUID of the health monitor.'
)
return parser
def take_action(self, parsed_args):
rows = const.MONITOR_ROWS
data = None
if uuidutils.is_uuid_like(parsed_args.health_monitor):
try:
data = (
self.app.client_manager.load_balancer.health_monitor_show(
health_monitor_id=parsed_args.health_monitor))
except exceptions.NotFound:
pass
if data is None:
attrs = v2_utils.get_health_monitor_attrs(self.app.client_manager,
parsed_args)
health_monitor_id = attrs.pop('health_monitor_id')
data = self.app.client_manager.load_balancer.health_monitor_show(
health_monitor_id=health_monitor_id,
)
formatters = {'pools': v2_utils.format_list,
'tags': v2_utils.format_list_flat}
return (rows,
(utils.get_dict_properties(data, rows, formatters=formatters)))
class SetHealthMonitor(command.Command):
"""Update a health monitor"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'health_monitor',
metavar='<health_monitor>',
help="Health monitor to update (name or ID)."
)
parser.add_argument(
'--name',
metavar='<name>',
help="Set health monitor name."
)
parser.add_argument(
'--delay',
metavar='<delay>',
help="Set the time in seconds, between sending probes to members."
)
parser.add_argument(
'--domain-name',
metavar='<domain_name>',
help=("Set the domain name, which be injected into the HTTP Host "
"Header to the backend server for HTTP health check.")
)
parser.add_argument(
'--expected-codes',
metavar='<codes>',
help="Set the list of HTTP status codes expected in response from "
"the member to declare it healthy."
)
parser.add_argument(
'--http-method',
metavar='{' + ','.join(HTTP_METHODS) + '}',
choices=HTTP_METHODS,
type=lambda s: s.upper(), # case insensitive
help="Set the HTTP method that the health monitor uses for "
"requests."
)
parser.add_argument(
'--http-version',
metavar='<http_version>',
choices=HTTP_VERSIONS,
type=float,
help="Set the HTTP version."
)
parser.add_argument(
'--timeout',
metavar='<timeout>',
help="Set the maximum time, in seconds, that a monitor waits to "
"connect before it times out. This value must be less than "
"the delay value."
)
parser.add_argument(
'--max-retries',
metavar='<max_retries>',
type=int,
choices=range(1, 10),
help="Set the number of successful checks before changing the "
"operating status of the member to ONLINE."
)
parser.add_argument(
'--max-retries-down',
metavar='<max_retries_down>',
type=int,
choices=range(1, 10),
help="Set the number of allowed check failures before changing "
"the operating status of the member to ERROR."
)
parser.add_argument(
'--url-path',
metavar='<url_path>',
help="Set the HTTP URL path of the request sent by the monitor to "
"test the health of a backend member."
)
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
'--enable',
action='store_true',
default=None,
help="Enable health monitor."
)
admin_group.add_argument(
'--disable',
action='store_true',
default=None,
help="Disable health monitor."
)
parser.add_argument(
'--wait',
action='store_true',
help='Wait for action to complete.',
)
_tag.add_tag_option_to_parser_for_set(parser, 'health monitor')
return parser
def take_action(self, parsed_args):
attrs = v2_utils.get_health_monitor_attrs(self.app.client_manager,
parsed_args)
hm_id = attrs.pop('health_monitor_id')
v2_utils.set_tags_for_set(
self.app.client_manager.load_balancer.health_monitor_show,
hm_id, attrs, clear_tags=parsed_args.no_tag)
body = {'healthmonitor': attrs}
self.app.client_manager.load_balancer.health_monitor_set(
hm_id, json=body)
if parsed_args.wait:
v2_utils.wait_for_active(
status_f=(self.app.client_manager.load_balancer.
health_monitor_show),
res_id=hm_id
)
class UnsetHealthMonitor(command.Command):
"""Clear health monitor settings"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'health_monitor',
metavar='<health_monitor>',
help="Health monitor to update (name or ID)."
)
parser.add_argument(
'--domain-name',
action='store_true',
help="Clear the health monitor domain name."
)
parser.add_argument(
'--expected-codes',
action='store_true',
help="Reset the health monitor expected codes to the API default."
)
parser.add_argument(
'--http-method',
action='store_true',
help="Reset the health monitor HTTP method to the API default."
)
parser.add_argument(
'--http-version',
action='store_true',
help="Reset the health monitor HTTP version to the API default."
)
parser.add_argument(
'--max-retries-down',
action='store_true',
help="Reset the health monitor max retries down to the API "
"default."
)
parser.add_argument(
'--name',
action='store_true',
help="Clear the health monitor name."
)
parser.add_argument(
'--url-path',
action='store_true',
help="Clear the health monitor URL path."
)
parser.add_argument(
'--wait',
action='store_true',
help='Wait for action to complete.',
)
_tag.add_tag_option_to_parser_for_unset(parser, 'health monitor')
return parser
def take_action(self, parsed_args):
unset_args = v2_utils.get_unsets(parsed_args)
if not unset_args and not parsed_args.all_tag:
return
hm_id = v2_utils.get_resource_id(
self.app.client_manager.load_balancer.health_monitor_list,
'healthmonitors', parsed_args.health_monitor)
v2_utils.set_tags_for_unset(
self.app.client_manager.load_balancer.health_monitor_show,
hm_id, unset_args, clear_tags=parsed_args.all_tag)
body = {'healthmonitor': unset_args}
self.app.client_manager.load_balancer.health_monitor_set(
hm_id, json=body)
if parsed_args.wait:
v2_utils.wait_for_active(
status_f=(self.app.client_manager.load_balancer.
health_monitor_show),
res_id=hm_id
) | octaviaclient/osc/v2/health_monitor.py | from cliff import lister
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from osc_lib.utils import tags as _tag
from oslo_utils import uuidutils
from octaviaclient.osc.v2 import constants as const
from octaviaclient.osc.v2 import utils as v2_utils
HTTP_METHODS = ['GET', 'POST', 'DELETE', 'PUT', 'HEAD', 'OPTIONS', 'PATCH',
'CONNECT', 'TRACE']
HTTP_VERSIONS = [1.0, 1.1]
TYPE_CHOICES = ['PING', 'HTTP', 'TCP', 'HTTPS', 'TLS-HELLO',
'UDP-CONNECT', 'SCTP']
class CreateHealthMonitor(command.ShowOne):
"""Create a health monitor"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'pool',
metavar='<pool>',
help="Set the pool for the health monitor (name or ID)."
)
parser.add_argument(
'--name',
metavar='<name>',
help="Set the health monitor name."
)
parser.add_argument(
'--delay',
metavar='<delay>',
required=True,
help="Set the time in seconds, between sending probes to members."
)
parser.add_argument(
'--domain-name',
metavar='<domain_name>',
help=("Set the domain name, which be injected into the HTTP Host "
"Header to the backend server for HTTP health check.")
)
parser.add_argument(
'--expected-codes',
metavar='<codes>',
help="Set the list of HTTP status codes expected in response from "
"the member to declare it healthy."
)
parser.add_argument(
'--http-method',
metavar='{' + ','.join(HTTP_METHODS) + '}',
choices=HTTP_METHODS,
type=lambda s: s.upper(), # case insensitive
help="Set the HTTP method that the health monitor uses for "
"requests."
)
parser.add_argument(
'--http-version',
metavar='<http_version>',
choices=HTTP_VERSIONS,
type=float,
help="Set the HTTP version."
)
parser.add_argument(
'--timeout',
metavar='<timeout>',
required=True,
help="Set the maximum time, in seconds, that a monitor waits to "
"connect before it times out. This value must be less than "
"the delay value."
)
parser.add_argument(
'--max-retries',
metavar='<max_retries>',
type=int,
choices=range(1, 10),
required=True,
help="The number of successful checks before changing the "
"operating status of the member to ONLINE."
)
parser.add_argument(
'--url-path',
metavar='<url_path>',
help="Set the HTTP URL path of the request sent by the monitor to "
"test the health of a backend member."
)
parser.add_argument(
'--type',
metavar='{' + ','.join(TYPE_CHOICES) + '}',
required=True,
choices=TYPE_CHOICES,
type=lambda s: s.upper(), # case insensitive
help="Set the health monitor type."
)
parser.add_argument(
'--max-retries-down',
metavar='<max_retries_down>',
type=int,
choices=range(1, 10),
help="Set the number of allowed check failures before changing "
"the operating status of the member to ERROR."
)
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
'--enable',
action='store_true',
default=True,
help="Enable health monitor (default)."
)
admin_group.add_argument(
'--disable',
action='store_true',
default=None,
help="Disable health monitor."
)
parser.add_argument(
'--wait',
action='store_true',
help='Wait for action to complete.',
)
_tag.add_tag_option_to_parser_for_create(
parser, 'health monitor')
return parser
def take_action(self, parsed_args):
rows = const.MONITOR_ROWS
attrs = v2_utils.get_health_monitor_attrs(self.app.client_manager,
parsed_args)
body = {"healthmonitor": attrs}
data = self.app.client_manager.load_balancer.health_monitor_create(
json=body)
if parsed_args.wait:
pool = self.app.client_manager.load_balancer.pool_show(
data['healthmonitor']['pools'][0]['id'])
v2_utils.wait_for_active(
status_f=(self.app.client_manager.load_balancer.
load_balancer_show),
res_id=pool['loadbalancers'][0]['id']
)
data = {
'healthmonitor': (
self.app.client_manager.load_balancer.health_monitor_show(
data['healthmonitor']['id']))
}
formatters = {'pools': v2_utils.format_list,
'tags': v2_utils.format_list_flat}
return (rows,
(utils.get_dict_properties(data['healthmonitor'],
rows,
formatters=formatters)))
class DeleteHealthMonitor(command.Command):
"""Delete a health monitor"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'health_monitor',
metavar='<health_monitor>',
help="Health monitor to delete (name or ID)."
)
parser.add_argument(
'--wait',
action='store_true',
help='Wait for action to complete.',
)
return parser
def take_action(self, parsed_args):
attrs = v2_utils.get_health_monitor_attrs(self.app.client_manager,
parsed_args)
health_monitor_id = attrs.pop('health_monitor_id')
self.app.client_manager.load_balancer.health_monitor_delete(
health_monitor_id=health_monitor_id)
if parsed_args.wait:
v2_utils.wait_for_delete(
status_f=(self.app.client_manager.load_balancer.
health_monitor_show),
res_id=health_monitor_id
)
class ListHealthMonitor(lister.Lister):
"""List health monitors"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
_tag.add_tag_filtering_option_to_parser(parser, 'health monitor')
return parser
def take_action(self, parsed_args):
columns = const.MONITOR_COLUMNS
attrs = v2_utils.get_health_monitor_attrs(self.app.client_manager,
parsed_args)
data = self.app.client_manager.load_balancer.health_monitor_list(
**attrs)
formatters = {'pools': v2_utils.format_list}
return (columns,
(utils.get_dict_properties(s, columns, formatters=formatters)
for s in data['healthmonitors']))
class ShowHealthMonitor(command.ShowOne):
"""Show the details of a single health monitor"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'health_monitor',
metavar='<health_monitor>',
help='Name or UUID of the health monitor.'
)
return parser
def take_action(self, parsed_args):
rows = const.MONITOR_ROWS
data = None
if uuidutils.is_uuid_like(parsed_args.health_monitor):
try:
data = (
self.app.client_manager.load_balancer.health_monitor_show(
health_monitor_id=parsed_args.health_monitor))
except exceptions.NotFound:
pass
if data is None:
attrs = v2_utils.get_health_monitor_attrs(self.app.client_manager,
parsed_args)
health_monitor_id = attrs.pop('health_monitor_id')
data = self.app.client_manager.load_balancer.health_monitor_show(
health_monitor_id=health_monitor_id,
)
formatters = {'pools': v2_utils.format_list,
'tags': v2_utils.format_list_flat}
return (rows,
(utils.get_dict_properties(data, rows, formatters=formatters)))
class SetHealthMonitor(command.Command):
"""Update a health monitor"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'health_monitor',
metavar='<health_monitor>',
help="Health monitor to update (name or ID)."
)
parser.add_argument(
'--name',
metavar='<name>',
help="Set health monitor name."
)
parser.add_argument(
'--delay',
metavar='<delay>',
help="Set the time in seconds, between sending probes to members."
)
parser.add_argument(
'--domain-name',
metavar='<domain_name>',
help=("Set the domain name, which be injected into the HTTP Host "
"Header to the backend server for HTTP health check.")
)
parser.add_argument(
'--expected-codes',
metavar='<codes>',
help="Set the list of HTTP status codes expected in response from "
"the member to declare it healthy."
)
parser.add_argument(
'--http-method',
metavar='{' + ','.join(HTTP_METHODS) + '}',
choices=HTTP_METHODS,
type=lambda s: s.upper(), # case insensitive
help="Set the HTTP method that the health monitor uses for "
"requests."
)
parser.add_argument(
'--http-version',
metavar='<http_version>',
choices=HTTP_VERSIONS,
type=float,
help="Set the HTTP version."
)
parser.add_argument(
'--timeout',
metavar='<timeout>',
help="Set the maximum time, in seconds, that a monitor waits to "
"connect before it times out. This value must be less than "
"the delay value."
)
parser.add_argument(
'--max-retries',
metavar='<max_retries>',
type=int,
choices=range(1, 10),
help="Set the number of successful checks before changing the "
"operating status of the member to ONLINE."
)
parser.add_argument(
'--max-retries-down',
metavar='<max_retries_down>',
type=int,
choices=range(1, 10),
help="Set the number of allowed check failures before changing "
"the operating status of the member to ERROR."
)
parser.add_argument(
'--url-path',
metavar='<url_path>',
help="Set the HTTP URL path of the request sent by the monitor to "
"test the health of a backend member."
)
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
'--enable',
action='store_true',
default=None,
help="Enable health monitor."
)
admin_group.add_argument(
'--disable',
action='store_true',
default=None,
help="Disable health monitor."
)
parser.add_argument(
'--wait',
action='store_true',
help='Wait for action to complete.',
)
_tag.add_tag_option_to_parser_for_set(parser, 'health monitor')
return parser
def take_action(self, parsed_args):
attrs = v2_utils.get_health_monitor_attrs(self.app.client_manager,
parsed_args)
hm_id = attrs.pop('health_monitor_id')
v2_utils.set_tags_for_set(
self.app.client_manager.load_balancer.health_monitor_show,
hm_id, attrs, clear_tags=parsed_args.no_tag)
body = {'healthmonitor': attrs}
self.app.client_manager.load_balancer.health_monitor_set(
hm_id, json=body)
if parsed_args.wait:
v2_utils.wait_for_active(
status_f=(self.app.client_manager.load_balancer.
health_monitor_show),
res_id=hm_id
)
class UnsetHealthMonitor(command.Command):
"""Clear health monitor settings"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'health_monitor',
metavar='<health_monitor>',
help="Health monitor to update (name or ID)."
)
parser.add_argument(
'--domain-name',
action='store_true',
help="Clear the health monitor domain name."
)
parser.add_argument(
'--expected-codes',
action='store_true',
help="Reset the health monitor expected codes to the API default."
)
parser.add_argument(
'--http-method',
action='store_true',
help="Reset the health monitor HTTP method to the API default."
)
parser.add_argument(
'--http-version',
action='store_true',
help="Reset the health monitor HTTP version to the API default."
)
parser.add_argument(
'--max-retries-down',
action='store_true',
help="Reset the health monitor max retries down to the API "
"default."
)
parser.add_argument(
'--name',
action='store_true',
help="Clear the health monitor name."
)
parser.add_argument(
'--url-path',
action='store_true',
help="Clear the health monitor URL path."
)
parser.add_argument(
'--wait',
action='store_true',
help='Wait for action to complete.',
)
_tag.add_tag_option_to_parser_for_unset(parser, 'health monitor')
return parser
def take_action(self, parsed_args):
unset_args = v2_utils.get_unsets(parsed_args)
if not unset_args and not parsed_args.all_tag:
return
hm_id = v2_utils.get_resource_id(
self.app.client_manager.load_balancer.health_monitor_list,
'healthmonitors', parsed_args.health_monitor)
v2_utils.set_tags_for_unset(
self.app.client_manager.load_balancer.health_monitor_show,
hm_id, unset_args, clear_tags=parsed_args.all_tag)
body = {'healthmonitor': unset_args}
self.app.client_manager.load_balancer.health_monitor_set(
hm_id, json=body)
if parsed_args.wait:
v2_utils.wait_for_active(
status_f=(self.app.client_manager.load_balancer.
health_monitor_show),
res_id=hm_id
) | 0.52829 | 0.116261 |
from __future__ import absolute_import
import errno
import io
import os
import re
import requests
PNG_TYPE = "image/png"
JPEG_TYPE = "image/jpeg"
FILE_TYPES = {
'jpg': JPEG_TYPE,
'jpeg': JPEG_TYPE,
'png': PNG_TYPE,
}
def _ensure_dir(dirname):
try:
os.mkdir(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
class Reporter(object):
"""Simplistic output to a stream with verbosity support."""
def __init__(self, stream, verbosity):
"""Init."""
self._stream = stream
self.level = verbosity
def __call__(self, format_string, level=1, **kwargs):
"""Call."""
if self.level >= level:
self._stream.write(format_string.format(**kwargs) + "\n")
self._stream.flush()
def show_nth(self, n, step=(1 << 10)):
"""``True`` if item in sequence should be reported based on level."""
if not self.level:
return False
factor = step >> (self.level << 1)
if not factor:
return True
return not n % factor
def create_collection(reporter,
client,
auth_client,
collection_name,
collection_description,
data_file,
image_folders,
xslt_file,
columns_file,
zegs,
dynamic_custom_options=None,
image_column='id',
path_replace=None):
"""Create a new collection."""
collection = client.create_collection(
collection_name,
collection_description,
zegs)
reporter("Created collection " + collection['dataset_id'], level=0)
with open(data_file, 'rb') as f:
client.upload_data(collection['dataset_id'], data_file, f)
reporter(
"Data file {filename} uploaded",
level=0,
filename=data_file
)
imageset_ids = dict()
if zegs:
# create an imageset for each folder
for directory in image_folders:
directory_name = get_path_directory_name(directory)
imageset_ids[directory_name] = api_upload_folder(
reporter,
client,
auth_client,
directory,
directory_name,
zegs
)
# perform this substitution on the .tsv above if the
# image filenames are specified in there instead
with open(xslt_file, 'rb') as f:
text = f.read()
if path_replace is not None:
text = update_paths(
client.project,
imageset_ids,
text,
client.api_url,
image_folders,
)
bio = io.BytesIO(text)
with open('test.xslt', 'wb') as wf:
wf.write(text)
if dynamic_custom_options: collection['dynamic_custom_options'] = dynamic_custom_options
collection['related_imageset_ids'] = [value for key, value in imageset_ids.items()]
client.update_collection(collection['id'], collection)
client.upload_zegx(collection['id'], bio)
reporter("Created zegx template", level=0)
else:
source={"deepzoom": {"midlevel": 10, "optimize": 'true', "overlap": 1, "quality": 95}}
info = {"source": source}
client.update_imageset(collection['dz_imageset_id'], info=info)
imageset_ids[get_path_directory_name(image_folders[0])] = api_upload_folder(
reporter,
client,
auth_client,
image_folders[0],
get_path_directory_name(image_folders[0]),
zegs,
existing_imageset_id=collection['imageset_id']
)
join_ds = client.create_join("Join for " + collection['name'],
imageset_ids[get_path_directory_name(image_folders[0])],
collection['dataset_id'],
join_column=image_column)
collection['join_dataset_id'] = join_ds['id']
reporter("Joined images and data", level=0)
client.update_collection(collection['id'], collection)
# upload a json with the column schema
if columns_file is not None:
with open(columns_file, 'rb') as f:
client.set_columns(f.read(), collection['dataset_id'])
def api_upload_folder(reporter, client, auth_client, image_folder, imageset_name, zegs, existing_imageset_id=None):
if existing_imageset_id:
imageset_id = existing_imageset_id
else:
reporter("Creating imageset", level=0)
imageset = client.create_imageset(imageset_name)
imageset_id = imageset['id']
reporter("Created imageset {name}", level=0, name=imageset_name)
"""Upload all images within a folder to an imageset."""
for filename in os.listdir(image_folder):
file_path = os.path.join(image_folder, filename)
# the components being the file name and the extension
filename_components = os.path.basename(file_path).rsplit('.', 1)
if len(filename_components) > 1 and \
filename_components[1].lower() in FILE_TYPES.keys():
with open(file_path, 'rb') as f:
i = 3
while i > 0:
try:
client.upload_image(
imageset_id,
filename_components[0],
f,
FILE_TYPES[filename_components[1].lower()]
)
reporter(
"Imageset: {id}, uploaded {filename}",
level=0,
id=imageset_id,
filename=filename_components[0]
)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 401:
reporter("Requesting new token...", level=0)
client.update_token(auth_client.get_user_token())
i -= 1
reporter(str(e))
continue
except requests.exceptions.RequestException as e:
reporter(
"Imageset: {id}, upload failed for {filename}\n{error}",
level=0,
id=imageset_id,
filename=filename_components[0],
error=e,
)
break
return imageset_id
def update_paths(project, imageset_ids, text, api_url, image_folders):
"""Update the paths for all images in the XSLT.
The URL needs to instead point to the imageset.
"""
def foldername_replace(matchobj):
return (api_url + "v0/project/{}/imagesets/{}/images/name:{}/data\""
).format(
project,
imageset_ids[matchobj.group(1).decode("utf-8")],
matchobj.group(2).decode("utf-8")
).encode()
directory_names = [get_path_directory_name(folder) for folder in image_folders]
pat = (r"({})/(.*?)(\.)?({})?\"").format(
r'|'.join(directory_names),
r'|'.join(FILE_TYPES.keys())
).encode()
updatedfile = re.sub(pat, foldername_replace, text)
with open("test.xslt", 'wb') as testfile:
testfile.write(updatedfile)
return updatedfile
def get_path_directory_name(path):
"""Get the directory name for a path."""
return os.path.split(path)[-1] | zegami_zeg/run.py | from __future__ import absolute_import
import errno
import io
import os
import re
import requests
PNG_TYPE = "image/png"
JPEG_TYPE = "image/jpeg"
FILE_TYPES = {
'jpg': JPEG_TYPE,
'jpeg': JPEG_TYPE,
'png': PNG_TYPE,
}
def _ensure_dir(dirname):
try:
os.mkdir(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
class Reporter(object):
"""Simplistic output to a stream with verbosity support."""
def __init__(self, stream, verbosity):
"""Init."""
self._stream = stream
self.level = verbosity
def __call__(self, format_string, level=1, **kwargs):
"""Call."""
if self.level >= level:
self._stream.write(format_string.format(**kwargs) + "\n")
self._stream.flush()
def show_nth(self, n, step=(1 << 10)):
"""``True`` if item in sequence should be reported based on level."""
if not self.level:
return False
factor = step >> (self.level << 1)
if not factor:
return True
return not n % factor
def create_collection(reporter,
client,
auth_client,
collection_name,
collection_description,
data_file,
image_folders,
xslt_file,
columns_file,
zegs,
dynamic_custom_options=None,
image_column='id',
path_replace=None):
"""Create a new collection."""
collection = client.create_collection(
collection_name,
collection_description,
zegs)
reporter("Created collection " + collection['dataset_id'], level=0)
with open(data_file, 'rb') as f:
client.upload_data(collection['dataset_id'], data_file, f)
reporter(
"Data file {filename} uploaded",
level=0,
filename=data_file
)
imageset_ids = dict()
if zegs:
# create an imageset for each folder
for directory in image_folders:
directory_name = get_path_directory_name(directory)
imageset_ids[directory_name] = api_upload_folder(
reporter,
client,
auth_client,
directory,
directory_name,
zegs
)
# perform this substitution on the .tsv above if the
# image filenames are specified in there instead
with open(xslt_file, 'rb') as f:
text = f.read()
if path_replace is not None:
text = update_paths(
client.project,
imageset_ids,
text,
client.api_url,
image_folders,
)
bio = io.BytesIO(text)
with open('test.xslt', 'wb') as wf:
wf.write(text)
if dynamic_custom_options: collection['dynamic_custom_options'] = dynamic_custom_options
collection['related_imageset_ids'] = [value for key, value in imageset_ids.items()]
client.update_collection(collection['id'], collection)
client.upload_zegx(collection['id'], bio)
reporter("Created zegx template", level=0)
else:
source={"deepzoom": {"midlevel": 10, "optimize": 'true', "overlap": 1, "quality": 95}}
info = {"source": source}
client.update_imageset(collection['dz_imageset_id'], info=info)
imageset_ids[get_path_directory_name(image_folders[0])] = api_upload_folder(
reporter,
client,
auth_client,
image_folders[0],
get_path_directory_name(image_folders[0]),
zegs,
existing_imageset_id=collection['imageset_id']
)
join_ds = client.create_join("Join for " + collection['name'],
imageset_ids[get_path_directory_name(image_folders[0])],
collection['dataset_id'],
join_column=image_column)
collection['join_dataset_id'] = join_ds['id']
reporter("Joined images and data", level=0)
client.update_collection(collection['id'], collection)
# upload a json with the column schema
if columns_file is not None:
with open(columns_file, 'rb') as f:
client.set_columns(f.read(), collection['dataset_id'])
def api_upload_folder(reporter, client, auth_client, image_folder, imageset_name, zegs, existing_imageset_id=None):
if existing_imageset_id:
imageset_id = existing_imageset_id
else:
reporter("Creating imageset", level=0)
imageset = client.create_imageset(imageset_name)
imageset_id = imageset['id']
reporter("Created imageset {name}", level=0, name=imageset_name)
"""Upload all images within a folder to an imageset."""
for filename in os.listdir(image_folder):
file_path = os.path.join(image_folder, filename)
# the components being the file name and the extension
filename_components = os.path.basename(file_path).rsplit('.', 1)
if len(filename_components) > 1 and \
filename_components[1].lower() in FILE_TYPES.keys():
with open(file_path, 'rb') as f:
i = 3
while i > 0:
try:
client.upload_image(
imageset_id,
filename_components[0],
f,
FILE_TYPES[filename_components[1].lower()]
)
reporter(
"Imageset: {id}, uploaded {filename}",
level=0,
id=imageset_id,
filename=filename_components[0]
)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 401:
reporter("Requesting new token...", level=0)
client.update_token(auth_client.get_user_token())
i -= 1
reporter(str(e))
continue
except requests.exceptions.RequestException as e:
reporter(
"Imageset: {id}, upload failed for {filename}\n{error}",
level=0,
id=imageset_id,
filename=filename_components[0],
error=e,
)
break
return imageset_id
def update_paths(project, imageset_ids, text, api_url, image_folders):
"""Update the paths for all images in the XSLT.
The URL needs to instead point to the imageset.
"""
def foldername_replace(matchobj):
return (api_url + "v0/project/{}/imagesets/{}/images/name:{}/data\""
).format(
project,
imageset_ids[matchobj.group(1).decode("utf-8")],
matchobj.group(2).decode("utf-8")
).encode()
directory_names = [get_path_directory_name(folder) for folder in image_folders]
pat = (r"({})/(.*?)(\.)?({})?\"").format(
r'|'.join(directory_names),
r'|'.join(FILE_TYPES.keys())
).encode()
updatedfile = re.sub(pat, foldername_replace, text)
with open("test.xslt", 'wb') as testfile:
testfile.write(updatedfile)
return updatedfile
def get_path_directory_name(path):
"""Get the directory name for a path."""
return os.path.split(path)[-1] | 0.402275 | 0.139104 |
import numpy as np
import matplotlib.pyplot as plt
import data_load
import gsw
import oceans as oc
import Internal_wave_properties_REV as iwp
import pandas as pd
# Load the test data and estimate internal wave properties for the transect
def internal_wave_properties(save_only=True):
ladcp, ctd, bathy = data_load.load_data()
rho_neutral = np.genfromtxt('neutral_rho.csv', delimiter=',')
strain = np.genfromtxt('strain.csv', delimiter=',')
    # NOTE: these local values are currently unused; the call below hardcodes wl_max=500
    wl_max = 350
    wl_min = 100
    (lambdaH, kh, omega, N2, dist, depths,
     U2, V2, p_ladcp, Uspec, Vspec,
     etaSpec, aspect, Ek, Ep, Etotal) = iwp.frequencyEstimator(
        ctd, ladcp, bathy, rho_neutral, strain, wl_max=500, full_set=True)
if not save_only:
return lambdaH, kh, omega, N2, dist, depths,\
U2, V2, p_ladcp, Uspec, Vspec, etaSpec, aspect
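# Usage sketch (an assumption-laden example, not part of the original pipeline):
# calling with save_only=False returns the full set of estimates, e.g.
#     lambdaH, kh, omega, N2, dist, depths, U2, V2, p_ladcp, \
#         Uspec, Vspec, etaSpec, aspect = internal_wave_properties(save_only=False)
# Note that with save_only=True the function currently returns None.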
def doppler_shifts(kh, ladcp, avg=1000, bin_size = 512):
"""
    Doppler-shift the internal wave frequency to test for lee waves
    using the depth-averaged flow
"""
U, V, p_ladcp = oc.loadLADCP(ladcp)
maxDepth = 4000
idx_ladcp = p_ladcp[:,-1] <= maxDepth
dz = int(np.nanmean(np.gradient(p_ladcp, axis=0)))
window = int(np.ceil(avg/dz))
Ubar = []
for u, v in zip(U.T, V.T):
mask = np.isfinite(u)
u = u[mask]
v = v[mask]
u = np.nanmean(u[-window:])
v = np.nanmean(v[-window:])
Ubar.append(np.sqrt(u**2 + v**2))
Ubar = np.vstack(Ubar)
dshift = []
for cast, ubar in zip(kh.T, Ubar):
dshift.append(cast*ubar)
dshift = np.vstack(dshift).T
return dshift
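# dshift has the same shape as kh (vertical bin x station): each column of kh is
# multiplied by that station's near-bottom (deepest ~1000 m) mean speed, giving the
# encounter frequency kh*|U| that is compared against f and N in the lee-wave tests
# below.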
def horizontal_wave_vector_decomposition(Uprime, Vprime, axis=1, nfft=1024):
"""
    Attempt to decompose the horizontal wave vector into its k and l components,
    following the methods used in Polzin (2007)
"""
# U'* x b'
    # np.fft is a module, not a function; use np.fft.fft with `n` as the transform length
    Uspec = np.fft.fft(Uprime, n=nfft, axis=axis)
    Vspec = np.fft.fft(Vprime, n=nfft, axis=axis)
theta = []
for Uin, Vin in zip(Uspec, Vspec):
u_conj = np.conj(Uin[:,1])
v_prime = Vin[:,1]
u_prime = Uin[:,1]
v_conj = np.conj(Vin[:,1])
theta.append(np.arctan(2*np.real((u_conj*v_prime)/(u_conj*u_prime - v_conj*v_prime)))/2)
theta = np.vstack(theta).T
    # NOTE: `kh` is not an argument of this function; it is assumed to be available
    # at module level (e.g. from internal_wave_properties / frequencyEstimator)
    k = -kh*np.cos(theta)
    k_wave = (2*np.pi/k)*1e-3   # corresponding wavelength in km (computed but unused)
    l = kh*np.sin(theta)
    l_wave = (2*np.pi/l)*1e-3   # corresponding wavelength in km (computed but unused)
return k, l
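# Usage sketch (hypothetical inputs; `kh` must already be defined at module level
# as noted above):
#     k, l = horizontal_wave_vector_decomposition(Uprime, Vprime)
# where Uprime/Vprime are velocity perturbation profiles. k and l are the horizontal
# wavenumber components implied by the phase angle theta estimated from the velocity
# cross-spectra.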
def lee_wave_tests(kh, omega, N2, ctd, ladcp, dist, depths, error_factor=2, plots=False):
"""
Testing whether or not the observations can be attributed to lee waves
"""
S, T, p_ctd, lat, lon = oc.loadCTD(ctd)
# k, l = horizontal_wave_vector_decomposition(Uspec, Vspec)
dshift = doppler_shifts(kh, ladcp)
# Test whether K*U is between N and f
f = (np.nanmean(gsw.f(lat)))
dshiftTest = np.full_like(kh, np.nan)
test_final = np.full_like(kh, np.nan)
for i, dump in enumerate(kh.T):
N2mean = np.nanmean(N2[:,i])
testA = np.abs(dshift[:,i]**2-f**2) <= f**2
testB = np.abs(dshift[:,i]**2-f**2) <= N2mean
test_final[:,i] = np.logical_and(testA, testB)
dshiftTest[:,i] = np.logical_and(dshift[:,i]**2 >= f**2, dshift[:,i]**2<= N2mean)
dshiftTest2 = dshiftTest == 0
mask = np.logical_not(test_final)
kh[mask] = np.nan
omega[mask] = np.nan
    # NOTE: lambdaH, k and l are not defined inside this function; they are assumed
    # to exist at module level (lambdaH from internal_wave_properties, k and l from
    # horizontal_wave_vector_decomposition)
    lambdaH[mask] = np.nan
    k[mask] = np.nan
    l[mask] = np.nan
# kh[dshiftTest2] = np.nan
# omega[dshiftTest2] = np.nan
# lambdaH[dshiftTest2] = np.nan
# k[dshiftTest2] = np.nan
# l[dshiftTest2] = np.nan
file2save = pd.DataFrame(kh)
file2save.index = np.squeeze(depths)
file2save.to_excel('Kh_masked.xlsx')
file2save = pd.DataFrame(omega)
file2save.index = np.squeeze(depths)
file2save.to_excel('omega_masked.xlsx')
file2save = pd.DataFrame(lambdaH)
file2save.index = np.squeeze(depths)
file2save.to_excel('lambda_masked.xlsx')
np.savetxt('kh_masked2.csv', kh)
np.savetxt('k_masked2.csv', k)
np.savetxt('l_masked2.csv', l)
np.savetxt('omega_masked.csv', omega)
# Test phase of velocity and isopycnal perturbations
stns = np.arange(1, 1+S.shape[1])
stns = np.expand_dims(stns, 1)
stns = np.repeat(stns.T, kh.shape[0], axis=0)
np.savetxt('dshift_mask.csv',dshiftTest)
    if plots:
        # NOTE: U, p_ladcp and bathy are not defined inside this function; they are
        # assumed to be available at module level (from loadLADCP / load_data)
        fig = plt.figure()
        plt.contourf(dist, np.squeeze(p_ladcp), U, cmap='seismic')
plt.colorbar()
plt.pcolormesh(dist, np.squeeze(depths), dshiftTest, cmap='binary', alpha=.2)
plt.fill_between(dist, bathy, 4000, color = '#B4B4B4')
plt.ylim(0, 4000)
plt.gca().invert_yaxis()
plt.title("u' with bins with f < Kh*U < N")
plt.xlabel('Distance Along Transect (km)')
plt.ylabel('Pressure (dB)')
for i in range(U.shape[1]):
plt.annotate(str(i+1), (dist[i], 3600))
def energy_flux(Uspec, Vspec, kh, Ek, Ep, ctd, depths, m=300):
    """
    Calculate the energy flux of an internal wave following the equations shown
    in Cusack et al. (2017): the flux is the energy density multiplied by the
    group velocity, directed along Cg. As written, this routine only assembles
    the energy density and the horizontal wave-vector components; the final
    Cg-weighted step is sketched after the function. m is the vertical
    wavelength in metres and is converted to a wavenumber below.
    """
    m = 2*np.pi/m
S, T, p_ctd, lat, lon = oc.loadCTD(ctd)
rho = oc.rhoFromCTD(S, T, p_ctd, lon, lat)
maxDepth = 4000
idx_ctd = p_ctd[:,-1] <= maxDepth
p_ctd = p_ctd[idx_ctd,:]
rho = rho[idx_ctd,:]
ctd_bins = oc.binData(rho, p_ctd[:,0], bin_size=512)
rho2 = np.vstack([np.nanmean(rho[binIn]) for binIn in ctd_bins])
# Energy Density = Kinetic Energy + Potential Energy
E = (Ek + Ep)*np.nanmean(rho2)
    k, l = horizontal_wave_vector_decomposition(Uspec, Vspec, kh)
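# A minimal sketch of the missing Cg-weighted flux step, assuming per-bin arrays
# of the intrinsic frequency (omega), buoyancy frequency squared (N2) and the
# Coriolis frequency (f) are available. The group-velocity expressions below are
# the standard linear internal-wave forms, not taken from this module or from
# Cusack et al. (2017) directly.
def group_velocity_flux(E, kh, m, omega, N2, f):
    """Return (F_h, F_z): horizontal and vertical energy flux, E times Cg."""
    K2 = kh**2 + m**2
    cg_h = kh * m**2 * (N2 - f**2) / (omega * K2**2)
    cg_z = -m * kh**2 * (N2 - f**2) / (omega * K2**2)
    return E * cg_h, E * cg_z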
def momentumFluxes(kh, m, N2, ladcp, rho_neutral, z_ctd, lat, bin_size=512, h0=1000):
    """
    Calculate the momentum flux (wave stress) of internal waves whose
    Doppler-shifted frequency falls within the allowed f-to-N range.
    rho_neutral is the neutral density field on the CTD pressure grid and lat
    the station latitudes; both are required by the calculation below.
    """
    U, V, z = oc.loadLADCP(ladcp)
    dshift = doppler_shifts(kh, ladcp)
    # Test whether Kh*U lies between f and N
    f = np.nanmean(gsw.f(lat))
    dshiftTest = np.full_like(kh, np.nan)
for i, dump in enumerate(kh.T):
N2mean = np.nanmean(N2[:,i])
dshiftTest[:,i] = np.logical_and(dshift[:,i]**2 >= f**2, dshift[:,i]**2<= N2mean)
dshiftTest2 = dshiftTest == 0
kh[dshiftTest2] = np.nan
maxDepth = 4000
idx_ladcp = z[:,-1] <= maxDepth
idx_ctd = z_ctd[:,-1] <= maxDepth
z = z[idx_ladcp,:]
U = U[idx_ladcp,:]
V = V[idx_ladcp,:]
rho = rho_neutral[idx_ctd,:]
bins = oc.binData(U, z[:,0], bin_size)
Umean = np.vstack([np.nanmean(U[binIn,:], axis=0) for binIn in bins])
Vmean = np.vstack([np.nanmean(V[binIn,:], axis=0) for binIn in bins])
Umag = np.sqrt(Umean**2 + Vmean**2)
bins = oc.binData(rho, z_ctd[:,0], bin_size)
rho0 = np.vstack([np.nanmean(rho[binIn,:], axis=0) for binIn in bins])
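    # Wave stress estimate: tau = 0.5*kh*rho0*|U|*sqrt(N^2/|U|^2 - kh^2),
    # which is real only where kh <= N/|U|.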
tau = .5*kh*rho0*Umag*np.sqrt((N2/Umag**2)-kh**2)
np.savetxt('wave_momentum.csv', tau)
def perturbation_comparisons(ctd, ladcp, bin_size=512, plots=False):
"""
Comparison of velocity and density perturbations to see if they are in or
out of phase
"""
# Load ctd data
S, T, p_ctd, lat, lon = oc.loadCTD(ctd)
# Calculate sigma0 and filter it
rho = oc.rhoFromCTD(S, T, p_ctd, lon, lat)
rho_poly = []
for cast in rho.T:
fitrev = oc.vert_polyFit2(cast, p_ctd[:,0], 100, deg=2)
rho_poly.append(fitrev)
rho_poly = np.vstack(rho_poly).T
rho2 = rho - rho_poly
# load ladcp data and filter it
U, V, p_ladcp, uup, vup, udo, vdo = oc.loadLADCP(ladcp, full_set=True)
uup, vup = oc.velocityFiltered(uup, vup, p_ladcp, deg=1)
udo, vdo = oc.velocityFiltered(udo, vdo, p_ladcp, deg=1)
U2, V2 = oc.velocityFiltered(U, V, p_ladcp, deg=1)
# Apply 100m wide box filter to clean up perturbations
for i, dump in enumerate(rho2.T):
rho2[:,i] = oc.verticalBoxFilter1(rho2[:,i], p_ctd[:,0], box=100)
U2[:,i] = oc.verticalBoxFilter1(U2[:,i], p_ladcp[:,0], box=100)
V2[:,i] = oc.verticalBoxFilter1(V2[:,i], p_ladcp[:,0], box=100)
uup[:,i] = oc.verticalBoxFilter1(uup[:,i], p_ladcp[:,0], box=100)
vup[:,i] = oc.verticalBoxFilter1(vup[:,i], p_ladcp[:,0], box=100)
udo[:,i] = oc.verticalBoxFilter1(udo[:,i], p_ladcp[:,0], box=100)
vdo[:,i] = oc.verticalBoxFilter1(vdo[:,i], p_ladcp[:,0], box=100)
if plots:
count = 0
for rho0, u1, u2 in zip(rho2.T, uup.T, udo.T):
count += 1
fig = plt.figure(figsize=(3.88, 7.62))
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
            l1 = ax1.plot(rho0, p_ctd[:,0], 'r', label=r"$\rho'$")
            l2 = ax2.plot(u1, p_ladcp[:,0], 'b', label="up'")
            l3 = ax2.plot(u2, p_ladcp[:,0], 'k', label="down'")
ax2.set_xlabel("u'")
ax1.set_xlabel("rho'")
ax1.set_ylim((0, 4000))
plt.gca().invert_yaxis()
plt.title('station ' + str(count), y=1.1)
plt.savefig('figures/perturbation_comparisons/station ' + str(count) + '.png',\
bbox_inches='tight', dpi=400)
            plt.close() | lee_wave_analysis1.py | 0.425367 | 0.465995
import mock
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import six
from senlin.common import consts
from senlin.common import exception
from senlin.engine import node as nodem
from senlin.objects import node as node_obj
from senlin.profiles import base as pb
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
PROFILE_ID = 'aa5f86b8-e52b-4f2b-828a-4c14c770938d'
CLUSTER_ID = '2c5139a6-24ba-4a6f-bd53-a268f61536de'
NODE_ID = '60efdaa1-06c2-4fcf-ae44-17a2d85ff3ea'
class TestNode(base.SenlinTestCase):
def setUp(self):
super(TestNode, self).setUp()
self.context = utils.dummy_context(project='node_test_project')
self.profile = utils.create_profile(self.context, PROFILE_ID)
self.cluster = utils.create_cluster(self.context, CLUSTER_ID,
PROFILE_ID)
def test_node_init(self):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, role='first_node')
self.assertIsNone(node.id)
self.assertEqual('node1', node.name)
self.assertIsNone(node.physical_id)
self.assertEqual(PROFILE_ID, node.profile_id)
self.assertEqual('', node.user)
self.assertEqual('', node.project)
self.assertEqual('', node.domain)
self.assertEqual(CLUSTER_ID, node.cluster_id)
self.assertEqual(-1, node.index)
self.assertEqual('first_node', node.role)
self.assertIsNone(node.init_at)
self.assertIsNone(node.created_at)
self.assertIsNone(node.updated_at)
self.assertEqual('INIT', node.status)
self.assertEqual('Initializing', node.status_reason)
self.assertEqual({}, node.data)
self.assertEqual({}, node.metadata)
self.assertEqual({}, node.rt)
def test_node_init_random_name(self):
node = nodem.Node(None, PROFILE_ID, None)
self.assertIsNotNone(node.name)
self.assertEqual(13, len(node.name))
def test_node_store_init(self):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context,
role='first_node', index=1)
self.assertIsNone(node.id)
node_id = node.store(self.context)
self.assertIsNotNone(node_id)
node_info = node_obj.Node.get(self.context, node_id)
self.assertIsNotNone(node_info)
self.assertEqual('node1', node_info.name)
self.assertIsNone(node_info.physical_id)
self.assertEqual(CLUSTER_ID, node_info.cluster_id)
self.assertEqual(PROFILE_ID, node_info.profile_id)
self.assertEqual(self.context.user_id, node_info.user)
self.assertEqual(self.context.project_id, node_info.project)
self.assertEqual(self.context.domain_id, node_info.domain)
self.assertEqual(1, node_info.index)
self.assertEqual('first_node', node.role)
self.assertIsNotNone(node_info.init_at)
self.assertIsNone(node_info.created_at)
self.assertIsNone(node_info.updated_at)
self.assertEqual('INIT', node_info.status)
self.assertEqual('Initializing', node_info.status_reason)
self.assertEqual({}, node_info.metadata)
self.assertEqual({}, node_info.data)
def test_node_store_update(self):
node = nodem.Node('node1', PROFILE_ID, "", user=self.context.user_id,
project=self.context.project_id)
node_id = node.store(self.context)
node.name = 'new_name'
new_node_id = node.store(self.context)
self.assertEqual(node_id, new_node_id)
def test_node_load(self):
ex = self.assertRaises(exception.ResourceNotFound,
nodem.Node.load,
self.context, 'non-existent', None)
self.assertEqual("The node 'non-existent' could not be found.",
six.text_type(ex))
x_node_id = 'ee96c490-2dee-40c8-8919-4c64b89e326c'
node = utils.create_node(self.context, x_node_id, PROFILE_ID,
CLUSTER_ID)
node_info = nodem.Node.load(self.context, x_node_id)
self.assertEqual(node.id, node_info.id)
self.assertEqual(node.name, node_info.name)
self.assertEqual(node.physical_id, node_info.physical_id)
self.assertEqual(node.cluster_id, node_info.cluster_id)
self.assertEqual(node.profile_id, node_info.profile_id)
self.assertEqual(node.user, node_info.user)
self.assertEqual(node.project, node_info.project)
self.assertEqual(node.domain, node_info.domain)
self.assertEqual(node.index, node_info.index)
self.assertEqual(node.role, node_info.role)
self.assertEqual(node.init_at, node_info.init_at)
self.assertEqual(node.created_at, node_info.created_at)
self.assertEqual(node.updated_at, node_info.updated_at)
self.assertEqual(node.status, node_info.status)
self.assertEqual(node.status_reason, node_info.status_reason)
self.assertEqual(node.metadata, node_info.metadata)
self.assertEqual(node.data, node_info.data)
self.assertEqual(self.profile.name, node_info.rt['profile'].name)
def test_node_load_diff_project(self):
x_node_id = 'c06840c5-f4e4-49ae-8143-9da5b4c73f38'
utils.create_node(self.context, x_node_id, PROFILE_ID, CLUSTER_ID)
new_ctx = utils.dummy_context(project='a-different-project')
ex = self.assertRaises(exception.ResourceNotFound,
nodem.Node.load,
new_ctx, x_node_id, None)
self.assertEqual("The node '%s' could not be found." % x_node_id,
six.text_type(ex))
res = nodem.Node.load(new_ctx, x_node_id, project_safe=False)
self.assertIsNotNone(res)
self.assertEqual(x_node_id, res.id)
@mock.patch.object(nodem.Node, '_from_object')
@mock.patch.object(node_obj.Node, 'get_all')
def test_node_load_all(self, mock_get, mock_init):
x_obj_1 = mock.Mock()
x_obj_2 = mock.Mock()
mock_get.return_value = [x_obj_1, x_obj_2]
x_node_1 = mock.Mock()
x_node_2 = mock.Mock()
mock_init.side_effect = [x_node_1, x_node_2]
result = nodem.Node.load_all(self.context)
self.assertEqual([x_node_1, x_node_2], [n for n in result])
mock_get.assert_called_once_with(self.context, cluster_id=None,
limit=None, marker=None,
sort=None, filters=None,
project_safe=True)
mock_init.assert_has_calls([
mock.call(self.context, x_obj_1),
mock.call(self.context, x_obj_2)])
def test_node_set_status(self):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.store(self.context)
self.assertEqual(nodem.consts.NS_INIT, node.status)
self.assertIsNotNone(node.init_at)
self.assertIsNone(node.created_at)
self.assertIsNone(node.updated_at)
# create
node.set_status(self.context, consts.NS_CREATING,
reason='Creation in progress')
self.assertEqual('CREATING', node.status)
self.assertEqual('Creation in progress', node.status_reason)
self.assertIsNone(node.created_at)
self.assertIsNone(node.updated_at)
node.set_status(self.context, consts.NS_ACTIVE,
reason='Creation succeeded')
self.assertEqual('ACTIVE', node.status)
self.assertEqual('Creation succeeded', node.status_reason)
self.assertIsNotNone(node.created_at)
self.assertIsNotNone(node.updated_at)
# update
node.set_status(self.context, consts.NS_UPDATING,
reason='Update in progress')
self.assertEqual('UPDATING', node.status)
self.assertEqual('Update in progress', node.status_reason)
self.assertIsNotNone(node.created_at)
node.set_status(self.context, consts.NS_ACTIVE,
reason='Update succeeded')
self.assertEqual('ACTIVE', node.status)
self.assertEqual('Update succeeded', node.status_reason)
self.assertIsNotNone(node.created_at)
self.assertIsNotNone(node.updated_at)
node.set_status(self.context, consts.NS_ACTIVE)
self.assertEqual('ACTIVE', node.status)
self.assertIsNotNone(node.created_at)
self.assertIsNotNone(node.updated_at)
# delete
node.set_status(self.context, consts.NS_DELETING,
reason='Deletion in progress')
self.assertEqual('DELETING', node.status)
self.assertEqual('Deletion in progress', node.status_reason)
self.assertIsNotNone(node.created_at)
@mock.patch.object(pb.Profile, 'get_details')
def test_node_get_details(self, mock_details):
node = nodem.Node('node1', CLUSTER_ID, None)
for physical_id in (None, ''):
node.physical_id = physical_id
self.assertEqual({}, node.get_details(self.context))
self.assertEqual(0, mock_details.call_count)
fake_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
node.physical_id = fake_id
mock_details.return_value = {'foo': 'bar'}
res = node.get_details(self.context)
mock_details.assert_called_once_with(self.context, node)
self.assertEqual({'foo': 'bar'}, res)
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'create_object')
def test_node_create(self, mock_create, mock_status):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
mock_create.return_value = physical_id
res = node.do_create(self.context)
self.assertTrue(res)
mock_status.assert_any_call(self.context, consts.NS_CREATING,
'Creation in progress')
mock_status.assert_any_call(self.context, consts.NS_ACTIVE,
'Creation succeeded',
physical_id=physical_id)
@mock.patch.object(nodem.Node, 'set_status')
def test_node_create_not_init(self, mock_status):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.status = 'NOT_INIT'
res, reason = node.do_create(self.context)
self.assertFalse(res)
self.assertEqual('Node must be in INIT status', reason)
mock_status.assert_any_call(self.context, consts.NS_ERROR,
'Node must be in INIT status')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'create_object')
def test_node_create_not_created(self, mock_create, mock_status):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
mock_create.side_effect = exception.EResourceCreation(
type='PROFILE', message='Boom', resource_id='test_id')
res, reason = node.do_create(self.context)
self.assertFalse(res)
self.assertEqual(str(reason), 'Failed in creating PROFILE: Boom.')
mock_status.assert_any_call(self.context, consts.NS_CREATING,
'Creation in progress')
mock_status.assert_any_call(self.context, consts.NS_ERROR,
'Failed in creating PROFILE: Boom.',
physical_id='test_id')
@mock.patch.object(node_obj.Node, 'delete')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'delete_object')
def test_node_delete(self, mock_delete, mock_status, mock_db_delete):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.physical_id = uuidutils.generate_uuid()
node.id = uuidutils.generate_uuid()
mock_db_delete.return_value = True
res = node.do_delete(self.context)
self.assertTrue(res)
mock_delete.assert_called_once_with(self.context, node)
mock_db_delete.assert_called_once_with(self.context, node.id)
mock_status.assert_called_once_with(self.context, consts.NS_DELETING,
'Deletion in progress')
@mock.patch.object(node_obj.Node, 'delete')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'delete_object')
def test_node_delete_no_physical_id(self, mock_delete, mock_status,
mock_db_delete):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.id = uuidutils.generate_uuid()
self.assertIsNone(node.physical_id)
mock_db_delete.return_value = True
res = node.do_delete(self.context)
self.assertTrue(res)
mock_status.assert_called_once_with(self.context, consts.NS_DELETING,
"Deletion in progress")
self.assertTrue(mock_delete.called)
mock_db_delete.assert_called_once_with(self.context, node.id)
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'delete_object')
def test_node_delete_EResourceDeletion(self, mock_delete, mock_status):
ex = exception.EResourceDeletion(type='PROFILE', id='NODE_ID',
message='Too Bad')
mock_delete.side_effect = ex
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.physical_id = uuidutils.generate_uuid()
res = node.do_delete(self.context)
self.assertFalse(res)
mock_delete.assert_called_once_with(self.context, node)
mock_status.assert_has_calls([
mock.call(self.context, consts.NS_DELETING,
"Deletion in progress"),
mock.call(self.context, consts.NS_ERROR,
"Failed in deleting PROFILE 'NODE_ID': Too Bad.")
])
@mock.patch.object(node_obj.Node, 'update')
@mock.patch.object(pb.Profile, 'update_object')
def test_node_update_new_profile(self, mock_update, mock_db):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context,
physical_id=uuidutils.generate_uuid())
node.id = node.store(self.context)
new_id = uuidutils.generate_uuid()
utils.create_profile(self.context, new_id)
mock_update.return_value = True
res = node.do_update(self.context, {'new_profile_id': new_id})
self.assertTrue(res)
mock_update.assert_called_once_with(self.context, node, new_id)
self.assertEqual(new_id, node.profile_id)
self.assertEqual(new_id, node.rt['profile'].id)
mock_db.assert_has_calls([
mock.call(self.context, node.id,
{'status': consts.NS_UPDATING,
'status_reason': 'Update in progress'}),
mock.call(self.context, node.id,
{'status': consts.NS_ACTIVE,
'status_reason': 'Update succeeded',
'profile_id': new_id,
'updated_at': mock.ANY})
])
@mock.patch.object(pb.Profile, 'update_object')
@mock.patch.object(node_obj.Node, 'update')
def test_node_update_name(self, mock_db, mock_update):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.store(self.context)
physical_id = uuidutils.generate_uuid()
node.physical_id = physical_id
res = node.do_update(self.context, {'name': 'new_name',
'role': 'new_role',
'metadata': {'k': {'m': 'v'}},
'bogus': 'foo'})
self.assertTrue(res)
self.assertEqual('new_name', node.name)
mock_db.assert_has_calls([
mock.call(self.context, node.id,
{'status': 'UPDATING',
'status_reason': 'Update in progress'}),
mock.call(self.context, node.id,
{'status': 'ACTIVE',
'status_reason': 'Update succeeded',
'name': 'new_name',
'role': 'new_role',
'metadata': {'k': {'m': 'v'}},
'updated_at': mock.ANY})
])
self.assertEqual(0, mock_update.call_count)
def test_node_update_not_created(self):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
self.assertIsNone(node.physical_id)
new_profile_id = '71d8f4dd-1ef9-4308-b7ae-03298b04449e'
res = node.do_update(self.context, new_profile_id)
self.assertFalse(res)
@mock.patch.object(pb.Profile, 'update_object')
@mock.patch.object(node_obj.Node, 'update')
def test_node_update_EResourceUpdate(self, mock_db, mock_update):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.physical_id = uuidutils.generate_uuid()
node.id = uuidutils.generate_uuid()
ex = exception.EResourceUpdate(type='PROFILE', id='ID',
message='reason')
mock_update.side_effect = ex
new_id = uuidutils.generate_uuid()
utils.create_profile(self.context, new_id)
res = node.do_update(self.context, {'new_profile_id': new_id})
self.assertFalse(res)
self.assertNotEqual(new_id, node.profile_id)
mock_db.assert_has_calls([
mock.call(
self.context, node.id,
{"status": "UPDATING", "status_reason": "Update in progress"}
),
mock.call(
self.context, node.id,
{"status": "ERROR",
"status_reason": "Failed in updating PROFILE 'ID': reason.",
"updated_at": mock.ANY}
)
])
self.assertEqual(1, mock_update.call_count)
@mock.patch.object(node_obj.Node, 'migrate')
def test_node_join_same_cluster(self, mock_migrate):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.index = 1
res = node.do_join(self.context, CLUSTER_ID)
self.assertTrue(res)
self.assertEqual(1, node.index)
self.assertIsNone(node.updated_at)
self.assertFalse(mock_migrate.called)
@mock.patch.object(pb.Profile, 'join_cluster')
@mock.patch.object(node_obj.Node, 'migrate')
def test_node_join(self, mock_migrate, mock_join_cluster):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
mock_join_cluster.return_value = True
cluster_id = 'fb8bca7a-a82b-4442-a40f-92d3e3cfb0b9'
res = node.do_join(self.context, cluster_id)
self.assertTrue(res)
mock_migrate.assert_called_once_with(self.context, node.id,
cluster_id, mock.ANY)
mock_join_cluster.assert_called_once_with(self.context, node,
cluster_id)
self.assertEqual(cluster_id, node.cluster_id)
self.assertEqual(mock_migrate.return_value.index, node.index)
self.assertIsNotNone(node.updated_at)
@mock.patch.object(pb.Profile, 'join_cluster')
def test_node_join_fail_profile_call(self, mock_join):
node = nodem.Node('node1', PROFILE_ID, None, self.context)
node.id = uuidutils.generate_uuid()
mock_join.return_value = False
cluster_id = '<KEY>'
res = node.do_join(self.context, cluster_id)
self.assertFalse(res)
mock_join.assert_called_once_with(self.context, node, cluster_id)
self.assertEqual(-1, node.index)
@mock.patch.object(node_obj.Node, 'migrate')
def test_node_leave_no_cluster(self, mock_migrate):
node = nodem.Node('node1', PROFILE_ID, '', self.context)
self.assertTrue(node.do_leave(self.context))
self.assertFalse(mock_migrate.called)
self.assertEqual('', node.cluster_id)
self.assertIsNone(node.updated_at)
@mock.patch.object(pb.Profile, 'leave_cluster')
@mock.patch.object(node_obj.Node, 'migrate')
def test_node_leave(self, mock_migrate, mock_leave_cluster):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
mock_leave_cluster.return_value = True
res = node.do_leave(self.context)
self.assertTrue(res)
self.assertEqual('', node.cluster_id)
self.assertIsNotNone(node.updated_at)
self.assertEqual(-1, node.index)
mock_migrate.assert_called_once_with(self.context, node.id,
None, mock.ANY)
mock_leave_cluster.assert_called_once_with(self.context, node)
@mock.patch.object(pb.Profile, 'leave_cluster')
def test_node_leave_fail_update_server_metadata(self, mock_leave):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context,
index=1)
mock_leave.return_value = False
res = node.do_leave(self.context)
self.assertFalse(res)
self.assertNotEqual('', node.cluster_id)
self.assertIsNone(node.updated_at)
self.assertEqual(1, node.index)
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'check_object')
def test_node_check(self, mock_check, mock_status):
node = nodem.Node('node1', PROFILE_ID, '')
node.status = consts.NS_ACTIVE
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
mock_check.return_value = True
res = node.do_check(self.context)
self.assertTrue(res)
mock_check.assert_called_once_with(self.context, node)
mock_status.assert_called_once_with(self.context, consts.NS_ACTIVE,
'Check: Node is ACTIVE.')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'check_object')
def test_node_check_warning(self, mock_check, mock_status):
node = nodem.Node('node1', PROFILE_ID, '')
node.status = consts.NS_WARNING
node.status_reason = 'bad news'
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
mock_check.return_value = True
res = node.do_check(self.context)
self.assertTrue(res)
mock_check.assert_called_once_with(self.context, node)
msg = ("Check: Physical object is ACTIVE but the node status was "
"WARNING. %s") % node.status_reason
mock_status.assert_called_once_with(self.context, consts.NS_WARNING,
msg)
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'check_object')
def test_node_check_not_active(self, mock_check, mock_status):
node = nodem.Node('node1', PROFILE_ID, '')
node.status = consts.NS_WARNING
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
mock_check.return_value = False
res = node.do_check(self.context)
self.assertTrue(res)
mock_status.assert_called_once_with(self.context, consts.NS_ERROR,
'Check: Node is not ACTIVE.')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'check_object')
def test_node_check_check_with_exc(self, mock_check, mock_status):
node = nodem.Node('node1', PROFILE_ID, '')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
err = exception.EResourceOperation(op='checking', type='server',
id=node.physical_id,
message='failed get')
mock_check.side_effect = err
res = node.do_check(self.context)
self.assertFalse(res)
mock_status.assert_called_once_with(
self.context,
consts.NS_ERROR,
"Failed in checking server '%s': failed get." % node.physical_id)
def test_node_check_no_physical_id(self):
node = nodem.Node('node1', PROFILE_ID, '')
res = node.do_check(self.context)
self.assertFalse(res)
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'check_object')
def test_node_check_no_server(self, mock_check, mock_status):
node = nodem.Node('node1', PROFILE_ID, '')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
err = exception.EServerNotFound(type='server',
id=node.physical_id,
message='No Server found')
mock_check.side_effect = err
res = node.do_check(self.context)
self.assertTrue(res)
mock_status.assert_called_once_with(
self.context, consts.NS_ERROR,
"Failed in found server '%s': No Server found."
% node.physical_id,
physical_id=None)
@mock.patch.object(pb.Profile, 'healthcheck_object')
def test_node_healthcheck(self, mock_healthcheck):
node = nodem.Node('node1', PROFILE_ID, '')
node.status = consts.NS_ACTIVE
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
mock_healthcheck.return_value = True
res = node.do_healthcheck(self.context)
self.assertTrue(res)
mock_healthcheck.assert_called_once_with(self.context, node)
def test_node_healthcheck_no_physical_id(self):
node = nodem.Node('node1', PROFILE_ID, '')
res = node.do_healthcheck(self.context)
self.assertFalse(res)
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_new_object(self, mock_recover, mock_status):
def set_status(*args, **kwargs):
if args[1] == 'ACTIVE':
node.physical_id = new_id
node.data = {'recovery': 'RECREATE'}
node = nodem.Node('node1', PROFILE_ID, '')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a'
# action = node_action.NodeAction(node.id, 'ACTION', self.ctx)
mock_recover.return_value = new_id, True
mock_status.side_effect = set_status
action = mock.Mock()
action.inputs = {'operation': 'SWIM'}
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
self.assertEqual('node1', node.name)
self.assertEqual(new_id, node.physical_id)
self.assertEqual(PROFILE_ID, node.profile_id)
self.assertEqual({'recovery': 'RECREATE'}, node.data)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ACTIVE,
reason='Recovery succeeded',
physical_id=new_id,
data={'recovery': 'RECREATE'})])
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_in_place(self, mock_recover, mock_status):
node = nodem.Node('node1', PROFILE_ID, None)
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
mock_recover.return_value = node.physical_id, True
action = mock.Mock(inputs={})
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_recover.assert_called_once_with(self.context, node)
self.assertEqual('node1', node.name)
self.assertEqual(PROFILE_ID, node.profile_id)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ACTIVE,
reason='Recovery succeeded')])
@mock.patch.object(nodem.Node, 'set_status')
def test_node_recover_check_active(self, mock_status):
node = nodem.Node('node1', PROFILE_ID, None)
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
node.status = 'ACTIVE'
mock_check = self.patchobject(pb.Profile, 'check_object')
mock_check.return_value = True
action = mock.Mock(inputs={'check': True})
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_check.assert_called_once_with(self.context, node)
mock_status.assert_called_once_with(self.context, consts.NS_ACTIVE,
reason='Recover: Node is ACTIVE.')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_check_error(self, mock_recover, mock_status):
def set_status(*args, **kwargs):
if args[1] == 'ACTIVE':
node.physical_id = new_id
node.data = {'recovery': 'recreate'}
node = nodem.Node('node1', PROFILE_ID, '')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a'
mock_recover.return_value = new_id, True
mock_status.side_effect = set_status
mock_check = self.patchobject(pb.Profile, 'check_object')
mock_check.return_value = False
action = mock.Mock(inputs={'check': True})
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_check.assert_called_once_with(self.context, node)
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
self.assertEqual('node1', node.name)
self.assertEqual(new_id, node.physical_id)
self.assertEqual(PROFILE_ID, node.profile_id)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ACTIVE,
reason='Recovery succeeded',
physical_id=new_id,
data={'recovery': 'RECREATE'})])
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_recreate(self, mock_recover, mock_status):
def set_status(*args, **kwargs):
if args[1] == 'ACTIVE':
node.physical_id = new_id
node.data = {'recovery': 'RECREATE'}
node = nodem.Node('node1', PROFILE_ID, '', id='fake')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a'
mock_recover.return_value = new_id, True
mock_status.side_effect = set_status
mock_check = self.patchobject(pb.Profile, 'check_object')
mock_check.return_value = False
action = mock.Mock(
outputs={}, inputs={'operation': 'RECREATE',
'check': True})
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_check.assert_called_once_with(self.context, node)
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
self.assertEqual('node1', node.name)
self.assertEqual(new_id, node.physical_id)
self.assertEqual(PROFILE_ID, node.profile_id)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ACTIVE,
reason='Recovery succeeded',
physical_id=new_id,
data={'recovery': 'RECREATE'})])
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_check_exception(self, mock_recover, mock_status):
def set_status(*args, **kwargs):
if args[1] == 'ACTIVE':
node.physical_id = new_id
node.data = {'recovery': 'RECREATE'}
node = nodem.Node('node1', PROFILE_ID, '')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a'
mock_recover.return_value = new_id, True
mock_status.side_effect = set_status
mock_check = self.patchobject(pb.Profile, 'check_object')
mock_check.side_effect = exception.EResourceOperation(
op='checking',
type='server',
id=node.physical_id,
reason='Boom!'
)
action = mock.Mock(inputs={'operation': 'boom',
'check': True})
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_check.assert_called_once_with(self.context, node)
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
self.assertEqual('node1', node.name)
self.assertEqual(new_id, node.physical_id)
self.assertEqual(PROFILE_ID, node.profile_id)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ACTIVE,
reason='Recovery succeeded',
physical_id=new_id,
data={'recovery': 'RECREATE'})])
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_failed_recover(self, mock_recover, mock_status):
node = nodem.Node('node1', PROFILE_ID, None)
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
mock_recover.return_value = node.physical_id, None
action = mock.Mock(inputs={'operation': 'RECREATE'})
res = node.do_recover(self.context, action)
self.assertFalse(res)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ERROR,
reason='Recovery failed')])
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_failed_recover_with_old_physical_id(self,
mock_recover,
mock_status):
node = nodem.Node('node1', PROFILE_ID, None)
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
action = mock.Mock(
inputs={'operation': consts.RECOVER_RECREATE, 'check': True})
mock_recover.side_effect = exception.EResourceOperation(
op=consts.RECOVER_RECREATE,
type='server',
id=node.physical_id,
resource_id=node.physical_id,
reason='Recovery failed',
)
res = node.do_recover(self.context, action)
self.assertFalse(res)
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
reason = ("Failed in RECREATE server 'd94d6333-82e6-4f87-b7ab-b786776d"
"f9d1': Internal error happened.")
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ERROR,
reason=six.text_type(reason),
physical_id=node.physical_id)])
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_failed_recover_with_new_physical_id(self,
mock_recover,
mock_status):
def set_status(*args, **kwargs):
if args[1] == consts.NS_ERROR:
node.physical_id = new_id
node = nodem.Node('node1', PROFILE_ID, None)
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a'
mock_status.side_effect = set_status
action = mock.Mock(inputs={'operation': consts.RECOVER_RECREATE,
'check': True})
mock_recover.side_effect = exception.EResourceOperation(
op=consts.RECOVER_RECREATE,
type='server',
id=node.physical_id,
resource_id=new_id,
reason='Recovery failed',
)
res = node.do_recover(self.context, action)
self.assertFalse(res)
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
reason = ("Failed in RECREATE server 'd94d6333-82e6-4f87-b7ab-b786776d"
"f9d1': Internal error happened.")
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ERROR,
reason=six.text_type(reason),
physical_id=new_id)])
def test_node_recover_no_physical_id_reboot_op(self):
node = nodem.Node('node1', PROFILE_ID, None)
action = mock.Mock(inputs={'operation': 'REBOOT'})
res = node.do_recover(self.context, action)
self.assertFalse(res)
def test_node_recover_no_physical_id_rebuild_op(self):
node = nodem.Node('node1', PROFILE_ID, None)
action = mock.Mock(inputs={'operation': 'REBUILD'})
res = node.do_recover(self.context, action)
self.assertFalse(res)
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_no_physical_id_no_op(self, mock_recover,
mock_status):
def set_status(*args, **kwargs):
if args[1] == 'ACTIVE':
node.physical_id = new_id
node.data = {'recovery': 'RECREATE'}
node = nodem.Node('node1', PROFILE_ID, '', id='fake')
new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a'
mock_recover.return_value = new_id, True
mock_status.side_effect = set_status
mock_check = self.patchobject(pb.Profile, 'check_object')
mock_check.return_value = False
action = mock.Mock(
outputs={}, inputs={})
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_check.assert_not_called()
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
self.assertEqual('node1', node.name)
self.assertEqual(new_id, node.physical_id)
self.assertEqual(PROFILE_ID, node.profile_id)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ACTIVE,
reason='Recovery succeeded',
physical_id=new_id,
data={'recovery': 'RECREATE'})])
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_no_physical_id_recreate_op(self, mock_recover,
mock_status):
def set_status(*args, **kwargs):
if args[1] == 'ACTIVE':
node.physical_id = new_id
node.data = {'recovery': 'RECREATE'}
node = nodem.Node('node1', PROFILE_ID, '', id='fake')
new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a'
mock_recover.return_value = new_id, True
mock_status.side_effect = set_status
mock_check = self.patchobject(pb.Profile, 'check_object')
mock_check.return_value = False
action = mock.Mock(
outputs={}, inputs={'operation': 'RECREATE',
'check': True})
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_check.assert_called_once_with(self.context, node)
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
self.assertEqual('node1', node.name)
self.assertEqual(new_id, node.physical_id)
self.assertEqual(PROFILE_ID, node.profile_id)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ACTIVE,
reason='Recovery succeeded',
physical_id=new_id,
data={'recovery': 'RECREATE'})])
@mock.patch.object(nodem.Node, 'set_status')
def test_node_recover_operation_not_support(self, mock_set_status):
node = nodem.Node('node1', PROFILE_ID, None)
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
action = mock.Mock(
outputs={}, inputs={'operation': 'foo'})
res = node.do_recover(self.context, action)
self.assertEqual({}, action.outputs)
self.assertFalse(res)
@mock.patch.object(nodem.Node, 'set_status')
def test_node_recover_operation_not_string(self, mock_set_status):
node = nodem.Node('node1', PROFILE_ID, None)
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
action = mock.Mock(
outputs={}, inputs={'operation': 'foo'})
res = node.do_recover(self.context, action)
self.assertEqual({}, action.outputs)
self.assertFalse(res)
@mock.patch.object(nodem.Node, 'set_status')
def test_node_operation(self, mock_set_status):
node = nodem.Node('node1', PROFILE_ID, '')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
x_profile = mock.Mock()
x_profile.handle_dance = mock.Mock(return_value=True)
node.rt['profile'] = x_profile
inputs = {'operation': 'dance', 'params': {'style': 'tango'}}
res = node.do_operation(self.context, **inputs)
self.assertTrue(res)
mock_set_status.assert_has_calls([
mock.call(self.context, consts.NS_OPERATING,
reason="Operation 'dance' in progress"),
mock.call(self.context, consts.NS_ACTIVE,
reason="Operation 'dance' succeeded")
])
x_profile.handle_dance.assert_called_once_with(node, style='tango')
def test_node_operation_no_physical_id(self):
node = nodem.Node('node1', PROFILE_ID, None)
inputs = {'operation': 'dance', 'params': {'style': 'tango'}}
res = node.do_operation(self.context, **inputs)
self.assertFalse(res)
@mock.patch.object(nodem.Node, 'set_status')
def test_node_operation_failed_op(self, mock_set_status):
node = nodem.Node('node1', PROFILE_ID, '')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
x_profile = mock.Mock()
err = exception.EResourceOperation(
op='dance', type='container', id='test_id', message='Boom')
x_profile.handle_dance = mock.Mock(side_effect=err)
node.rt['profile'] = x_profile
inputs = {'operation': 'dance', 'params': {'style': 'tango'}}
res = node.do_operation(self.context, **inputs)
self.assertFalse(res)
mock_set_status.assert_has_calls([
mock.call(self.context, consts.NS_OPERATING,
reason="Operation 'dance' in progress"),
mock.call(self.context, consts.NS_ERROR,
reason="Failed in dance container 'test_id': Boom.")
])
x_profile.handle_dance.assert_called_once_with(node, style='tango')
def _verify_execution_create_args(self, expected_name,
expected_inputs_dict, wfc):
wfc.execution_create.assert_called_once_with(mock.ANY, mock.ANY)
actual_call_args, call_kwargs = wfc.execution_create.call_args
# execution_create parameters are name and inputs
actual_call_name, actual_call_inputs = actual_call_args
# actual_call_inputs is string representation of a dictionary.
# convert actual_call_inputs to json, then dump it back as string
# sorted by key
final_actual_call_inputs = jsonutils.dumps(
jsonutils.loads(actual_call_inputs), sort_keys=True)
# dump expected_inputs_dict as string sorted by key
final_expected_inputs = jsonutils.dumps(
expected_inputs_dict, sort_keys=True)
# compare the sorted input strings along with the names
self.assertEqual(actual_call_name, expected_name)
self.assertEqual(final_actual_call_inputs, final_expected_inputs)
def test_run_workflow(self):
node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER')
node.physical_id = 'FAKE_NODE'
wfc = mock.Mock()
wfc.workflow_find.return_value = None
wfc.workflow_create = mock.Mock()
wfc.execution_create = mock.Mock()
x_profile = mock.Mock()
x_profile.workflow = mock.Mock(return_value=wfc)
node.rt['profile'] = x_profile
options = {
'workflow_name': 'foo',
'inputs': {
'definition': {
'bar': 'baz'
},
'FAKE_KEY1': 'FAKE_VALUE1',
'FAKE_KEY2': 'FAKE_VALUE2',
}
}
res = node.run_workflow(**options)
self.assertTrue(res)
x_profile.workflow.assert_called_once_with(node)
wfc.workflow_find.assert_called_once_with('foo')
wfc.workflow_create.assert_called_once_with(
{'bar': 'baz'}, scope='private')
final_dict = {
'cluster_id': 'FAKE_CLUSTER',
'node_id': 'FAKE_NODE',
'FAKE_KEY1': 'FAKE_VALUE1',
'FAKE_KEY2': 'FAKE_VALUE2',
}
self._verify_execution_create_args('foo', final_dict, wfc)
def test_run_workflow_no_physical_id(self):
node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER')
node.physical_id = None
res = node.run_workflow()
self.assertFalse(res)
def test_run_workflow_workflow_is_found(self):
node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER')
node.physical_id = 'FAKE_NODE'
wfc = mock.Mock()
wfc.workflow_find.return_value = mock.Mock(definition={'bar': 'baz'})
wfc.workflow_create = mock.Mock()
wfc.execution_create = mock.Mock()
x_profile = mock.Mock()
x_profile.workflow = mock.Mock(return_value=wfc)
node.rt['profile'] = x_profile
options = {
'workflow_name': 'foo',
'inputs': {
'FAKE_KEY1': 'FAKE_VALUE1',
'FAKE_KEY2': 'FAKE_VALUE2',
}
}
res = node.run_workflow(**options)
self.assertTrue(res)
x_profile.workflow.assert_called_once_with(node)
wfc.workflow_find.assert_called_once_with('foo')
self.assertEqual(0, wfc.workflow_create.call_count)
final_dict = {
'cluster_id': 'FAKE_CLUSTER',
'node_id': 'FAKE_NODE',
'FAKE_KEY1': 'FAKE_VALUE1',
'FAKE_KEY2': 'FAKE_VALUE2',
}
self._verify_execution_create_args('foo', final_dict, wfc)
def test_run_workflow_failed_creation(self):
node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER')
node.physical_id = 'FAKE_NODE'
wfc = mock.Mock()
wfc.workflow_find.return_value = None
err = exception.InternalError(message='boom')
wfc.workflow_create.side_effect = err
wfc.execution_create = mock.Mock()
x_profile = mock.Mock()
x_profile.workflow = mock.Mock(return_value=wfc)
node.rt['profile'] = x_profile
options = {
'workflow_name': 'foo',
'inputs': {
'definition': {'bar': 'baz'},
'FAKE_KEY1': 'FAKE_VALUE1',
'FAKE_KEY2': 'FAKE_VALUE2',
}
}
ex = self.assertRaises(exception.EResourceOperation,
node.run_workflow,
**options)
self.assertEqual("Failed in executing workflow 'foo': boom.",
six.text_type(ex))
x_profile.workflow.assert_called_once_with(node)
wfc.workflow_find.assert_called_once_with('foo')
wfc.workflow_create.assert_called_once_with(
{'bar': 'baz'}, scope='private')
self.assertEqual(0, wfc.execution_create.call_count)
def test_run_workflow_failed_execution(self):
node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER')
node.physical_id = 'FAKE_NODE'
wfc = mock.Mock()
wfc.workflow_find.return_value = None
wfc.workflow_create = mock.Mock()
err = exception.InternalError(message='boom')
wfc.execution_create.side_effect = err
x_profile = mock.Mock()
x_profile.workflow = mock.Mock(return_value=wfc)
node.rt['profile'] = x_profile
options = {
'workflow_name': 'foo',
'inputs': {
'definition': {'bar': 'baz'},
'FAKE_KEY1': 'FAKE_VALUE1',
'FAKE_KEY2': 'FAKE_VALUE2',
}
}
ex = self.assertRaises(exception.EResourceOperation,
node.run_workflow,
**options)
self.assertEqual("Failed in executing workflow 'foo': boom.",
six.text_type(ex))
x_profile.workflow.assert_called_once_with(node)
wfc.workflow_find.assert_called_once_with('foo')
wfc.workflow_create.assert_called_once_with(
{'bar': 'baz'}, scope='private')
final_dict = {
'cluster_id': 'FAKE_CLUSTER',
'node_id': 'FAKE_NODE',
'FAKE_KEY1': 'FAKE_VALUE1',
'FAKE_KEY2': 'FAKE_VALUE2',
}
self._verify_execution_create_args('foo', final_dict, wfc) | senlin/tests/unit/engine/test_node.py |
import mock
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import six
from senlin.common import consts
from senlin.common import exception
from senlin.engine import node as nodem
from senlin.objects import node as node_obj
from senlin.profiles import base as pb
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
PROFILE_ID = 'aa5f86b8-e52b-4f2b-828a-4c14c770938d'
CLUSTER_ID = '2c5139a6-24ba-4a6f-bd53-a268f61536de'
NODE_ID = '60efdaa1-06c2-4fcf-ae44-17a2d85ff3ea'
class TestNode(base.SenlinTestCase):
def setUp(self):
super(TestNode, self).setUp()
self.context = utils.dummy_context(project='node_test_project')
self.profile = utils.create_profile(self.context, PROFILE_ID)
self.cluster = utils.create_cluster(self.context, CLUSTER_ID,
PROFILE_ID)
def test_node_init(self):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, role='first_node')
self.assertIsNone(node.id)
self.assertEqual('node1', node.name)
self.assertIsNone(node.physical_id)
self.assertEqual(PROFILE_ID, node.profile_id)
self.assertEqual('', node.user)
self.assertEqual('', node.project)
self.assertEqual('', node.domain)
self.assertEqual(CLUSTER_ID, node.cluster_id)
self.assertEqual(-1, node.index)
self.assertEqual('first_node', node.role)
self.assertIsNone(node.init_at)
self.assertIsNone(node.created_at)
self.assertIsNone(node.updated_at)
self.assertEqual('INIT', node.status)
self.assertEqual('Initializing', node.status_reason)
self.assertEqual({}, node.data)
self.assertEqual({}, node.metadata)
self.assertEqual({}, node.rt)
def test_node_init_random_name(self):
node = nodem.Node(None, PROFILE_ID, None)
self.assertIsNotNone(node.name)
self.assertEqual(13, len(node.name))
def test_node_store_init(self):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context,
role='first_node', index=1)
self.assertIsNone(node.id)
node_id = node.store(self.context)
self.assertIsNotNone(node_id)
node_info = node_obj.Node.get(self.context, node_id)
self.assertIsNotNone(node_info)
self.assertEqual('node1', node_info.name)
self.assertIsNone(node_info.physical_id)
self.assertEqual(CLUSTER_ID, node_info.cluster_id)
self.assertEqual(PROFILE_ID, node_info.profile_id)
self.assertEqual(self.context.user_id, node_info.user)
self.assertEqual(self.context.project_id, node_info.project)
self.assertEqual(self.context.domain_id, node_info.domain)
self.assertEqual(1, node_info.index)
self.assertEqual('first_node', node.role)
self.assertIsNotNone(node_info.init_at)
self.assertIsNone(node_info.created_at)
self.assertIsNone(node_info.updated_at)
self.assertEqual('INIT', node_info.status)
self.assertEqual('Initializing', node_info.status_reason)
self.assertEqual({}, node_info.metadata)
self.assertEqual({}, node_info.data)
def test_node_store_update(self):
node = nodem.Node('node1', PROFILE_ID, "", user=self.context.user_id,
project=self.context.project_id)
node_id = node.store(self.context)
node.name = 'new_name'
new_node_id = node.store(self.context)
self.assertEqual(node_id, new_node_id)
def test_node_load(self):
ex = self.assertRaises(exception.ResourceNotFound,
nodem.Node.load,
self.context, 'non-existent', None)
self.assertEqual("The node 'non-existent' could not be found.",
six.text_type(ex))
x_node_id = 'ee96c490-2dee-40c8-8919-4c64b89e326c'
node = utils.create_node(self.context, x_node_id, PROFILE_ID,
CLUSTER_ID)
node_info = nodem.Node.load(self.context, x_node_id)
self.assertEqual(node.id, node_info.id)
self.assertEqual(node.name, node_info.name)
self.assertEqual(node.physical_id, node_info.physical_id)
self.assertEqual(node.cluster_id, node_info.cluster_id)
self.assertEqual(node.profile_id, node_info.profile_id)
self.assertEqual(node.user, node_info.user)
self.assertEqual(node.project, node_info.project)
self.assertEqual(node.domain, node_info.domain)
self.assertEqual(node.index, node_info.index)
self.assertEqual(node.role, node_info.role)
self.assertEqual(node.init_at, node_info.init_at)
self.assertEqual(node.created_at, node_info.created_at)
self.assertEqual(node.updated_at, node_info.updated_at)
self.assertEqual(node.status, node_info.status)
self.assertEqual(node.status_reason, node_info.status_reason)
self.assertEqual(node.metadata, node_info.metadata)
self.assertEqual(node.data, node_info.data)
self.assertEqual(self.profile.name, node_info.rt['profile'].name)
def test_node_load_diff_project(self):
x_node_id = 'c06840c5-f4e4-49ae-8143-9da5b4c73f38'
utils.create_node(self.context, x_node_id, PROFILE_ID, CLUSTER_ID)
new_ctx = utils.dummy_context(project='a-different-project')
ex = self.assertRaises(exception.ResourceNotFound,
nodem.Node.load,
new_ctx, x_node_id, None)
self.assertEqual("The node '%s' could not be found." % x_node_id,
six.text_type(ex))
res = nodem.Node.load(new_ctx, x_node_id, project_safe=False)
self.assertIsNotNone(res)
self.assertEqual(x_node_id, res.id)
@mock.patch.object(nodem.Node, '_from_object')
@mock.patch.object(node_obj.Node, 'get_all')
def test_node_load_all(self, mock_get, mock_init):
x_obj_1 = mock.Mock()
x_obj_2 = mock.Mock()
mock_get.return_value = [x_obj_1, x_obj_2]
x_node_1 = mock.Mock()
x_node_2 = mock.Mock()
mock_init.side_effect = [x_node_1, x_node_2]
result = nodem.Node.load_all(self.context)
self.assertEqual([x_node_1, x_node_2], [n for n in result])
mock_get.assert_called_once_with(self.context, cluster_id=None,
limit=None, marker=None,
sort=None, filters=None,
project_safe=True)
mock_init.assert_has_calls([
mock.call(self.context, x_obj_1),
mock.call(self.context, x_obj_2)])
def test_node_set_status(self):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.store(self.context)
self.assertEqual(nodem.consts.NS_INIT, node.status)
self.assertIsNotNone(node.init_at)
self.assertIsNone(node.created_at)
self.assertIsNone(node.updated_at)
# create
node.set_status(self.context, consts.NS_CREATING,
reason='Creation in progress')
self.assertEqual('CREATING', node.status)
self.assertEqual('Creation in progress', node.status_reason)
self.assertIsNone(node.created_at)
self.assertIsNone(node.updated_at)
node.set_status(self.context, consts.NS_ACTIVE,
reason='Creation succeeded')
self.assertEqual('ACTIVE', node.status)
self.assertEqual('Creation succeeded', node.status_reason)
self.assertIsNotNone(node.created_at)
self.assertIsNotNone(node.updated_at)
# update
node.set_status(self.context, consts.NS_UPDATING,
reason='Update in progress')
self.assertEqual('UPDATING', node.status)
self.assertEqual('Update in progress', node.status_reason)
self.assertIsNotNone(node.created_at)
node.set_status(self.context, consts.NS_ACTIVE,
reason='Update succeeded')
self.assertEqual('ACTIVE', node.status)
self.assertEqual('Update succeeded', node.status_reason)
self.assertIsNotNone(node.created_at)
self.assertIsNotNone(node.updated_at)
node.set_status(self.context, consts.NS_ACTIVE)
self.assertEqual('ACTIVE', node.status)
self.assertIsNotNone(node.created_at)
self.assertIsNotNone(node.updated_at)
# delete
node.set_status(self.context, consts.NS_DELETING,
reason='Deletion in progress')
self.assertEqual('DELETING', node.status)
self.assertEqual('Deletion in progress', node.status_reason)
self.assertIsNotNone(node.created_at)
@mock.patch.object(pb.Profile, 'get_details')
def test_node_get_details(self, mock_details):
node = nodem.Node('node1', CLUSTER_ID, None)
for physical_id in (None, ''):
node.physical_id = physical_id
self.assertEqual({}, node.get_details(self.context))
self.assertEqual(0, mock_details.call_count)
fake_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
node.physical_id = fake_id
mock_details.return_value = {'foo': 'bar'}
res = node.get_details(self.context)
mock_details.assert_called_once_with(self.context, node)
self.assertEqual({'foo': 'bar'}, res)
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'create_object')
def test_node_create(self, mock_create, mock_status):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
mock_create.return_value = physical_id
res = node.do_create(self.context)
self.assertTrue(res)
mock_status.assert_any_call(self.context, consts.NS_CREATING,
'Creation in progress')
mock_status.assert_any_call(self.context, consts.NS_ACTIVE,
'Creation succeeded',
physical_id=physical_id)
@mock.patch.object(nodem.Node, 'set_status')
def test_node_create_not_init(self, mock_status):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.status = 'NOT_INIT'
res, reason = node.do_create(self.context)
self.assertFalse(res)
self.assertEqual('Node must be in INIT status', reason)
mock_status.assert_any_call(self.context, consts.NS_ERROR,
'Node must be in INIT status')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'create_object')
def test_node_create_not_created(self, mock_create, mock_status):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
mock_create.side_effect = exception.EResourceCreation(
type='PROFILE', message='Boom', resource_id='test_id')
res, reason = node.do_create(self.context)
self.assertFalse(res)
self.assertEqual(str(reason), 'Failed in creating PROFILE: Boom.')
mock_status.assert_any_call(self.context, consts.NS_CREATING,
'Creation in progress')
mock_status.assert_any_call(self.context, consts.NS_ERROR,
'Failed in creating PROFILE: Boom.',
physical_id='test_id')
@mock.patch.object(node_obj.Node, 'delete')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'delete_object')
def test_node_delete(self, mock_delete, mock_status, mock_db_delete):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.physical_id = uuidutils.generate_uuid()
node.id = uuidutils.generate_uuid()
mock_db_delete.return_value = True
res = node.do_delete(self.context)
self.assertTrue(res)
mock_delete.assert_called_once_with(self.context, node)
mock_db_delete.assert_called_once_with(self.context, node.id)
mock_status.assert_called_once_with(self.context, consts.NS_DELETING,
'Deletion in progress')
@mock.patch.object(node_obj.Node, 'delete')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'delete_object')
def test_node_delete_no_physical_id(self, mock_delete, mock_status,
mock_db_delete):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.id = uuidutils.generate_uuid()
self.assertIsNone(node.physical_id)
mock_db_delete.return_value = True
res = node.do_delete(self.context)
self.assertTrue(res)
mock_status.assert_called_once_with(self.context, consts.NS_DELETING,
"Deletion in progress")
self.assertTrue(mock_delete.called)
mock_db_delete.assert_called_once_with(self.context, node.id)
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'delete_object')
def test_node_delete_EResourceDeletion(self, mock_delete, mock_status):
ex = exception.EResourceDeletion(type='PROFILE', id='NODE_ID',
message='Too Bad')
mock_delete.side_effect = ex
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.physical_id = uuidutils.generate_uuid()
res = node.do_delete(self.context)
self.assertFalse(res)
mock_delete.assert_called_once_with(self.context, node)
mock_status.assert_has_calls([
mock.call(self.context, consts.NS_DELETING,
"Deletion in progress"),
mock.call(self.context, consts.NS_ERROR,
"Failed in deleting PROFILE 'NODE_ID': Too Bad.")
])
@mock.patch.object(node_obj.Node, 'update')
@mock.patch.object(pb.Profile, 'update_object')
def test_node_update_new_profile(self, mock_update, mock_db):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context,
physical_id=uuidutils.generate_uuid())
node.id = node.store(self.context)
new_id = uuidutils.generate_uuid()
utils.create_profile(self.context, new_id)
mock_update.return_value = True
res = node.do_update(self.context, {'new_profile_id': new_id})
self.assertTrue(res)
mock_update.assert_called_once_with(self.context, node, new_id)
self.assertEqual(new_id, node.profile_id)
self.assertEqual(new_id, node.rt['profile'].id)
mock_db.assert_has_calls([
mock.call(self.context, node.id,
{'status': consts.NS_UPDATING,
'status_reason': 'Update in progress'}),
mock.call(self.context, node.id,
{'status': consts.NS_ACTIVE,
'status_reason': 'Update succeeded',
'profile_id': new_id,
'updated_at': mock.ANY})
])
@mock.patch.object(pb.Profile, 'update_object')
@mock.patch.object(node_obj.Node, 'update')
def test_node_update_name(self, mock_db, mock_update):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.store(self.context)
physical_id = uuidutils.generate_uuid()
node.physical_id = physical_id
res = node.do_update(self.context, {'name': 'new_name',
'role': 'new_role',
'metadata': {'k': {'m': 'v'}},
'bogus': 'foo'})
self.assertTrue(res)
self.assertEqual('new_name', node.name)
mock_db.assert_has_calls([
mock.call(self.context, node.id,
{'status': 'UPDATING',
'status_reason': 'Update in progress'}),
mock.call(self.context, node.id,
{'status': 'ACTIVE',
'status_reason': 'Update succeeded',
'name': 'new_name',
'role': 'new_role',
'metadata': {'k': {'m': 'v'}},
'updated_at': mock.ANY})
])
self.assertEqual(0, mock_update.call_count)
def test_node_update_not_created(self):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
self.assertIsNone(node.physical_id)
new_profile_id = '71d8f4dd-1ef9-4308-b7ae-03298b04449e'
res = node.do_update(self.context, new_profile_id)
self.assertFalse(res)
@mock.patch.object(pb.Profile, 'update_object')
@mock.patch.object(node_obj.Node, 'update')
def test_node_update_EResourceUpdate(self, mock_db, mock_update):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.physical_id = uuidutils.generate_uuid()
node.id = uuidutils.generate_uuid()
ex = exception.EResourceUpdate(type='PROFILE', id='ID',
message='reason')
mock_update.side_effect = ex
new_id = uuidutils.generate_uuid()
utils.create_profile(self.context, new_id)
res = node.do_update(self.context, {'new_profile_id': new_id})
self.assertFalse(res)
self.assertNotEqual(new_id, node.profile_id)
mock_db.assert_has_calls([
mock.call(
self.context, node.id,
{"status": "UPDATING", "status_reason": "Update in progress"}
),
mock.call(
self.context, node.id,
{"status": "ERROR",
"status_reason": "Failed in updating PROFILE 'ID': reason.",
"updated_at": mock.ANY}
)
])
self.assertEqual(1, mock_update.call_count)
@mock.patch.object(node_obj.Node, 'migrate')
def test_node_join_same_cluster(self, mock_migrate):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
node.index = 1
res = node.do_join(self.context, CLUSTER_ID)
self.assertTrue(res)
self.assertEqual(1, node.index)
self.assertIsNone(node.updated_at)
self.assertFalse(mock_migrate.called)
@mock.patch.object(pb.Profile, 'join_cluster')
@mock.patch.object(node_obj.Node, 'migrate')
def test_node_join(self, mock_migrate, mock_join_cluster):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
mock_join_cluster.return_value = True
cluster_id = 'fb8bca7a-a82b-4442-a40f-92d3e3cfb0b9'
res = node.do_join(self.context, cluster_id)
self.assertTrue(res)
mock_migrate.assert_called_once_with(self.context, node.id,
cluster_id, mock.ANY)
mock_join_cluster.assert_called_once_with(self.context, node,
cluster_id)
self.assertEqual(cluster_id, node.cluster_id)
self.assertEqual(mock_migrate.return_value.index, node.index)
self.assertIsNotNone(node.updated_at)
@mock.patch.object(pb.Profile, 'join_cluster')
def test_node_join_fail_profile_call(self, mock_join):
node = nodem.Node('node1', PROFILE_ID, None, self.context)
node.id = uuidutils.generate_uuid()
mock_join.return_value = False
cluster_id = '<KEY>'
res = node.do_join(self.context, cluster_id)
self.assertFalse(res)
mock_join.assert_called_once_with(self.context, node, cluster_id)
self.assertEqual(-1, node.index)
@mock.patch.object(node_obj.Node, 'migrate')
def test_node_leave_no_cluster(self, mock_migrate):
node = nodem.Node('node1', PROFILE_ID, '', self.context)
self.assertTrue(node.do_leave(self.context))
self.assertFalse(mock_migrate.called)
self.assertEqual('', node.cluster_id)
self.assertIsNone(node.updated_at)
@mock.patch.object(pb.Profile, 'leave_cluster')
@mock.patch.object(node_obj.Node, 'migrate')
def test_node_leave(self, mock_migrate, mock_leave_cluster):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context)
mock_leave_cluster.return_value = True
res = node.do_leave(self.context)
self.assertTrue(res)
self.assertEqual('', node.cluster_id)
self.assertIsNotNone(node.updated_at)
self.assertEqual(-1, node.index)
mock_migrate.assert_called_once_with(self.context, node.id,
None, mock.ANY)
mock_leave_cluster.assert_called_once_with(self.context, node)
@mock.patch.object(pb.Profile, 'leave_cluster')
def test_node_leave_fail_update_server_metadata(self, mock_leave):
node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context,
index=1)
mock_leave.return_value = False
res = node.do_leave(self.context)
self.assertFalse(res)
self.assertNotEqual('', node.cluster_id)
self.assertIsNone(node.updated_at)
self.assertEqual(1, node.index)
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'check_object')
def test_node_check(self, mock_check, mock_status):
node = nodem.Node('node1', PROFILE_ID, '')
node.status = consts.NS_ACTIVE
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
mock_check.return_value = True
res = node.do_check(self.context)
self.assertTrue(res)
mock_check.assert_called_once_with(self.context, node)
mock_status.assert_called_once_with(self.context, consts.NS_ACTIVE,
'Check: Node is ACTIVE.')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'check_object')
def test_node_check_warning(self, mock_check, mock_status):
node = nodem.Node('node1', PROFILE_ID, '')
node.status = consts.NS_WARNING
node.status_reason = 'bad news'
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
mock_check.return_value = True
res = node.do_check(self.context)
self.assertTrue(res)
mock_check.assert_called_once_with(self.context, node)
msg = ("Check: Physical object is ACTIVE but the node status was "
"WARNING. %s") % node.status_reason
mock_status.assert_called_once_with(self.context, consts.NS_WARNING,
msg)
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'check_object')
def test_node_check_not_active(self, mock_check, mock_status):
node = nodem.Node('node1', PROFILE_ID, '')
node.status = consts.NS_WARNING
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
mock_check.return_value = False
res = node.do_check(self.context)
self.assertTrue(res)
mock_status.assert_called_once_with(self.context, consts.NS_ERROR,
'Check: Node is not ACTIVE.')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'check_object')
def test_node_check_check_with_exc(self, mock_check, mock_status):
node = nodem.Node('node1', PROFILE_ID, '')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
err = exception.EResourceOperation(op='checking', type='server',
id=node.physical_id,
message='failed get')
mock_check.side_effect = err
res = node.do_check(self.context)
self.assertFalse(res)
mock_status.assert_called_once_with(
self.context,
consts.NS_ERROR,
"Failed in checking server '%s': failed get." % node.physical_id)
def test_node_check_no_physical_id(self):
node = nodem.Node('node1', PROFILE_ID, '')
res = node.do_check(self.context)
self.assertFalse(res)
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'check_object')
def test_node_check_no_server(self, mock_check, mock_status):
node = nodem.Node('node1', PROFILE_ID, '')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
err = exception.EServerNotFound(type='server',
id=node.physical_id,
message='No Server found')
mock_check.side_effect = err
res = node.do_check(self.context)
self.assertTrue(res)
mock_status.assert_called_once_with(
self.context, consts.NS_ERROR,
"Failed in found server '%s': No Server found."
% node.physical_id,
physical_id=None)
@mock.patch.object(pb.Profile, 'healthcheck_object')
def test_node_healthcheck(self, mock_healthcheck):
node = nodem.Node('node1', PROFILE_ID, '')
node.status = consts.NS_ACTIVE
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
mock_healthcheck.return_value = True
res = node.do_healthcheck(self.context)
self.assertTrue(res)
mock_healthcheck.assert_called_once_with(self.context, node)
def test_node_healthcheck_no_physical_id(self):
node = nodem.Node('node1', PROFILE_ID, '')
res = node.do_healthcheck(self.context)
self.assertFalse(res)
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_new_object(self, mock_recover, mock_status):
def set_status(*args, **kwargs):
if args[1] == 'ACTIVE':
node.physical_id = new_id
node.data = {'recovery': 'RECREATE'}
node = nodem.Node('node1', PROFILE_ID, '')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a'
# action = node_action.NodeAction(node.id, 'ACTION', self.ctx)
mock_recover.return_value = new_id, True
mock_status.side_effect = set_status
action = mock.Mock()
action.inputs = {'operation': 'SWIM'}
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
self.assertEqual('node1', node.name)
self.assertEqual(new_id, node.physical_id)
self.assertEqual(PROFILE_ID, node.profile_id)
self.assertEqual({'recovery': 'RECREATE'}, node.data)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ACTIVE,
reason='Recovery succeeded',
physical_id=new_id,
data={'recovery': 'RECREATE'})])
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_in_place(self, mock_recover, mock_status):
node = nodem.Node('node1', PROFILE_ID, None)
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
mock_recover.return_value = node.physical_id, True
action = mock.Mock(inputs={})
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_recover.assert_called_once_with(self.context, node)
self.assertEqual('node1', node.name)
self.assertEqual(PROFILE_ID, node.profile_id)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ACTIVE,
reason='Recovery succeeded')])
@mock.patch.object(nodem.Node, 'set_status')
def test_node_recover_check_active(self, mock_status):
node = nodem.Node('node1', PROFILE_ID, None)
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
node.status = 'ACTIVE'
mock_check = self.patchobject(pb.Profile, 'check_object')
mock_check.return_value = True
action = mock.Mock(inputs={'check': True})
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_check.assert_called_once_with(self.context, node)
mock_status.assert_called_once_with(self.context, consts.NS_ACTIVE,
reason='Recover: Node is ACTIVE.')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_check_error(self, mock_recover, mock_status):
def set_status(*args, **kwargs):
if args[1] == 'ACTIVE':
node.physical_id = new_id
node.data = {'recovery': 'recreate'}
node = nodem.Node('node1', PROFILE_ID, '')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a'
mock_recover.return_value = new_id, True
mock_status.side_effect = set_status
mock_check = self.patchobject(pb.Profile, 'check_object')
mock_check.return_value = False
action = mock.Mock(inputs={'check': True})
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_check.assert_called_once_with(self.context, node)
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
self.assertEqual('node1', node.name)
self.assertEqual(new_id, node.physical_id)
self.assertEqual(PROFILE_ID, node.profile_id)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ACTIVE,
reason='Recovery succeeded',
physical_id=new_id,
data={'recovery': 'RECREATE'})])
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_recreate(self, mock_recover, mock_status):
def set_status(*args, **kwargs):
if args[1] == 'ACTIVE':
node.physical_id = new_id
node.data = {'recovery': 'RECREATE'}
node = nodem.Node('node1', PROFILE_ID, '', id='fake')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a'
mock_recover.return_value = new_id, True
mock_status.side_effect = set_status
mock_check = self.patchobject(pb.Profile, 'check_object')
mock_check.return_value = False
action = mock.Mock(
outputs={}, inputs={'operation': 'RECREATE',
'check': True})
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_check.assert_called_once_with(self.context, node)
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
self.assertEqual('node1', node.name)
self.assertEqual(new_id, node.physical_id)
self.assertEqual(PROFILE_ID, node.profile_id)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ACTIVE,
reason='Recovery succeeded',
physical_id=new_id,
data={'recovery': 'RECREATE'})])
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_check_exception(self, mock_recover, mock_status):
def set_status(*args, **kwargs):
if args[1] == 'ACTIVE':
node.physical_id = new_id
node.data = {'recovery': 'RECREATE'}
node = nodem.Node('node1', PROFILE_ID, '')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a'
mock_recover.return_value = new_id, True
mock_status.side_effect = set_status
mock_check = self.patchobject(pb.Profile, 'check_object')
mock_check.side_effect = exception.EResourceOperation(
op='checking',
type='server',
id=node.physical_id,
reason='Boom!'
)
action = mock.Mock(inputs={'operation': 'boom',
'check': True})
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_check.assert_called_once_with(self.context, node)
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
self.assertEqual('node1', node.name)
self.assertEqual(new_id, node.physical_id)
self.assertEqual(PROFILE_ID, node.profile_id)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ACTIVE,
reason='Recovery succeeded',
physical_id=new_id,
data={'recovery': 'RECREATE'})])
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_failed_recover(self, mock_recover, mock_status):
node = nodem.Node('node1', PROFILE_ID, None)
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
mock_recover.return_value = node.physical_id, None
action = mock.Mock(inputs={'operation': 'RECREATE'})
res = node.do_recover(self.context, action)
self.assertFalse(res)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ERROR,
reason='Recovery failed')])
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_failed_recover_with_old_physical_id(self,
mock_recover,
mock_status):
node = nodem.Node('node1', PROFILE_ID, None)
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
action = mock.Mock(
inputs={'operation': consts.RECOVER_RECREATE, 'check': True})
mock_recover.side_effect = exception.EResourceOperation(
op=consts.RECOVER_RECREATE,
type='server',
id=node.physical_id,
resource_id=node.physical_id,
reason='Recovery failed',
)
res = node.do_recover(self.context, action)
self.assertFalse(res)
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
reason = ("Failed in RECREATE server 'd94d6333-82e6-4f87-b7ab-b786776d"
"f9d1': Internal error happened.")
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ERROR,
reason=six.text_type(reason),
physical_id=node.physical_id)])
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_failed_recover_with_new_physical_id(self,
mock_recover,
mock_status):
def set_status(*args, **kwargs):
if args[1] == consts.NS_ERROR:
node.physical_id = new_id
node = nodem.Node('node1', PROFILE_ID, None)
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a'
mock_status.side_effect = set_status
action = mock.Mock(inputs={'operation': consts.RECOVER_RECREATE,
'check': True})
mock_recover.side_effect = exception.EResourceOperation(
op=consts.RECOVER_RECREATE,
type='server',
id=node.physical_id,
resource_id=new_id,
reason='Recovery failed',
)
res = node.do_recover(self.context, action)
self.assertFalse(res)
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
reason = ("Failed in RECREATE server 'd94d6333-82e6-4f87-b7ab-b786776d"
"f9d1': Internal error happened.")
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ERROR,
reason=six.text_type(reason),
physical_id=new_id)])
def test_node_recover_no_physical_id_reboot_op(self):
node = nodem.Node('node1', PROFILE_ID, None)
action = mock.Mock(inputs={'operation': 'REBOOT'})
res = node.do_recover(self.context, action)
self.assertFalse(res)
def test_node_recover_no_physical_id_rebuild_op(self):
node = nodem.Node('node1', PROFILE_ID, None)
action = mock.Mock(inputs={'operation': 'REBUILD'})
res = node.do_recover(self.context, action)
self.assertFalse(res)
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_no_physical_id_no_op(self, mock_recover,
mock_status):
def set_status(*args, **kwargs):
if args[1] == 'ACTIVE':
node.physical_id = new_id
node.data = {'recovery': 'RECREATE'}
node = nodem.Node('node1', PROFILE_ID, '', id='fake')
new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a'
mock_recover.return_value = new_id, True
mock_status.side_effect = set_status
mock_check = self.patchobject(pb.Profile, 'check_object')
mock_check.return_value = False
action = mock.Mock(
outputs={}, inputs={})
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_check.assert_not_called()
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
self.assertEqual('node1', node.name)
self.assertEqual(new_id, node.physical_id)
self.assertEqual(PROFILE_ID, node.profile_id)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ACTIVE,
reason='Recovery succeeded',
physical_id=new_id,
data={'recovery': 'RECREATE'})])
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(pb.Profile, 'recover_object')
def test_node_recover_no_physical_id_recreate_op(self, mock_recover,
mock_status):
def set_status(*args, **kwargs):
if args[1] == 'ACTIVE':
node.physical_id = new_id
node.data = {'recovery': 'RECREATE'}
node = nodem.Node('node1', PROFILE_ID, '', id='fake')
new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a'
mock_recover.return_value = new_id, True
mock_status.side_effect = set_status
mock_check = self.patchobject(pb.Profile, 'check_object')
mock_check.return_value = False
action = mock.Mock(
outputs={}, inputs={'operation': 'RECREATE',
'check': True})
res = node.do_recover(self.context, action)
self.assertTrue(res)
mock_check.assert_called_once_with(self.context, node)
mock_recover.assert_called_once_with(
self.context, node, **action.inputs)
self.assertEqual('node1', node.name)
self.assertEqual(new_id, node.physical_id)
self.assertEqual(PROFILE_ID, node.profile_id)
mock_status.assert_has_calls([
mock.call(self.context, 'RECOVERING',
reason='Recovery in progress'),
mock.call(self.context, consts.NS_ACTIVE,
reason='Recovery succeeded',
physical_id=new_id,
data={'recovery': 'RECREATE'})])
@mock.patch.object(nodem.Node, 'set_status')
def test_node_recover_operation_not_support(self, mock_set_status):
node = nodem.Node('node1', PROFILE_ID, None)
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
action = mock.Mock(
outputs={}, inputs={'operation': 'foo'})
res = node.do_recover(self.context, action)
self.assertEqual({}, action.outputs)
self.assertFalse(res)
@mock.patch.object(nodem.Node, 'set_status')
def test_node_recover_operation_not_string(self, mock_set_status):
node = nodem.Node('node1', PROFILE_ID, None)
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
action = mock.Mock(
outputs={}, inputs={'operation': 'foo'})
res = node.do_recover(self.context, action)
self.assertEqual({}, action.outputs)
self.assertFalse(res)
@mock.patch.object(nodem.Node, 'set_status')
def test_node_operation(self, mock_set_status):
node = nodem.Node('node1', PROFILE_ID, '')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
x_profile = mock.Mock()
x_profile.handle_dance = mock.Mock(return_value=True)
node.rt['profile'] = x_profile
inputs = {'operation': 'dance', 'params': {'style': 'tango'}}
res = node.do_operation(self.context, **inputs)
self.assertTrue(res)
mock_set_status.assert_has_calls([
mock.call(self.context, consts.NS_OPERATING,
reason="Operation 'dance' in progress"),
mock.call(self.context, consts.NS_ACTIVE,
reason="Operation 'dance' succeeded")
])
x_profile.handle_dance.assert_called_once_with(node, style='tango')
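# Editor's note (illustrative, not part of the original test): the assertions
# above encode the expectation that do_operation() resolves the 'operation'
# input to a 'handle_<operation>' method on the node's profile (handle_dance
# here) and forwards the 'params' dict as keyword arguments (style='tango').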
def test_node_operation_no_physical_id(self):
node = nodem.Node('node1', PROFILE_ID, None)
inputs = {'operation': 'dance', 'params': {'style': 'tango'}}
res = node.do_operation(self.context, **inputs)
self.assertFalse(res)
@mock.patch.object(nodem.Node, 'set_status')
def test_node_operation_failed_op(self, mock_set_status):
node = nodem.Node('node1', PROFILE_ID, '')
node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1'
x_profile = mock.Mock()
err = exception.EResourceOperation(
op='dance', type='container', id='test_id', message='Boom')
x_profile.handle_dance = mock.Mock(side_effect=err)
node.rt['profile'] = x_profile
inputs = {'operation': 'dance', 'params': {'style': 'tango'}}
res = node.do_operation(self.context, **inputs)
self.assertFalse(res)
mock_set_status.assert_has_calls([
mock.call(self.context, consts.NS_OPERATING,
reason="Operation 'dance' in progress"),
mock.call(self.context, consts.NS_ERROR,
reason="Failed in dance container 'test_id': Boom.")
])
x_profile.handle_dance.assert_called_once_with(node, style='tango')
def _verify_execution_create_args(self, expected_name,
expected_inputs_dict, wfc):
wfc.execution_create.assert_called_once_with(mock.ANY, mock.ANY)
actual_call_args, call_kwargs = wfc.execution_create.call_args
# execution_create parameters are name and inputs
actual_call_name, actual_call_inputs = actual_call_args
# actual_call_inputs is string representation of a dictionary.
# convert actual_call_inputs to json, then dump it back as string
# sorted by key
final_actual_call_inputs = jsonutils.dumps(
jsonutils.loads(actual_call_inputs), sort_keys=True)
# dump expected_inputs_dict as string sorted by key
final_expected_inputs = jsonutils.dumps(
expected_inputs_dict, sort_keys=True)
# compare the sorted input strings along with the names
self.assertEqual(actual_call_name, expected_name)
self.assertEqual(final_actual_call_inputs, final_expected_inputs)
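# Editor's note (illustrative, not part of the original test): serialising
# both dictionaries with sort_keys=True makes the comparison independent of
# key order, e.g. these two dumps compare equal:
#   jsonutils.dumps({'node_id': 'N1', 'cluster_id': 'C1'}, sort_keys=True)
#   jsonutils.dumps({'cluster_id': 'C1', 'node_id': 'N1'}, sort_keys=True)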
def test_run_workflow(self):
node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER')
node.physical_id = 'FAKE_NODE'
wfc = mock.Mock()
wfc.workflow_find.return_value = None
wfc.workflow_create = mock.Mock()
wfc.execution_create = mock.Mock()
x_profile = mock.Mock()
x_profile.workflow = mock.Mock(return_value=wfc)
node.rt['profile'] = x_profile
options = {
'workflow_name': 'foo',
'inputs': {
'definition': {
'bar': 'baz'
},
'FAKE_KEY1': 'FAKE_VALUE1',
'FAKE_KEY2': 'FAKE_VALUE2',
}
}
res = node.run_workflow(**options)
self.assertTrue(res)
x_profile.workflow.assert_called_once_with(node)
wfc.workflow_find.assert_called_once_with('foo')
wfc.workflow_create.assert_called_once_with(
{'bar': 'baz'}, scope='private')
final_dict = {
'cluster_id': 'FAKE_CLUSTER',
'node_id': 'FAKE_NODE',
'FAKE_KEY1': 'FAKE_VALUE1',
'FAKE_KEY2': 'FAKE_VALUE2',
}
self._verify_execution_create_args('foo', final_dict, wfc)
def test_run_workflow_no_physical_id(self):
node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER')
node.physical_id = None
res = node.run_workflow()
self.assertFalse(res)
def test_run_workflow_workflow_is_found(self):
node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER')
node.physical_id = 'FAKE_NODE'
wfc = mock.Mock()
wfc.workflow_find.return_value = mock.Mock(definition={'bar': 'baz'})
wfc.workflow_create = mock.Mock()
wfc.execution_create = mock.Mock()
x_profile = mock.Mock()
x_profile.workflow = mock.Mock(return_value=wfc)
node.rt['profile'] = x_profile
options = {
'workflow_name': 'foo',
'inputs': {
'FAKE_KEY1': 'FAKE_VALUE1',
'FAKE_KEY2': 'FAKE_VALUE2',
}
}
res = node.run_workflow(**options)
self.assertTrue(res)
x_profile.workflow.assert_called_once_with(node)
wfc.workflow_find.assert_called_once_with('foo')
self.assertEqual(0, wfc.workflow_create.call_count)
final_dict = {
'cluster_id': 'FAKE_CLUSTER',
'node_id': 'FAKE_NODE',
'FAKE_KEY1': 'FAKE_VALUE1',
'FAKE_KEY2': 'FAKE_VALUE2',
}
self._verify_execution_create_args('foo', final_dict, wfc)
def test_run_workflow_failed_creation(self):
node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER')
node.physical_id = 'FAKE_NODE'
wfc = mock.Mock()
wfc.workflow_find.return_value = None
err = exception.InternalError(message='boom')
wfc.workflow_create.side_effect = err
wfc.execution_create = mock.Mock()
x_profile = mock.Mock()
x_profile.workflow = mock.Mock(return_value=wfc)
node.rt['profile'] = x_profile
options = {
'workflow_name': 'foo',
'inputs': {
'definition': {'bar': 'baz'},
'FAKE_KEY1': 'FAKE_VALUE1',
'FAKE_KEY2': 'FAKE_VALUE2',
}
}
ex = self.assertRaises(exception.EResourceOperation,
node.run_workflow,
**options)
self.assertEqual("Failed in executing workflow 'foo': boom.",
six.text_type(ex))
x_profile.workflow.assert_called_once_with(node)
wfc.workflow_find.assert_called_once_with('foo')
wfc.workflow_create.assert_called_once_with(
{'bar': 'baz'}, scope='private')
self.assertEqual(0, wfc.execution_create.call_count)
def test_run_workflow_failed_execution(self):
node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER')
node.physical_id = 'FAKE_NODE'
wfc = mock.Mock()
wfc.workflow_find.return_value = None
wfc.workflow_create = mock.Mock()
err = exception.InternalError(message='boom')
wfc.execution_create.side_effect = err
x_profile = mock.Mock()
x_profile.workflow = mock.Mock(return_value=wfc)
node.rt['profile'] = x_profile
options = {
'workflow_name': 'foo',
'inputs': {
'definition': {'bar': 'baz'},
'FAKE_KEY1': 'FAKE_VALUE1',
'FAKE_KEY2': 'FAKE_VALUE2',
}
}
ex = self.assertRaises(exception.EResourceOperation,
node.run_workflow,
**options)
self.assertEqual("Failed in executing workflow 'foo': boom.",
six.text_type(ex))
x_profile.workflow.assert_called_once_with(node)
wfc.workflow_find.assert_called_once_with('foo')
wfc.workflow_create.assert_called_once_with(
{'bar': 'baz'}, scope='private')
final_dict = {
'cluster_id': 'FAKE_CLUSTER',
'node_id': 'FAKE_NODE',
'FAKE_KEY1': 'FAKE_VALUE1',
'FAKE_KEY2': 'FAKE_VALUE2',
}
self._verify_execution_create_args('foo', final_dict, wfc)
| 0.453504 | 0.344251
try:
import sys
import os
import click
from tabulate import tabulate
from utilities_common.util_base import UtilHelper
except ImportError as e:
raise ImportError("%s - required module not found" % str(e))
VERSION = '2.0'
ERROR_PERMISSIONS = 1
ERROR_CHASSIS_LOAD = 2
ERROR_NOT_IMPLEMENTED = 3
ERROR_PDDF_NOT_SUPPORTED = 4
# Global platform-specific chassis class instance
platform_chassis = None
# Load the helper class
helper = UtilHelper()
# ==================== CLI commands and groups ====================
# This is our main entrypoint - the main 'pddf_fanutil' command
@click.group()
def cli():
"""pddf_fanutil - Command line utility for providing FAN information"""
global platform_chassis
if os.geteuid() != 0:
click.echo("Root privileges are required for this operation")
sys.exit(ERROR_PERMISSIONS)
if not helper.check_pddf_mode():
click.echo("PDDF mode should be supported and enabled for this platform for this operation")
sys.exit(ERROR_PDDF_NOT_SUPPORTED)
# Load platform-specific chassis 2.0 api class
platform_chassis = helper.load_platform_chassis()
if not platform_chassis:
sys.exit(ERROR_CHASSIS_LOAD)
# 'version' subcommand
@cli.command()
def version():
"""Display version info"""
click.echo("PDDF fanutil version {0}".format(VERSION))
# 'numfans' subcommand
@cli.command()
def numfans():
"""Display number of FANs installed on device"""
num_fans = platform_chassis.get_num_fans()
click.echo(num_fans)
# 'status' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="Index of FAN (1-based)")
def status(index):
"""Display FAN status"""
fan_list = []
if (index < 0):
fan_list = platform_chassis.get_all_fans()
default_index = 0
else:
fan_list = [platform_chassis.get_fan(index-1)]
default_index = index-1
header = ['FAN', 'Status']
status_table = []
for idx, fan in enumerate(fan_list, default_index):
fan_name = helper.try_get(fan.get_name, "Fan {}".format(idx+1))
status = 'NOT PRESENT'
if fan.get_presence():
oper_status = helper.try_get(fan.get_status, 'UNKNOWN')
if oper_status is True:
status = 'OK'
elif oper_status is False:
status = 'NOT OK'
else:
status = oper_status
status_table.append([fan_name, status])
if status_table:
click.echo(tabulate(status_table, header, tablefmt="simple"))
# 'direction' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="Index of FAN (1-based)")
def direction(index):
"""Display FAN airflow direction"""
fan_list = []
if (index < 0):
fan_list = platform_chassis.get_all_fans()
default_index = 0
else:
fan_list = [platform_chassis.get_fan(index-1)]
default_index = index-1
header = ['FAN', 'Direction']
dir_table = []
for idx, fan in enumerate(fan_list, default_index):
fan_name = helper.try_get(fan.get_name, "Fan {}".format(idx+1))
direction = helper.try_get(fan.get_direction, 'N/A')
dir_table.append([fan_name, direction.capitalize()])
if dir_table:
click.echo(tabulate(dir_table, header, tablefmt="simple"))
# 'speed' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="Index of FAN (1-based)")
def getspeed(index):
"""Display FAN speed in RPM"""
fan_list = []
if (index < 0):
fan_list = platform_chassis.get_all_fans()
default_index = 0
else:
fan_list = [platform_chassis.get_fan(index-1)]
default_index = index-1
header = ['FAN', 'SPEED (RPM)']
speed_table = []
for idx, fan in enumerate(fan_list, default_index):
fan_name = helper.try_get(fan.get_name, "Fan {}".format(idx+1))
rpm = helper.try_get(fan.get_speed_rpm, 'N/A')
speed_table.append([fan_name, rpm])
if speed_table:
click.echo(tabulate(speed_table, header, tablefmt="simple"))
# 'setspeed' subcommand
@cli.command()
@click.argument('speed', type=int)
def setspeed(speed):
"""Set FAN speed in percentage"""
if speed is None:
click.echo("speed value is required")
raise click.Abort()
fan_list = platform_chassis.get_all_fans()
for idx, fan in enumerate(fan_list):
try:
status = fan.set_speed(speed)
except NotImplementedError:
click.echo("Set speed API not implemented")
sys.exit(0)
if not status:
click.echo("Failed")
sys.exit(1)
click.echo("Successful")
@cli.group()
def debug():
"""pddf_fanutil debug commands"""
pass
@debug.command()
def dump_sysfs():
"""Dump all Fan related SysFS paths"""
fan_list = platform_chassis.get_all_fans()
for idx, fan in enumerate(fan_list):
status = fan.dump_sysfs()
if status:
for i in status:
click.echo(i)
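# Editor's note -- example invocations (a sketch, assuming the script is
# installed as the 'pddf_fanutil' entry point and run as root):
#   pddf_fanutil numfans          # number of fans on the device
#   pddf_fanutil status -i 1      # status of the first fan only
#   pddf_fanutil getspeed         # speed (RPM) of every fan
#   pddf_fanutil setspeed 70      # set all fans to 70 percent duty cycle
#   pddf_fanutil debug dump_sysfs # dump fan-related SysFS paths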
if __name__ == '__main__':
cli()
| pddf_fanutil/main.py | 0.327346 | 0.07333
import os
import re
import argparse
import pandas as pd
import importlib
import yaml
import copy
from zdb.modules.multirun import multidraw
from atsge.build_parallel import build_parallel
import logging
logging.getLogger(__name__).setLevel(logging.INFO)
logging.getLogger("atsge.SGEJobSubmitter").setLevel(logging.INFO)
logging.getLogger("atsge.WorkingArea").setLevel(logging.INFO)
logging.getLogger(__name__).propagate = False
logging.getLogger("atsge.SGEJobSubmitter").propagate = False
logging.getLogger("atsge.WorkingArea").propagate = False
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("mc", help="Path to MC pandas pickle")
parser.add_argument("drawer", help="Path to drawing function")
parser.add_argument("cfg", help="Plotting config file")
parser.add_argument(
"-m", "--mode", default="multiprocessing", type=str,
help="Parallelisation: 'multiprocessing', 'sge', 'htcondor'",
)
parser.add_argument(
"-j", "--ncores", default=0, type=int,
help="Number of cores for 'multiprocessing' jobs",
)
parser.add_argument(
"-n", "--nplots", default=-1, type=int,
help="Number of plots to draw. -1 = all",
)
parser.add_argument(
"-o", "--outdir", default="temp", type=str, help="Output directory",
)
return parser.parse_args()
def rename_df_index(df, index, rename_list):
if df is None:
return df
indexes = df.index.names
tdf = df.reset_index()
for new_val, selection in rename_list:
tdf.loc[tdf.eval(selection),index] = new_val
return tdf.set_index(indexes)
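# Editor's note (illustrative, not part of the original script): for example,
#   rename_df_index(df, 'parent',
#                   [("WJetsToENu", "(parent=='WJetsToLNu') & (LeptonIsElectron==1)")])
# resets the MultiIndex, relabels the 'parent' value to 'WJetsToENu' for every
# row matching the pandas query string, and then restores the original index.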
def parallel_draw(draw, jobs, options):
if len(jobs)==0:
return
njobs = options.ncores
if options.mode in ["multiprocessing"]:
njobs = len(jobs)+1
jobs = [
jobs[i:i+len(jobs)//njobs+1]
for i in xrange(0, len(jobs), len(jobs)//njobs+1)
]
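# Editor's note (illustrative): with njobs=4 (e.g. '-j 4' in 'sge' mode) and
# 10 jobs, the chunk size is 10//4+1 = 3, giving groups [0:3], [3:6], [6:9]
# and [9:10]; in 'multiprocessing' mode njobs is len(jobs)+1, so every chunk
# holds a single job.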
parallel = build_parallel(
options.mode, processes=options.ncores, quiet=False,
dispatcher_options={"vmem": 6, "walltime": 3*60*60},
)
parallel.begin()
try:
parallel.communicationChannel.put_multiple([{
'task': multidraw,
'args': (draw, args),
'kwargs': {},
} for args in jobs])
results = parallel.communicationChannel.receive()
except KeyboardInterrupt:
parallel.terminate()
parallel.end()
def main():
options = parse_args()
# Setup drawer function
module_name, function_name = options.drawer.split(":")
draw = getattr(importlib.import_module(module_name), function_name)
# open cfg
with open(options.cfg, 'r') as f:
cfg = yaml.load(f)
# Read in dataframes
df = pd.read_pickle(options.mc) if options.mc is not None else None
df = rename_df_index(df, "parent", [
("WJetsToENu", "(parent=='WJetsToLNu') & (LeptonIsElectron==1)"),
("WJetsToMuNu", "(parent=='WJetsToLNu') & (LeptonIsMuon==1)"),
#("WJetsToTauNu", "(parent=='WJetsToLNu') & (LeptonIsTau==1)"),
("WJetsToTauHNu", "(parent=='WJetsToLNu') & (LeptonIsTau==1) & (nGenTauL==0)"),
("WJetsToTauLNu", "(parent=='WJetsToLNu') & (LeptonIsTau==1) & (nGenTauL==1)"),
("DYJetsToEE", "(parent=='DYJetsToLL') & (LeptonIsElectron==1)"),
("DYJetsToMuMu", "(parent=='DYJetsToLL') & (LeptonIsMuon==1)"),
#("DYJetsToTauTau", "(parent=='DYJetsToLL') & (LeptonIsTau==1)"),
("DYJetsToTauHTauH", "(parent=='DYJetsToLL') & (LeptonIsTau==1) & (nGenTauL==0)"),
("DYJetsToTauHTauL", "(parent=='DYJetsToLL') & (LeptonIsTau==1) & (nGenTauL==1)"),
("DYJetsToTauLTauL", "(parent=='DYJetsToLL') & (LeptonIsTau==1) & (nGenTauL==2)"),
]).reset_index(["LeptonIsElectron", "LeptonIsMuon", "LeptonIsTau", "nGenTauL"], drop=True)
df = df.groupby(df.index.names).sum()
# cutflows
cutflows = df.index.get_level_values("selection").unique()
# variations
varnames = df.index.get_level_values("varname").unique()
regex = re.compile("^(?P<varname>[a-zA-Z0-9_]+)_(?P<variation>[a-zA-Z0-9]+)(Up|Down)$")
varname_variations = {}
for v in varnames:
match = regex.search(v)
if match:
varname = match.group("varname")
variation = match.group("variation")
if varname not in varname_variations:
varname_variations[varname] = []
if variation not in varname_variations[varname]:
varname_variations[varname].append(variation)
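# Editor's note (illustrative): for a hypothetical varname such as
# 'met_jesTotalUp', the regex yields varname='met' and variation='jesTotal',
# so varname_variations accumulates {'met': ['jesTotal', ...]}.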
# group into histograms
jobs = []
for cutflow in cutflows:
for varname, variations in varname_variations.items():
for variation in variations:
job_cfg = copy.deepcopy(cfg[variation])
job_cfg.update(cfg.get("defaults", {}))
job_cfg.update(cfg.get(cutflow, {}))
job_cfg.update(cfg.get(varname, {}))
outdir = os.path.join(options.outdir, cutflow, varname)
if not os.path.exists(outdir):
os.makedirs(outdir)
job_cfg["outpath"] = os.path.abspath(
os.path.join(outdir, cfg[variation]["outpath"])
)
df_loc = df.loc[
(
(df.index.get_level_values("selection")==cutflow)
& (df.index.get_level_values("varname").isin(
[varname+"_nominal", varname+"_"+variation+"Up", varname+"_"+variation+"Down"]
))
), :
]
jobs.append((df_loc, copy.deepcopy(job_cfg)))
if options.nplots >= 0 and options.nplots < len(jobs):
jobs = jobs[:options.nplots]
parallel_draw(draw, jobs, options)
if __name__ == "__main__":
main()
| zdb/scripts/draw_variation.py | 0.183228 | 0.149656
from django.conf import settings
from django.utils import translation
import pytest
from mock import Mock, patch
from olympia.amo.tests import TestCase
from olympia.amo.utils import from_string
from olympia.addons.models import Addon
from olympia.translations.templatetags import jinja_helpers
from olympia.translations.fields import save_signal
from olympia.translations.models import PurifiedTranslation
from olympia.translations.tests.testapp.models import TranslatedModel
pytestmark = pytest.mark.django_db
def test_locale_html():
"""Test HTML attributes for languages different than the site language"""
testfield = Mock()
# same language: no need for attributes
this_lang = translation.get_language()
testfield.locale = this_lang
s = jinja_helpers.locale_html(testfield)
assert not s, 'no special HTML attributes for site language'
# non-rtl language
testfield.locale = 'de'
s = jinja_helpers.locale_html(testfield)
assert s == ' lang="de" dir="ltr"'
# rtl language
for lang in settings.LANGUAGES_BIDI:
testfield.locale = lang
s = jinja_helpers.locale_html(testfield)
assert s == ' lang="%s" dir="rtl"' % testfield.locale
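# Editor's note (illustrative, not part of the original test): for a
# right-to-left locale, e.g. 'ar' (assuming it appears in LANGUAGES_BIDI),
# the helper is expected to return ' lang="ar" dir="rtl"'.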
def test_locale_html_xss():
"""Test for nastiness-removal in the transfield's locale"""
testfield = Mock()
# same language: no need for attributes
testfield.locale = '<script>alert(1)</script>'
s = jinja_helpers.locale_html(testfield)
assert '<script>' not in s
assert '&lt;script&gt;alert(1)&lt;/script&gt;' in s
def test_empty_locale_html():
"""locale_html must still work if field is None."""
s = jinja_helpers.locale_html(None)
assert not s, 'locale_html on None must be empty.'
def test_truncate_purified_field():
s = '<i>one</i><i>two</i>'
t = PurifiedTranslation(localized_string=s)
actual = from_string('{{ s|truncate(6) }}').render({'s': t})
assert actual == s
def test_truncate_purified_field_xss():
"""Truncating should not introduce xss issues."""
s = 'safe <script>alert("omg")</script>'
t = PurifiedTranslation(localized_string=s)
actual = from_string('{{ s|truncate(100) }}').render({'s': t})
assert actual == 'safe &lt;script&gt;alert("omg")&lt;/script&gt;'
actual = from_string('{{ s|truncate(5) }}').render({'s': t})
assert actual == 'safe ...'
def test_clean():
# Links are not mangled, bad HTML is escaped, newlines are slimmed.
s = '<ul><li><a href="#woo">\n\nyeah</a></li>\n\n<li><script></li></ul>'
assert jinja_helpers.clean(s) == (
'<ul><li><a href="#woo">\n\nyeah</a></li><li>&lt;script&gt;</li></ul>')
def test_clean_in_template():
s = '<a href="#woo">yeah</a>'
assert from_string('{{ s|clean }}').render({'s': s}) == s
def test_clean_strip_all_html():
s = '<a href="#woo">yeah</a>'
expected = 'yeah'
assert from_string('{{ s|clean(true) }}').render({'s': s}) == expected
def test_no_links():
template = from_string('{{ s|no_links }}')
s = 'a <a href="http://url.link">http://example.com</a>, http://text.link'
expected = 'a http://example.com, http://text.link'
assert template.render({'s': s}) == expected
# Bad markup.
s = '<http://bad.markup.com'
assert template.render({'s': s}) == ''
# Bad markup.
s = 'some text <http://bad.markup.com'
assert template.render({'s': s}) == 'some text'
def test_l10n_menu():
# No remove_locale_url provided.
menu = jinja_helpers.l10n_menu({})
assert 'data-rm-locale=""' in menu, menu
# Specific remove_locale_url provided (eg for user).
menu = jinja_helpers.l10n_menu({}, remove_locale_url='/some/url/')
assert 'data-rm-locale="/some/url/"' in menu, menu
# Use the remove_locale_url taken from the addon in the context.
menu = jinja_helpers.l10n_menu(
{'addon': Addon()}, remove_locale_url='some/url/')
assert 'data-rm-locale="/en-US/developers/addon/None/rmlocale"' in menu
@patch.object(settings, 'AMO_LANGUAGES', ('de', 'en-US', 'es', 'fr', 'pt-BR'))
class TestAllLocales(TestCase):
def test_all_locales_none(self):
addon = None
field_name = 'description'
assert jinja_helpers.all_locales(addon, field_name) is None
addon = Mock()
field_name = 'description'
del addon.description
assert jinja_helpers.all_locales(addon, field_name) is None
def test_all_locales(self):
obj = TranslatedModel()
obj.description = {
'en-US': 'There',
'es': 'Is No',
'fr': 'Spoon'
}
# Pretend the TranslateModel instance was saved to force Translation
# objects to be saved.
save_signal(sender=TranslatedModel, instance=obj)
result = jinja_helpers.all_locales(obj, 'description')
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span lang="fr">Spoon</span>' in result
def test_all_locales_empty(self):
obj = TranslatedModel()
obj.description = {
'en-US': 'There',
'es': 'Is No',
'fr': ''
}
# Pretend the TranslateModel instance was saved to force Translation
# objects to be saved.
save_signal(sender=TranslatedModel, instance=obj)
result = jinja_helpers.all_locales(obj, 'description')
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span lang="fr"></span>' in result
result = jinja_helpers.all_locales(
obj, 'description', prettify_empty=True)
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span class="empty" lang="fr">None</span>' in result | src/olympia/translations/tests/test_helpers.py | from django.conf import settings
from django.utils import translation
import pytest
from mock import Mock, patch
from olympia.amo.tests import TestCase
from olympia.amo.utils import from_string
from olympia.addons.models import Addon
from olympia.translations.templatetags import jinja_helpers
from olympia.translations.fields import save_signal
from olympia.translations.models import PurifiedTranslation
from olympia.translations.tests.testapp.models import TranslatedModel
pytestmark = pytest.mark.django_db
def test_locale_html():
"""Test HTML attributes for languages different than the site language"""
testfield = Mock()
# same language: no need for attributes
this_lang = translation.get_language()
testfield.locale = this_lang
s = jinja_helpers.locale_html(testfield)
assert not s, 'no special HTML attributes for site language'
# non-rtl language
testfield.locale = 'de'
s = jinja_helpers.locale_html(testfield)
assert s == ' lang="de" dir="ltr"'
# rtl language
for lang in settings.LANGUAGES_BIDI:
testfield.locale = lang
s = jinja_helpers.locale_html(testfield)
assert s == ' lang="%s" dir="rtl"' % testfield.locale
def test_locale_html_xss():
"""Test for nastiness-removal in the transfield's locale"""
testfield = Mock()
# same language: no need for attributes
testfield.locale = '<script>alert(1)</script>'
s = jinja_helpers.locale_html(testfield)
assert '<script>' not in s
assert '<script>alert(1)</script>' in s
def test_empty_locale_html():
"""locale_html must still work if field is None."""
s = jinja_helpers.locale_html(None)
assert not s, 'locale_html on None must be empty.'
def test_truncate_purified_field():
s = '<i>one</i><i>two</i>'
t = PurifiedTranslation(localized_string=s)
actual = from_string('{{ s|truncate(6) }}').render({'s': t})
assert actual == s
def test_truncate_purified_field_xss():
"""Truncating should not introduce xss issues."""
s = 'safe <script>alert("omg")</script>'
t = PurifiedTranslation(localized_string=s)
actual = from_string('{{ s|truncate(100) }}').render({'s': t})
assert actual == 'safe <script>alert("omg")</script>'
actual = from_string('{{ s|truncate(5) }}').render({'s': t})
assert actual == 'safe ...'
def test_clean():
# Links are not mangled, bad HTML is escaped, newlines are slimmed.
s = '<ul><li><a href="#woo">\n\nyeah</a></li>\n\n<li><script></li></ul>'
assert jinja_helpers.clean(s) == (
'<ul><li><a href="#woo">\n\nyeah</a></li><li><script></li></ul>')
def test_clean_in_template():
s = '<a href="#woo">yeah</a>'
assert from_string('{{ s|clean }}').render({'s': s}) == s
def test_clean_strip_all_html():
s = '<a href="#woo">yeah</a>'
expected = 'yeah'
assert from_string('{{ s|clean(true) }}').render({'s': s}) == expected
def test_no_links():
template = from_string('{{ s|no_links }}')
s = 'a <a href="http://url.link">http://example.com</a>, http://text.link'
expected = 'a http://example.com, http://text.link'
assert template.render({'s': s}) == expected
# Bad markup.
s = '<http://bad.markup.com'
assert template.render({'s': s}) == ''
# Bad markup.
s = 'some text <http://bad.markup.com'
assert template.render({'s': s}) == 'some text'
def test_l10n_menu():
# No remove_locale_url provided.
menu = jinja_helpers.l10n_menu({})
assert 'data-rm-locale=""' in menu, menu
# Specific remove_locale_url provided (eg for user).
menu = jinja_helpers.l10n_menu({}, remove_locale_url='/some/url/')
assert 'data-rm-locale="/some/url/"' in menu, menu
# Use the remove_locale_url taken from the addon in the context.
menu = jinja_helpers.l10n_menu(
{'addon': Addon()}, remove_locale_url='some/url/')
assert 'data-rm-locale="/en-US/developers/addon/None/rmlocale"' in menu
@patch.object(settings, 'AMO_LANGUAGES', ('de', 'en-US', 'es', 'fr', 'pt-BR'))
class TestAllLocales(TestCase):
def test_all_locales_none(self):
addon = None
field_name = 'description'
assert jinja_helpers.all_locales(addon, field_name) is None
addon = Mock()
field_name = 'description'
del addon.description
assert jinja_helpers.all_locales(addon, field_name) is None
def test_all_locales(self):
obj = TranslatedModel()
obj.description = {
'en-US': 'There',
'es': 'Is No',
'fr': 'Spoon'
}
# Pretend the TranslateModel instance was saved to force Translation
# objects to be saved.
save_signal(sender=TranslatedModel, instance=obj)
result = jinja_helpers.all_locales(obj, 'description')
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span lang="fr">Spoon</span>' in result
def test_all_locales_empty(self):
obj = TranslatedModel()
obj.description = {
'en-US': 'There',
'es': 'Is No',
'fr': ''
}
# Pretend the TranslateModel instance was saved to force Translation
# objects to be saved.
save_signal(sender=TranslatedModel, instance=obj)
result = jinja_helpers.all_locales(obj, 'description')
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span lang="fr"></span>' in result
result = jinja_helpers.all_locales(
obj, 'description', prettify_empty=True)
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span class="empty" lang="fr">None</span>' in result | 0.63624 | 0.363506 |
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
from app import app
from apps import theme_explorer as te, text
import util
"""
=====================================================================
Helper functions and components
"""
df = px.data.gapminder()
code = util.get_code_file("dash_bootstrap_templates_app.py")
copy_code_div = util.get_copy_code_div(code, id="copy_template_code")
# make control panel
use_templates = dbc.RadioItems(
options=[
{"label": "Use figure templates from dash-bootstrap-templates", "value": 1},
{"label": "Use Plotly default figure template", "value": 2},
],
value=1,
id="use_figure_template",
)
control_panel_text = dcc.Markdown(
text.dash_bootstrap_templates_text, className="border mb-5 p-4"
)
# needed because the theme dropdown also updates "css" on Theme Explorer page but not here
dummy_output = html.Div(id="css", className='d-none')
control_panel = [control_panel_text, te.boostrap_card, use_templates, dummy_output]
carousel = dbc.Carousel(
ride="carousel",
items=[
{
"key": "1",
"src": "https://user-images.githubusercontent.com/72614349/129459807-30c22ffe-7a8c-44b9-9555-6cfd50ec355b.png",
},
{
"key": "2",
"src": "https://user-images.githubusercontent.com/72614349/129459808-40032148-82e1-47ce-a49a-05e598c69400.png",
},
],
)
carousel_text = dcc.Markdown(text.dash_bootstrap_templates_app_text)
"""
===============================================================================
Layout
"""
layout = dbc.Container(
[
util.header,
dbc.Row(
[
dbc.Col(control_panel, lg=4, sm=12),
dbc.Col(
html.Div(
id="db_templates_sample_app", className="mx-1 mb-4 shadow p-4",
),
lg=8,
sm=12,
),
],
),
dbc.Row(
[
dbc.Col([carousel_text, carousel], lg=4, sm=12),
dbc.Col(html.Div(copy_code_div,), lg=8, sm=12,),
],
),
],
fluid=True,
id="bootstrap_templates",
)
"""
=====================================================================
Display Sample App based on theme selected
"""
@app.callback(
Output("db_templates_sample_app", "children"),
Input("themes", "value"),
Input("use_figure_template", "value"),
)
def update_graphs(theme, use_template):
template = util.url_dbc_themes[theme].lower() if use_template == 1 else {}
heading_txt = (
"App with dash-bootstrap-templates"
if use_template == 1
else "App with Plotly default figure template"
)
heading = html.H3(heading_txt, className="bg-primary text-white p-2")
dff = df[df.year.between(1952, 1982)]
dff = dff[dff.continent.isin(df.continent.unique()[1:])]
line_fig = px.line(
dff,
x="year",
y="gdpPercap",
color="continent",
line_group="country",
template=template,
)
dff = dff[dff.year == 1982]
scatter_fig = px.scatter(
dff,
x="lifeExp",
y="gdpPercap",
size="pop",
color="pop",
size_max=60,
template=template,
).update_traces(marker_opacity=0.8)
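    # population-weighted average life expectancy for the 1982 slice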
avg_lifeExp = (dff["lifeExp"] * dff["pop"]).sum() / dff["pop"].sum()
map_fig = px.choropleth(
dff,
locations="iso_alpha",
color="lifeExp",
title="%.0f World Average Life Expectancy was %.1f years" % (1982, avg_lifeExp),
template=template,
)
hist_fig = px.histogram(
dff, x="lifeExp", nbins=10, title="Life Expectancy", template=template
)
graph_height = 300
graphs = html.Div(
[
dbc.Row(
[
dbc.Col(
dcc.Graph(figure=line_fig, style={"height": graph_height}), lg=6
),
dbc.Col(
dcc.Graph(figure=scatter_fig, style={"height": graph_height}),
lg=6,
),
],
className="mt-4",
),
dbc.Row(
[
dbc.Col(
dcc.Graph(figure=hist_fig, style={"height": graph_height}), lg=6
),
dbc.Col(
dcc.Graph(figure=map_fig, style={"height": graph_height}), lg=6
),
],
className="mt-4",
),
]
)
    # These buttons are added to the app just to show the Bootstrap theme colors
buttons = html.Div(
[
dbc.Button("Primary", color="primary", className="mr-1"),
dbc.Button("Secondary", color="secondary", className="mr-1"),
dbc.Button("Success", color="success", className="mr-1"),
dbc.Button("Warning", color="warning", className="mr-1"),
dbc.Button("Danger", color="danger", className="mr-1"),
dbc.Button("Info", color="info", className="mr-1"),
dbc.Button("Light", color="light", className="mr-1"),
dbc.Button("Dark", color="dark", className="mr-1"),
dbc.Button("Link", color="link"),
],
)
return [heading, buttons, graphs]
@app.callback(
Output("bootstrap_templates", "className"), Input("light_dark", "value"),
)
def update_css(value):
return "dbc_light" if value == "Light Themes" else "dbc_dark" | apps/bootstrap_templates.py | import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
from app import app
from apps import theme_explorer as te, text
import util
"""
=====================================================================
Helper functions and components
"""
df = px.data.gapminder()
code = util.get_code_file("dash_bootstrap_templates_app.py")
copy_code_div = util.get_copy_code_div(code, id="copy_template_code")
# make control panel
use_templates = dbc.RadioItems(
options=[
{"label": "Use figure templates from dash-bootstrap-templates", "value": 1},
{"label": "Use Plotly default figure template", "value": 2},
],
value=1,
id="use_figure_template",
)
control_panel_text = dcc.Markdown(
text.dash_bootstrap_templates_text, className="border mb-5 p-4"
)
# needed because the theme dropdown also updates "css" on Theme Explorer page but not here
dummy_output = html.Div(id="css", className='d-none')
control_panel = [control_panel_text, te.boostrap_card, use_templates, dummy_output]
carousel = dbc.Carousel(
ride="carousel",
items=[
{
"key": "1",
"src": "https://user-images.githubusercontent.com/72614349/129459807-30c22ffe-7a8c-44b9-9555-6cfd50ec355b.png",
},
{
"key": "2",
"src": "https://user-images.githubusercontent.com/72614349/129459808-40032148-82e1-47ce-a49a-05e598c69400.png",
},
],
)
carousel_text = dcc.Markdown(text.dash_bootstrap_templates_app_text)
"""
===============================================================================
Layout
"""
layout = dbc.Container(
[
util.header,
dbc.Row(
[
dbc.Col(control_panel, lg=4, sm=12),
dbc.Col(
html.Div(
id="db_templates_sample_app", className="mx-1 mb-4 shadow p-4",
),
lg=8,
sm=12,
),
],
),
dbc.Row(
[
dbc.Col([carousel_text, carousel], lg=4, sm=12),
dbc.Col(html.Div(copy_code_div,), lg=8, sm=12,),
],
),
],
fluid=True,
id="bootstrap_templates",
)
"""
=====================================================================
Display Sample App based on theme selected
"""
@app.callback(
Output("db_templates_sample_app", "children"),
Input("themes", "value"),
Input("use_figure_template", "value"),
)
def update_graphs(theme, use_template):
template = util.url_dbc_themes[theme].lower() if use_template == 1 else {}
heading_txt = (
"App with dash-bootstrap-templates"
if use_template == 1
else "App with Plotly default figure template"
)
heading = html.H3(heading_txt, className="bg-primary text-white p-2")
dff = df[df.year.between(1952, 1982)]
dff = dff[dff.continent.isin(df.continent.unique()[1:])]
line_fig = px.line(
dff,
x="year",
y="gdpPercap",
color="continent",
line_group="country",
template=template,
)
dff = dff[dff.year == 1982]
scatter_fig = px.scatter(
dff,
x="lifeExp",
y="gdpPercap",
size="pop",
color="pop",
size_max=60,
template=template,
).update_traces(marker_opacity=0.8)
avg_lifeExp = (dff["lifeExp"] * dff["pop"]).sum() / dff["pop"].sum()
map_fig = px.choropleth(
dff,
locations="iso_alpha",
color="lifeExp",
title="%.0f World Average Life Expectancy was %.1f years" % (1982, avg_lifeExp),
template=template,
)
hist_fig = px.histogram(
dff, x="lifeExp", nbins=10, title="Life Expectancy", template=template
)
graph_height = 300
graphs = html.Div(
[
dbc.Row(
[
dbc.Col(
dcc.Graph(figure=line_fig, style={"height": graph_height}), lg=6
),
dbc.Col(
dcc.Graph(figure=scatter_fig, style={"height": graph_height}),
lg=6,
),
],
className="mt-4",
),
dbc.Row(
[
dbc.Col(
dcc.Graph(figure=hist_fig, style={"height": graph_height}), lg=6
),
dbc.Col(
dcc.Graph(figure=map_fig, style={"height": graph_height}), lg=6
),
],
className="mt-4",
),
]
)
# These buttons are added to the app just to show the Boostrap theme colors
buttons = html.Div(
[
dbc.Button("Primary", color="primary", className="mr-1"),
dbc.Button("Secondary", color="secondary", className="mr-1"),
dbc.Button("Success", color="success", className="mr-1"),
dbc.Button("Warning", color="warning", className="mr-1"),
dbc.Button("Danger", color="danger", className="mr-1"),
dbc.Button("Info", color="info", className="mr-1"),
dbc.Button("Light", color="light", className="mr-1"),
dbc.Button("Dark", color="dark", className="mr-1"),
dbc.Button("Link", color="link"),
],
)
return [heading, buttons, graphs]
@app.callback(
Output("bootstrap_templates", "className"), Input("light_dark", "value"),
)
def update_css(value):
return "dbc_light" if value == "Light Themes" else "dbc_dark" | 0.656218 | 0.160661 |
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import datasets
import learning
import copy
import argparse
# argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='cora', help='Datasets: cora, email, ssets')
parser.add_argument('--version', default='1', help='version for ssets (1,2,3,4), default 1 for others')
parser.add_argument('--result', action='store_true', help='already have the results, want to plot')
args = parser.parse_args()
def shift_clusters(clusters, found_classes):
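    # Randomly permute the cluster ids that are not yet matched to a class;
    # ids already in found_classes keep their current assignment.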
unique_classes = set(np.unique(clusters))
non_found_classes = list(unique_classes - found_classes)
non_found_classes_original = copy.deepcopy(non_found_classes)
np.random.shuffle(non_found_classes)
m = {non_found_classes_original[i]: non_found_classes[i] for i in range(len(non_found_classes))}
new_clusters = []
for cluster in clusters:
if cluster in m:
new_clusters.append(m[cluster])
else:
new_clusters.append(cluster)
return np.array(new_clusters)
def find_best_clusters(clusters, labels):
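    # Greedy random search for the best relabelling of the k-means clusters:
    # repeatedly shuffle the unmatched cluster ids, keep the permutation with
    # the highest accuracy, then lock in the class it matches best, until all
    # classes are matched or no new match can be found.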
    trial_number = 500
unique_classes = set(np.unique(labels))
found_classes = set()
best_clusters = clusters
while len(found_classes) < len(unique_classes):
clusters = best_clusters
best_acc = accuracy_score(labels, clusters.astype(int))
        for trial in range(trial_number):
shifted_clusters = shift_clusters(clusters, found_classes)
acc = accuracy_score(shifted_clusters, labels)
if acc > best_acc:
print(acc)
best_acc = acc
best_clusters = shifted_clusters
# check matching
match_acc = 0
match_number = 0
for class_ in unique_classes - found_classes:
pos = labels == class_
acc = accuracy_score(labels[pos], best_clusters[pos])
if acc > match_acc:
match_number = class_
match_acc = acc
if match_number == 0:
break
found_classes.add(match_number)
print(found_classes)
return best_clusters
if __name__ == '__main__':
if not args.result:
if args.dataset == 'ssets':
points = datasets.read_points(f'ssets/s{args.version}.txt')
labels = datasets.read_labels(f'ssets/s{args.version}-label.pa')
plt.scatter(points[:, 0], points[:, 1], c=labels)
# plt.title(f'{args.dataset} V{args.version}'.upper())
plt.axis('off')
plt.savefig(f'{args.dataset}/graph_plots/{args.dataset}_{args.version}_graph.png', bbox_inches='tight')
            plt.show()
else:
A = datasets.load_graph_preprocessed(args.dataset, args.version)
labels = datasets.load_labels_preprocessed(args.dataset, args.version)
# graph draw
G = nx.from_numpy_matrix(A.todense())
layout = nx.spring_layout(G)
nx.draw_networkx(G, pos=layout, node_color=labels, node_size=20, with_labels=False)
# plt.title(f'{args.dataset}'.upper())
plt.axis('off')
plt.savefig(f'{args.dataset}/graph_plots/{args.dataset}_{args.version}_graph.png', bbox_inches='tight')
plt.show()
else:
        # best number of embedding dimensions (eigenvectors) found per dataset_version
best = {
'cora_1': 25,
'email_1': 24,
'ssets_1': 13,
'ssets_4': 8
}
if args.dataset == 'ssets':
points = datasets.read_points(f'ssets/s{args.version}.txt')
labels = datasets.read_labels(f'ssets/s{args.version}-label.pa')
eigv = datasets.load_embeddings(args.dataset, args.version, 'N')[:, :best[f'{args.dataset}_{args.version}']]
score, clusters = learning.k_means(eigv, labels, k=len(np.unique(labels)))
best_clusters = find_best_clusters(clusters, labels)
print(accuracy_score(labels, best_clusters))
plt.scatter(points[:, 0], points[:, 1], c=labels)
# plt.title(f'{args.dataset} V{args.version}'.upper())
plt.axis('off')
plt.savefig(f'{args.dataset}/graph_plots/{args.dataset}_{args.version}_graph.png', bbox_inches='tight')
            plt.show()
plt.scatter(points[:, 0], points[:, 1], c=best_clusters)
# plt.title(f'{args.dataset} V{args.version}'.upper())
plt.axis('off')
plt.savefig(f'{args.dataset}/graph_plots/{args.dataset}_{args.version}_graph_result.png', bbox_inches='tight')
            plt.show()
else:
A = datasets.load_graph_preprocessed(args.dataset, args.version)
eigv = datasets.load_embeddings(args.dataset, args.version, 'N')[:, :best[f'{args.dataset}_{args.version}']]
labels = datasets.load_labels_preprocessed(args.dataset, args.version)
score, clusters = learning.k_means(eigv, labels, k=len(np.unique(labels)))
best_clusters = find_best_clusters(clusters, labels)
print(accuracy_score(labels, best_clusters))
# original graph draw
G = nx.from_numpy_matrix(A.todense())
layout = nx.spring_layout(G)
nx.draw_networkx(G, pos=layout, node_color=labels, node_size=20, with_labels=False)
# plt.title(f'{args.dataset}'.upper())
plt.axis('off')
plt.savefig(f'{args.dataset}/graph_plots/{args.dataset}_{args.version}_graph.png', bbox_inches='tight')
plt.show()
# result graph draw
nx.draw_networkx(G, pos=layout, node_color=best_clusters, node_size=20, with_labels=False)
# plt.title(f'{args.dataset}'.upper())
plt.axis('off')
plt.savefig(f'{args.dataset}/graph_plots/{args.dataset}_{args.version}_graph_result.png', bbox_inches='tight')
            plt.show() | plot.py | 0.681303 | 0.502563 |
from sfm.decorator import run_if_is_main
from learn_mongodb.db_test import col
@run_if_is_main(__name__)
def prepare_data():
data = [{
"_id": 1,
"title": "abc123",
"isbn": "0001122223334",
"author": {"last": "zzz", "first": "aaa"},
"copies": 5,
}]
col.insert(data)
prepare_data()
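# Each example below runs a one-stage aggregation pipeline built around $project.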
@run_if_is_main(__name__)
def include_specific_fields_in_output_documents():
"""只输出title和author两个field。
"""
pipeline = [
{
"$project": {
"full_title": "$title", # 将title的field name换成full_title
"author": 1,
},
},
]
doc = list(col.aggregate(pipeline))[0]
assert set(doc.keys()) == {"_id", "full_title", "author"}
include_specific_fields_in_output_documents()
@run_if_is_main(__name__)
def exclude_id_field():
"""跟上例一样, 只不过抛弃掉_id项。
"""
pipeline = [
{
"$project": {
"_id": 0, "title": 1, "author": 1,
},
},
]
doc = list(col.aggregate(pipeline))[0]
assert set(doc.keys()) == {"title", "author"}
exclude_id_field()
@run_if_is_main(__name__)
def include_computed_fields():
"""对文档进行计算之后输出。本例中主要将isbn拆分为了几个子field。
关于string aggregation operations, 请参考:
http://docs.mongodb.org/manual/reference/operator/aggregation-string/
"""
pipeline = [
{
"$project": {
"title": 1,
"isbn": {
"prefix": {"$substr": ["$isbn", 0, 3]},
"group": {"$substr": ["$isbn", 3, 2]},
"publisher": {"$substr": ["$isbn", 5, 4]},
"title": {"$substr": ["$isbn", 9, 3]},
"checkDigit": {"$substr": ["$isbn", 12, 1]}
},
"lastName": "$author.last",
"copiesSold": "$copies",
},
},
]
doc = list(col.aggregate(pipeline))[0]
assert doc == {
"_id": 1,
"copiesSold": 5,
"isbn": {
"checkDigit": "4",
"group": "11",
"prefix": "000",
"publisher": "2222",
"title": "333"
},
"lastName": "zzz",
"title": "abc123"
}
include_computed_fields() | learn_mongodb/p3_aggregation/p2_project.py | 0.413359 | 0.420719 |